# --- File: django_comments_xtd/tests/models.py (repo: lyoniionly/django-comments-xtd, license: BSD-2-Clause) ---
from datetime import datetime
from django.db import models
from django.db.models import permalink
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase as DjangoTestCase
from django_comments_xtd.models import (XtdComment,
MaxThreadLevelExceededException)
class PublicManager(models.Manager):
"""Returns published articles that are not in the future."""
def published(self):
return self.get_query_set().filter(publish__lte=datetime.now())
class Article(models.Model):
"""Article, that accepts comments."""
title = models.CharField('title', max_length=200)
slug = models.SlugField('slug', unique_for_date='publish')
body = models.TextField('body')
allow_comments = models.BooleanField('allow comments', default=True)
publish = models.DateTimeField('publish', default=datetime.now)
objects = PublicManager()
class Meta:
db_table = 'demo_articles'
ordering = ('-publish',)
@permalink
def get_absolute_url(self):
return ('articles-article-detail', None,
{'year': self.publish.year,
                 'month': int(self.publish.strftime('%m')),
'day': self.publish.day,
'slug': self.slug})
class Diary(models.Model):
"""Diary, that accepts comments."""
body = models.TextField('body')
allow_comments = models.BooleanField('allow comments', default=True)
publish = models.DateTimeField('publish', default=datetime.now)
objects = PublicManager()
class Meta:
db_table = 'demo_diary'
ordering = ('-publish',)
class ArticleBaseTestCase(DjangoTestCase):
def setUp(self):
self.article_1 = Article.objects.create(
title="September", slug="september", body="During September...")
self.article_2 = Article.objects.create(
title="October", slug="october", body="What I did on October...")
class XtdCommentManagerTestCase(ArticleBaseTestCase):
def test_for_app_models(self):
# there is no comment posted yet to article_1 nor article_2
count = XtdComment.objects.for_app_models("tests.article").count()
self.assert_(count == 0)
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post one comment to article_1
XtdComment.objects.create(content_type = article_ct,
object_pk = self.article_1.id,
content_object = self.article_1,
site = site,
comment ="just a testing comment",
submit_date = datetime.now())
count = XtdComment.objects.for_app_models("tests.article").count()
self.assert_(count == 1)
# post one comment to article_2
XtdComment.objects.create(content_type = article_ct,
object_pk = self.article_2.id,
content_object = self.article_2,
site = site,
comment = "yet another comment",
submit_date = datetime.now())
count = XtdComment.objects.for_app_models("tests.article").count()
self.assert_(count == 2)
# post a second comment to article_2
XtdComment.objects.create(content_type = article_ct,
object_pk = self.article_2.id,
content_object = self.article_2,
site = site,
comment = "and another one",
submit_date = datetime.now())
count = XtdComment.objects.for_app_models("tests.article").count()
self.assert_(count == 3)
# In order to test the save method and _calculate_thread_data, simulate the
# following threads, in order of arrival:
#
# testcase cmt.id parent level-0 level-1 level-2
# step1 1 - c1 <- cmt1
# step1 2 - c2 <- cmt2
# step2 3 1 -- c3 <- cmt1 to cmt1
# step2 4 1 -- c4 <- cmt2 to cmt1
# step3 5 2 -- c5 <- cmt1 to cmt2
# step4 6 5 -- -- c6 <- cmt1 to cmt1 to cmt2
# step4 7 4 -- -- c7 <- cmt1 to cmt2 to cmt1
# step5 8 3 -- -- c8 <- cmt1 to cmt1 to cmt1
# step5 9 - c9 <- cmt9
def thread_test_step_1(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 1 with parent_id 0
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to article",
submit_date = datetime.now())
# post Comment 2 with parent_id 0
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 2 to article",
submit_date = datetime.now())
def thread_test_step_2(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 3 to parent_id 1
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to comment 1",
submit_date = datetime.now(),
parent_id = 1)
# post Comment 4 to parent_id 1
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 2 to comment 1",
submit_date = datetime.now(),
parent_id = 1)
def thread_test_step_3(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 5 to parent_id 2
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to comment 1",
submit_date = datetime.now(),
parent_id = 2)
def thread_test_step_4(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 6 to parent_id 5
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="cmt 1 to cmt 1 to cmt 2",
submit_date = datetime.now(),
parent_id = 5)
# post Comment 7 to parent_id 4
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="cmt 1 to cmt 2 to cmt 1",
submit_date = datetime.now(),
parent_id = 4)
def thread_test_step_5(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 8 to parent_id 3
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="cmt 1 to cmt 1 to cmt 1",
submit_date = datetime.now(),
parent_id = 3)
# post Comment 9 with parent_id 0
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="cmt 1 to cmt 2 to cmt 1",
submit_date = datetime.now())
class BaseThreadStep1TestCase(ArticleBaseTestCase):
def setUp(self):
super(BaseThreadStep1TestCase, self).setUp()
thread_test_step_1(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c2 # 2 2 2 0 1
) = XtdComment.objects.all()
def test_threaded_comments_step_1_level_0(self):
# comment 1
self.assert_(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assert_(self.c1.level == 0 and self.c1.order == 1)
# comment 2
self.assert_(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assert_(self.c2.level == 0 and self.c2.order == 1)
class ThreadStep2TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep2TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c3, # 3 1 1 1 2
self.c4, # 4 1 1 1 3
self.c2 # 2 2 2 0 1
) = XtdComment.objects.all()
def test_threaded_comments_step_2_level_0(self):
# comment 1
self.assert_(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assert_(self.c1.level == 0 and self.c1.order == 1)
# comment 2
self.assert_(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assert_(self.c2.level == 0 and self.c2.order == 1)
def test_threaded_comments_step_2_level_1(self):
# comment 3
self.assert_(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assert_(self.c3.level == 1 and self.c3.order == 2)
# comment 4
self.assert_(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assert_(self.c4.level == 1 and self.c4.order == 3)
class ThreadStep3TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep3TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c3, # 3 1 1 1 2
self.c4, # 4 1 1 1 3
self.c2, # 2 2 2 0 1
self.c5 # 5 2 2 1 2
) = XtdComment.objects.all()
def test_threaded_comments_step_3_level_0(self):
# comment 1
self.assert_(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assert_(self.c1.level == 0 and self.c1.order == 1)
# comment 2
self.assert_(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assert_(self.c2.level == 0 and self.c2.order == 1)
def test_threaded_comments_step_3_level_1(self):
# comment 3
self.assert_(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assert_(self.c3.level == 1 and self.c3.order == 2)
# comment 4
self.assert_(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assert_(self.c4.level == 1 and self.c4.order == 3)
# comment 5
self.assert_(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assert_(self.c5.level == 1 and self.c5.order == 2)
class ThreadStep4TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep4TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c3, # 3 1 1 1 2
self.c4, # 4 1 1 1 3
self.c7, # 7 1 4 2 4
self.c2, # 2 2 2 0 1
self.c5, # 5 2 2 1 2
self.c6 # 6 2 5 2 3
) = XtdComment.objects.all()
def test_threaded_comments_step_4_level_0(self):
# comment 1
self.assert_(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assert_(self.c1.level == 0 and self.c1.order == 1)
# comment 2
self.assert_(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assert_(self.c2.level == 0 and self.c2.order == 1)
def test_threaded_comments_step_4_level_1(self):
# comment 3
self.assert_(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assert_(self.c3.level == 1 and self.c3.order == 2)
# comment 4
self.assert_(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assert_(self.c4.level == 1 and self.c4.order == 3)
# comment 5
self.assert_(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assert_(self.c5.level == 1 and self.c5.order == 2)
def test_threaded_comments_step_4_level_2(self):
# comment 6
self.assert_(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assert_(self.c6.level == 2 and self.c6.order == 3)
# comment 7
self.assert_(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assert_(self.c7.level == 2 and self.c7.order == 4)
class ThreadStep5TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep5TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c3, # 3 1 1 1 2
self.c8, # 8 1 3 2 3
self.c4, # 4 1 1 1 4
self.c7, # 7 1 4 2 5
self.c2, # 2 2 2 0 1
self.c5, # 5 2 2 1 2
self.c6, # 6 2 5 2 3
self.c9 # 9 9 9 0 1
) = XtdComment.objects.all()
def test_threaded_comments_step_5_level_0(self):
# comment 1
self.assert_(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assert_(self.c1.level == 0 and self.c1.order == 1)
# comment 2
self.assert_(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assert_(self.c2.level == 0 and self.c2.order == 1)
# comment 9
self.assert_(self.c9.parent_id == 9 and self.c9.thread_id == 9)
self.assert_(self.c9.level == 0 and self.c9.order == 1)
def test_threaded_comments_step_5_level_1(self):
# comment 3
self.assert_(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assert_(self.c3.level == 1 and self.c3.order == 2)
# comment 4
self.assert_(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assert_(self.c4.level == 1 and self.c4.order == 4) # changed
# comment 5
self.assert_(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assert_(self.c5.level == 1 and self.c5.order == 2)
def test_threaded_comments_step_5_level_2(self):
# comment 6
self.assert_(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assert_(self.c6.level == 2 and self.c6.order == 3)
# comment 7
self.assert_(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assert_(self.c7.level == 2 and self.c7.order == 5) # changed
# comment 8
self.assert_(self.c8.parent_id == 3 and self.c8.thread_id == 1)
self.assert_(self.c8.level == 2 and self.c8.order == 3)
def test_exceed_max_thread_level_raises_exception(self):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type = article_ct,
object_pk = self.article_1.id,
content_object = self.article_1,
site = site,
comment = ("cmt 1 to cmt 2 to "
"cmt 1"),
submit_date = datetime.now(),
parent_id = 8) # already max thread
# level
class DiaryBaseTestCase(DjangoTestCase):
def setUp(self):
self.day_in_diary = Diary.objects.create(body="About Today...")
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
XtdComment.objects.create(content_type = diary_ct,
object_pk = self.day_in_diary.id,
content_object = self.day_in_diary,
site = site,
comment ="cmt to day in diary",
submit_date = datetime.now())
def test_max_thread_level_by_app_model(self):
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type = diary_ct,
object_pk = self.day_in_diary.id,
content_object = self.day_in_diary,
site = site,
comment = ("cmt to cmt to day "
"in diary"),
submit_date = datetime.now(),
parent_id = 1) # already max thread
# level
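
# Note (added): the two MaxThreadLevelExceededException tests above rely on the
# demo project's per-app max-thread-level settings: replying to comment 8
# (already at level 2) exceeds the limit configured for articles, and any reply
# at all exceeds the limit configured for diary entries.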

# --- File: app/api/v1/routes.py (repo: kwanj-k/storemanager-API, license: MIT) ---
"""
This file contains all the version one routes
"""
# Third party imports
from flask import Blueprint, request
from flask_restplus import Api, Resource, fields
# Local application imports
from .views.products_views import v1 as pro_routes
from .views.sales_views import v1 as sales_routes
from .views.stores_views import v1 as stores_routes
from .views.auth import v1 as auth_routes
authorizations = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}}
v_1 = Blueprint('v_1', __name__, url_prefix="/api/v1")
api = Api(v_1)
v1 = api.namespace(
'v1',
    description='Store manager API without persistent data storage',
authorizations=authorizations)
api.add_namespace(pro_routes, path="/products/")
api.add_namespace(sales_routes, path="/sales")
api.add_namespace(stores_routes, path="/stores")
api.add_namespace(auth_routes, path="/")
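
# Usage sketch (added, not part of this file): the blueprint defined above is
# meant to be registered on the Flask application, roughly:
#
#   from flask import Flask
#   from app.api.v1.routes import v_1
#
#   app = Flask(__name__)
#   app.register_blueprint(v_1)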

# --- File: tests/test_region_aggregation.py (repo: IAMconsortium/nomenclature, license: Apache-2.0) ---
from pathlib import Path
import jsonschema
import pydantic
import pytest
from nomenclature.processor.region import (
ModelMappingCollisionError,
RegionAggregationMapping,
RegionProcessor,
)
from conftest import TEST_DATA_DIR
TEST_FOLDER_REGION_MAPPING = TEST_DATA_DIR / "region_aggregation"
def test_mapping():
mapping_file = "working_mapping.yaml"
# Test that the file is read and represented correctly
obs = RegionAggregationMapping.from_file(TEST_FOLDER_REGION_MAPPING / mapping_file)
exp = {
"model": "model_a",
"file": (TEST_FOLDER_REGION_MAPPING / mapping_file).relative_to(Path.cwd()),
"native_regions": [
{"name": "region_a", "rename": "alternative_name_a"},
{"name": "region_b", "rename": "alternative_name_b"},
{"name": "region_c", "rename": None},
],
"common_regions": [
{
"name": "common_region_1",
"constituent_regions": ["region_a", "region_b"],
},
{
"name": "common_region_2",
"constituent_regions": ["region_c"],
},
],
}
assert obs.dict() == exp
@pytest.mark.parametrize(
"file, error_type, error_msg_pattern",
[
(
"illegal_mapping_invalid_format_dict.yaml",
jsonschema.ValidationError,
".*common_region_1.*not.*'array'.*",
),
(
"illegal_mapping_illegal_attribute.yaml",
jsonschema.ValidationError,
"Additional properties are not allowed.*",
),
(
"illegal_mapping_conflict_regions.yaml",
pydantic.ValidationError,
".*Name collision in native and common regions.*common_region_1.*",
),
(
"illegal_mapping_duplicate_native.yaml",
pydantic.ValidationError,
".*Name collision in native regions.*alternative_name_a.*",
),
(
"illegal_mapping_duplicate_native_rename.yaml",
pydantic.ValidationError,
".*Name collision in native regions.*alternative_name_a.*",
),
(
"illegal_mapping_duplicate_common.yaml",
pydantic.ValidationError,
".*Name collision in common regions.*common_region_1.*",
),
(
"illegal_mapping_model_only.yaml",
pydantic.ValidationError,
".*one of the two: 'native_regions', 'common_regions'.*",
),
],
)
def test_illegal_mappings(file, error_type, error_msg_pattern):
# This is to test a few different failure conditions
with pytest.raises(error_type, match=f"{error_msg_pattern}{file}.*"):
RegionAggregationMapping.from_file(TEST_FOLDER_REGION_MAPPING / file)
@pytest.mark.parametrize(
"region_processor_path",
[
TEST_DATA_DIR / "regionprocessor_working",
(TEST_DATA_DIR / "regionprocessor_working").relative_to(Path.cwd()),
],
)
def test_region_processor_working(region_processor_path):
obs = RegionProcessor.from_directory(region_processor_path)
exp_data = [
{
"model": "model_a",
"file": (
TEST_DATA_DIR / "regionprocessor_working/mapping_1.yaml"
).relative_to(Path.cwd()),
"native_regions": [
{"name": "World", "rename": None},
],
"common_regions": None,
},
{
"model": "model_b",
"file": (
TEST_DATA_DIR / "regionprocessor_working/mapping_2.yaml"
).relative_to(Path.cwd()),
"native_regions": None,
"common_regions": [
{
"name": "World",
"constituent_regions": ["region_a", "region_b"],
}
],
},
]
exp_models = {value["model"] for value in exp_data}
exp_dict = {value["model"]: value for value in exp_data}
assert exp_models == set(obs.mappings.keys())
assert all(exp_dict[m] == obs.mappings[m].dict() for m in exp_models)
def test_region_processor_not_defined(simple_definition):
# Test a RegionProcessor with regions that are not defined in the data structure
# definition
error_msg = (
"model_(a|b)\n.*region_a.*mapping_(1|2).yaml.*value_error.region_not_defined."
"*\n.*model_(a|b)\n.*region_a.*mapping_(1|2).yaml.*value_error."
"region_not_defined"
)
with pytest.raises(pydantic.ValidationError, match=error_msg):
RegionProcessor.from_directory(
TEST_DATA_DIR / "regionprocessor_not_defined"
).validate_mappings(simple_definition)
def test_region_processor_duplicate_model_mapping():
error_msg = ".*model_a.*mapping_(1|2).yaml.*mapping_(1|2).yaml"
with pytest.raises(ModelMappingCollisionError, match=error_msg):
RegionProcessor.from_directory(TEST_DATA_DIR / "regionprocessor_duplicate")
def test_region_processor_wrong_args():
# Test if pydantic correctly type checks the input of RegionProcessor.from_directory
# Test with an integer
with pytest.raises(pydantic.ValidationError, match=".*path\n.*not a valid path.*"):
RegionProcessor.from_directory(123)
# Test with a file, a path pointing to a directory is required
with pytest.raises(
pydantic.ValidationError,
match=".*path\n.*does not point to a directory.*",
):
RegionProcessor.from_directory(
TEST_DATA_DIR / "regionprocessor_working/mapping_1.yaml"
)
| 33.562874 | 88 | 0.615165 | 0 | 0 | 0 | 0 | 2,935 | 0.52364 | 0 | 0 | 2,138 | 0.381445 |
c8645ddbacbee9365d7d4fed6a9839538bcee96a | 828 | py | Python | bin/util/ckan-datasets-in-group.py | timrdf/csv2rdf4lod-automation-prod | d7e096fda18aea6236b6245b1e4a221101611640 | [
"Apache-2.0"
]
| 56 | 2015-01-15T13:11:28.000Z | 2021-11-16T14:50:48.000Z | bin/util/ckan-datasets-in-group.py | timrdf/csv2rdf4lod-automation-prod | d7e096fda18aea6236b6245b1e4a221101611640 | [
"Apache-2.0"
]
| 10 | 2015-02-17T19:19:39.000Z | 2021-12-10T21:04:37.000Z | bin/util/ckan-datasets-in-group.py | timrdf/csv2rdf4lod-automation-prod | d7e096fda18aea6236b6245b1e4a221101611640 | [
"Apache-2.0"
]
| 13 | 2015-08-25T18:48:35.000Z | 2021-12-13T15:28:16.000Z | #!/usr/bin/env python
#
#3> <> prov:specializationOf <https://github.com/timrdf/csv2rdf4lod-automation/blob/master/bin/util/ckan-datasets-in-group.py>;
#3> prov:wasDerivedFrom <https://raw.github.com/timrdf/DataFAQs/master/packages/faqt.python/faqt/faqt.py>,
#3> <https://github.com/timrdf/DataFAQs/raw/master/services/sadi/ckan/lift-ckan.py>;
#
# Requires: http://pypi.python.org/pypi/ckanclient
# easy_install http://pypi.python.org/packages/source/c/ckanclient/ckanclient-0.10.tar.gz
import ckanclient
def datasets_in_group(ckan_loc='http://datahub.io', group_name='lodcloud'):
ckan = ckanclient.CkanClient(base_location=ckan_loc+'/api')
group = ckan.group_entity_get(group_name)
for dataset in group['packages']:
print dataset
if __name__=='__main__':
datasets_in_group()
| 41.4 | 127 | 0.729469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.681159 |
c86486b7aff3805a872796537a88993f82f85be5 | 191 | py | Python | CaloOnlineTools/EcalTools/python/ecalExclusiveTrigFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CaloOnlineTools/EcalTools/python/ecalExclusiveTrigFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CaloOnlineTools/EcalTools/python/ecalExclusiveTrigFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
ecalExclusiveTrigFilter = cms.EDFilter("EcalExclusiveTrigFilter",
# Global trigger tag
l1GlobalReadoutRecord = cms.string("gtDigis")
)
| 21.222222 | 65 | 0.759162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.282723 |
c8657a8c0a88d1cd1bd12e0d16b56dc5546e1b6c | 2,300 | py | Python | render_object.py | VanGy-code/3D-House-Blender | 8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71 | [
"MIT"
]
| null | null | null | render_object.py | VanGy-code/3D-House-Blender | 8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71 | [
"MIT"
]
| null | null | null | render_object.py | VanGy-code/3D-House-Blender | 8a9d91b1f3cc3988c0dcd7079223f2e541f9ec71 | [
"MIT"
]
| 1 | 2021-11-22T00:50:45.000Z | 2021-11-22T00:50:45.000Z | import bpy
import os
import json
import numpy as np
from decimal import Decimal
from mathutils import Vector, Matrix
import argparse
import numpy as np
import sys
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.dirname(__file__)+'/tools')
from tools.utils import *
from tools.blender_interface import BlenderInterface
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.')
p.add_argument('--mesh_fpath', type=str, required=True, help='The path the output will be dumped to.')
p.add_argument('--output_dir', type=str, required=True, help='The path the output will be dumped to.')
p.add_argument('--num_observations', type=int, required=True, help='The path the output will be dumped to.')
p.add_argument('--sphere_radius', type=float, required=True, help='The path the output will be dumped to.')
p.add_argument('--mode', type=str, required=True, help='Options: train and test')
argv = sys.argv
    argv = sys.argv[sys.argv.index("--") + 1:]  # only parse the arguments after "--"
instance_name = opt.mesh_fpath.split('/')[-3]
instance_dir = os.path.join(opt.output_dir, instance_name)
# Start Render
renderer = BlenderInterface(resolution=128)
if opt.mode == 'train':
cam_locations = sample_spherical(opt.num_observations, opt.sphere_radius)
elif opt.mode == 'test':
cam_locations = get_archimedean_spiral(opt.sphere_radius, opt.num_observations)
obj_location = np.zeros((1,3))
cv_poses = look_at(cam_locations, obj_location)
blender_poses = [cv_cam2world_to_bcam2world(m) for m in cv_poses]
shapenet_rotation_mat = np.array([[1.0000000e+00, 0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, -1.0000000e+00, -1.2246468e-16],
[0.0000000e+00, 1.2246468e-16, -1.0000000e+00]])
rot_mat = np.eye(3)
hom_coords = np.array([[0., 0., 0., 1.]]).reshape(1, 4)
obj_pose = np.concatenate((rot_mat, obj_location.reshape(3,1)), axis=-1)
obj_pose = np.concatenate((obj_pose, hom_coords), axis=0)
renderer.import_mesh(opt.mesh_fpath, scale=1., object_world_matrix=obj_pose)
renderer.render(instance_dir, blender_poses, write_cam_params=True) | 41.818182 | 112 | 0.694348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.15913 |
c866107dba038466832105575d5f0486fb0c0c27 | 1,281 | py | Python | resources/tests/test_users.py | pacofvf/agency_performance_model | 1692d7e11ac3141715845d2a4ecf416563539f89 | [
"MIT"
]
| 2 | 2018-01-10T05:51:31.000Z | 2018-01-18T21:25:45.000Z | resources/tests/test_users.py | pacofvf/pivot_table_api | 1692d7e11ac3141715845d2a4ecf416563539f89 | [
"MIT"
]
| null | null | null | resources/tests/test_users.py | pacofvf/pivot_table_api | 1692d7e11ac3141715845d2a4ecf416563539f89 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
import unittest
import json
import base64
from mock import patch
import api
class UserTests(unittest.TestCase):
def setUp(self):
api.app.testing = True
self.app = api.app.test_client()
def test_user_creation(self):
with patch('models.db.session'):
response = self.app.post('/user',
data={'username': 'demo', 'password': 'demo'},
headers={'Authorization': 'Basic '+base64.b64encode('demo:demo')})
print response.data
data = json.loads(response.data.decode())
self.assertTrue(isinstance(data, dict))
self.assertIn('status', data)
self.assertEquals(data['status'], 'success')
def test_get_user(self):
with patch('models.user.User'):
response = self.app.get('/user/123131',
headers={'Authorization': 'Basic '+base64.b64encode('demo:demo')})
print response.data
data = json.loads(response.data.decode())
self.assertTrue(isinstance(data, dict))
self.assertIn('status', data)
self.assertEquals(data['status'], 'error')
if __name__ == '__main__':
unittest.main()
| 33.710526 | 103 | 0.565183 | 1,135 | 0.886027 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.181889 |
c86659332f0223beeafc6e01030a75e258e463d5 | 2,717 | py | Python | mapel/elections/features/clustering.py | kaszperro/mapel | d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3 | [
"MIT"
]
| null | null | null | mapel/elections/features/clustering.py | kaszperro/mapel | d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3 | [
"MIT"
]
| null | null | null | mapel/elections/features/clustering.py | kaszperro/mapel | d4e6486ee97f5d5a5a737c581ba3f9f874ebcef3 | [
"MIT"
]
| null | null | null |
import numpy as np
def clustering_v1(experiment, num_clusters=20):
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
import scipy.spatial.distance as ssd
# skip the paths
SKIP = ['UNID', 'ANID', 'STID', 'ANUN', 'STUN', 'STAN',
'Mallows',
'Urn',
'Identity', 'Uniformity', 'Antagonism', 'Stratification',
]
new_names = []
for i, a in enumerate(list(experiment.distances)):
if not any(tmp in a for tmp in SKIP):
new_names.append(a)
print(len(new_names))
distMatrix = np.zeros([len(new_names), len(new_names)])
for i, a in enumerate(new_names):
for j, b in enumerate(new_names):
if a != b:
distMatrix[i][j] = experiment.distances[a][b]
# Zd = linkage(ssd.squareform(distMatrix), method="complete")
# cld = fcluster(Zd, 500, criterion='distance').reshape(len(new_names), 1)
Zd = linkage(ssd.squareform(distMatrix), method="complete")
cld = fcluster(Zd, 12, criterion='maxclust').reshape(len(new_names), 1)
clusters = {}
for i, name in enumerate(new_names):
clusters[name] = cld[i][0]
for name in experiment.coordinates:
if name not in clusters:
clusters[name] = 0
return {'value': clusters}
def clustering_kmeans(experiment, num_clusters=20):
from sklearn.cluster import KMeans
points = list(experiment.coordinates.values())
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(points)
y_km = kmeans.fit_predict(points)
# plt.scatter(points[y_km == 0, 0], points[y_km == 0, 1], s=100, c='red')
# plt.scatter(points[y_km == 1, 0], points[y_km == 1, 1], s=100, c='black')
# plt.scatter(points[y_km == 2, 0], points[y_km == 2, 1], s=100, c='blue')
# plt.scatter(points[y_km == 3, 0], points[y_km == 3, 1], s=100, c='cyan')
# all_distances = []
# for a,b in combinations(experiment.distances, 2):
# all_distances.append([a, b, experiment.distances[a][b]])
# all_distances.sort(key=lambda x: x[2])
#
# clusters = {a: None for a in experiment.distances}
# num_clusters = 0
# for a,b,dist in all_distances:
# if clusters[a] is None and clusters[b] is None:
# clusters[a] = num_clusters
# clusters[b] = num_clusters
# num_clusters += 1
# elif clusters[a] is None and clusters[b] is not None:
# clusters[a] = clusters[b]
# elif clusters[a] is not None and clusters[b] is None:
# clusters[b] = clusters[a]
clusters = {}
for i, name in enumerate(experiment.coordinates):
clusters[name] = y_km[i]
return {'value': clusters}
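
# Note (added): both feature functions return {'value': clusters}, a mapping
# from instance name to an integer cluster id; names filtered out by
# clustering_v1's SKIP list are assigned cluster 0.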

# --- File: day15-1.py (repo: kenleung5e28/advent-of-code-2021, license: MIT) ---
import math
grid = []
with open('input-day15.txt') as file:
for line in file:
line = line.rstrip()
grid.append([int(s) for s in line])
n = len(grid)
costs = [[math.inf] * n for _ in range(n)]
costs[0][0] = 0
queue = [(0, 0)]
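# Note (added): the loop below finds the minimum total risk from the top-left
# to the bottom-right cell by repeatedly relaxing neighbour costs through a
# FIFO queue (an SPFA-style shortest-path search; Dijkstra with a priority
# queue would work as well).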
while len(queue) > 0:
x1, y1 = queue.pop(0)
for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
x, y = x1 + dx, y1 + dy
if x >= 0 and y >= 0 and x < n and y < n:
cost = costs[x1][y1] + grid[x][y]
if cost < costs[x][y]:
costs[x][y] = cost
queue.append((x, y))
print(costs[n - 1][n - 1])

# --- File: OSMTagFinder/thesaurus/relatedterm.py (repo: geometalab/OSMTagFinder, license: MIT) ---
# -*- coding: utf-8 -*-
'''
Created on 16.11.2014
@author: Simon Gwerder
'''
from utilities.configloader import ConfigLoader
from rdfgraph import RDFGraph
class RelatedTerm:
rdfGraph = RDFGraph()
cl = ConfigLoader()
termSchemeName = cl.getThesaurusString('TERM_SCHEME_NAME')
termSchemeTitle = cl.getThesaurusString('TERM_SCHEME_TITLE')
creator = cl.getThesaurusString('CREATOR')
termScheme = None
def __init__(self, rdfGraph):
if rdfGraph is not None:
self.rdfGraph = rdfGraph
self.termScheme = self.rdfGraph.addConceptScheme(self.termSchemeName, self.termSchemeTitle, self.creator) # doesn't matter if called a lot
def createTerm(self, keyTagConcept, prefLabelEN, prefLabelDE):
label = prefLabelEN.decode("utf-8")
if self.rdfGraph.isInKeyScheme(keyTagConcept):
label = keyTagConcept.split('Key:')[1]
else:
label = keyTagConcept.split('Tag:')[1]
label = (label.replace('=','_')).decode("utf-8")
termConcept = self.rdfGraph.addConcept(self.termSchemeName + '/' + label)
self.rdfGraph.addInScheme(termConcept, self.termSchemeName)
self.rdfGraph.addPrefLabel(termConcept, prefLabelEN, language='en')
self.rdfGraph.addPrefLabel(termConcept, prefLabelDE, language='de')
self.rdfGraph.addRelatedMatch(keyTagConcept, termConcept)
self.rdfGraph.addRelatedMatch(termConcept, keyTagConcept)
return termConcept
def addAltLabelEN(self, termConcept, altLabelEN):
self.rdfGraph.addAltLabel(termConcept, altLabelEN, 'en')
return termConcept
def addAltLabelDE(self, termConcept, altLabelDE):
self.rdfGraph.addAltLabel(termConcept, altLabelDE, 'de')
return termConcept
def addNarrowerLiteralEN(self, termConcept, narrowerEN):
self.rdfGraph.addNarrowerLiteral(termConcept, narrowerEN, 'en')
return termConcept
def addNarrowerLiteralDE(self, termConcept, narrowerDE):
self.rdfGraph.addNarrowerLiteral(termConcept, narrowerDE, 'de')
return termConcept
def addBroaderLiteralEN(self, termConcept, broaderEN):
self.rdfGraph.addBroaderLiteral(termConcept, broaderEN, 'en')
return termConcept
def addBroaderLiteralDE(self, termConcept, broaderDE):
self.rdfGraph.addBroaderLiteral(termConcept, broaderDE, 'de')
return termConcept
def removeAltLabelLiteral(self, termConcept, altLabelObj):
self.rdfGraph.removeAltLabelLiteral(termConcept, altLabelObj)
def removeBroaderLiteral(self, termConcept, broaderObj):
self.rdfGraph.removeAltLabelLiteral(termConcept, broaderObj)
def removeNarrowerLiteral(self, termConcept, narrowerObj):
self.rdfGraph.removeAltLabelLiteral(termConcept, narrowerObj)
def save(self):
self.rdfGraph.serialize(self.rdfGraph.filePath)
return self.rdfGraph.filePath
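
# Usage sketch (added; assumes the Key:/Tag: concept already exists in the
# loaded thesaurus graph, and the URI and labels are illustrative):
#   rt = RelatedTerm(RDFGraph())
#   concept = rt.createTerm('http://example.org/osm/Key:building', 'building', 'Gebäude')
#   rt.addAltLabelEN(concept, 'structure')
#   rt.save()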

# --- File: Algorithms/Sorting and Searching/sorting/merge sort/merge-sort-return-list.py (repo: bulentsiyah/Python-Basics-Algorithms-Data-Structures-Object-Oriented-Programming-Job-Interview-Questions, license: MIT) ---
arr: list = [54,26,93,17,77,31,44,55,20]
def merge_sort(arr: list):
result: list = helper(arr, 0, len(arr) - 1)
for i in range(len(arr)):
arr[i] = result[i]
def helper(arr: list, start: int, end: int) -> list:
if start > end:
return []
elif start == end:
return [arr[start]]
else:
midpoint: int = start + (end - start) // 2
leftList = helper(arr, start, midpoint)
rightList = helper(arr, midpoint + 1, end)
return mergelists(leftList, rightList)
def mergelists(leftList: list, rightList: list) -> list:
arr: list = [None] * (len(leftList) + len(rightList))
i = j = k = 0
while i < len(leftList) and j < len(rightList):
if leftList[i] < rightList[j]:
arr[k] = leftList[i]
i += 1
else:
arr[k] = rightList[j]
j += 1
k += 1
while i < len(leftList):
arr[k] = leftList[i]
i += 1
k += 1
while j < len(rightList):
arr[k] = rightList[j]
j += 1
k += 1
return arr
print(arr)
merge_sort(arr)
print(arr)
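
# Expected output (added): the first print shows the original list, the second
# the list after in-place sorting:
#   [54, 26, 93, 17, 77, 31, 44, 55, 20]
#   [17, 20, 26, 31, 44, 54, 55, 77, 93]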

# --- File: cve-manager/cve_manager/handler/task_handler/callback/cve_scan.py (repo: seandong37tt4qu/jeszhengq, license: MulanPSL-1.0) ---
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description: callback function of the cve scanning task.
"""
from aops_utils.log.log import LOGGER
from cve_manager.handler.task_handler.callback import TaskCallback
from cve_manager.conf.constant import ANSIBLE_TASK_STATUS, CVE_SCAN_STATUS
class CveScanCallback(TaskCallback):
"""
Callback function for cve scanning.
"""
def __init__(self, user, proxy, host_info):
"""
Args:
user (str): who the scanned hosts belongs to.
proxy (object): database proxy
host_info (list): host info, e.g. hostname, ip, etc.
"""
self.user = user
task_info = {}
for info in host_info:
host_name = info.get('host_name')
task_info[host_name] = info
super().__init__(None, proxy, task_info)
def v2_runner_on_unreachable(self, result):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['msg'], "status": ANSIBLE_TASK_STATUS.UNREACHABLE}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.UNREACHABLE)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def v2_runner_on_ok(self, result):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['stdout'], "status": ANSIBLE_TASK_STATUS.SUCCEED}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.SUCCEED)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def v2_runner_on_failed(self, result, ignore_errors=False):
host_name, result_info, task_name = self._get_info(result)
self.result[host_name][task_name] = {
"info": result_info['stderr'], "status": ANSIBLE_TASK_STATUS.FAIL}
LOGGER.debug("task name: %s, user: %s, host name: %s, result: %s",
task_name, self.user, host_name, ANSIBLE_TASK_STATUS.FAIL)
self.save_to_db(task_name, host_name, CVE_SCAN_STATUS.DONE)
def save_to_db(self, task_name, host_name, status):
"""
Set the status of the host to database.
Args:
task_name (str): task name in playbook.
host_name (str)
status (str)
"""
host_id = self.task_info[host_name]['host_id']
self.proxy.update_scan_status([host_id])
LOGGER.debug("task name: %s, host_id: %s, status: %s", task_name, host_id, status)

# --- File: mean-var-std/main.py (repo: PedroEduardoSS/Data-Analisys-projects, license: MIT) ---
from mean_var_std import *
calculate([0,1,2,3,4,5,6,7,8])

# --- File: tests/result/test_gatling.py (repo: LaudateCorpus1/perfsize, license: Apache-2.0) ---
from datetime import datetime
from decimal import Decimal
from perfsize.perfsize import (
lt,
lte,
gt,
gte,
eq,
neq,
Condition,
Result,
Run,
Config,
Plan,
StepManager,
EnvironmentManager,
LoadManager,
ResultManager,
Reporter,
Workflow,
)
from perfsize.environment.mock import MockEnvironmentManager
from perfsize.load.mock import MockLoadManager
from perfsize.reporter.mock import MockReporter
from perfsize.result.mock import MockResultManager
from perfsize.result.gatling import Metric, GatlingResultManager
from perfsize.step.mock import MockStepManager
from pprint import pprint
import pytest
from unittest.mock import patch
class TestGatlingResultManager:
def test_gatling_result_manager(self) -> None:
# A plan would define the various configs possible for testing.
# A step manager would pick the next config to test.
# This test is starting with a given Config and an associated Run.
config = Config(
parameters={
"endpoint_name": "LEARNING-model-sim-public-c-1",
"endpoint_config_name": "LEARNING-model-sim-public-c-1-0",
"model_name": "model-sim-public",
"instance_type": "ml.t2.medium",
"initial_instance_count": "1",
"ramp_start_tps": "0",
"ramp_minutes": "0",
"steady_state_tps": "1",
"steady_state_minutes": "1",
},
requirements={
Metric.latency_success_p99: [
Condition(lt(Decimal("200")), "value < 200"),
Condition(gte(Decimal("0")), "value >= 0"),
],
Metric.percent_fail: [
Condition(lt(Decimal("0.01")), "value < 0.01"),
Condition(gte(Decimal("0")), "value >= 0"),
],
},
)
run = Run(
id="test_run_tag",
start=datetime.fromisoformat("2021-04-01T00:00:00"),
end=datetime.fromisoformat("2021-04-01T01:00:00"),
results=[],
)
# GatlingResultManager will parse simulation.log and populate results
result_manager = GatlingResultManager(
results_path="examples/perfsize-results-root"
)
result_manager.query(config, run)
pprint(run.results)

# --- File: __main__.py (repo: GbaCretin/dmf2mlm, license: MIT) ---
from src import dmf,mzs,utils,sfx
from pathlib import Path
import argparse
def print_info(mlm_sdata):
if len(mlm_sdata.songs) <= 0: return
for i in range(len(mlm_sdata.songs[0].channels)):
channel = mlm_sdata.songs[0].channels[i]
print("\n================[ {0:01X} ]================".format(i))
if channel == None:
print("Empty")
continue
for event in channel.events:
print(event)
if isinstance(event, mzs.SongComJumpToSubEL):
sub_el = mlm_sdata.songs[0].sub_event_lists[i][event.sub_el_idx]
sub_el.print()
print("\t--------")
def print_df_info(mod, channels: [int]):
for ch in channels:
print("|####[${0:02X}]####".format(ch), end='')
print("|")
for i in range(mod.pattern_matrix.rows_in_pattern_matrix):
for ch in channels:
subel_idx = mod.pattern_matrix.matrix[ch][i]
print("|====(${0:02X})====".format(subel_idx), end='')
print("|")
for j in range(mod.pattern_matrix.rows_per_pattern):
for ch in channels:
pat_idx = mod.pattern_matrix.matrix[ch][i]
row = mod.patterns[ch][pat_idx].rows[j]
note_lbl = "--"
oct_lbl = "-"
vol_lbl = "--"
inst_lbl = "--"
fx0_lbl = "----"
if row.octave != None:
oct_lbl = str(row.octave)
if row.note == dmf.Note.NOTE_OFF:
note_lbl = "~~"
oct_lbl = "~"
elif row.note != None:
note_lbl = row.note.name.ljust(2, '-').replace('S', '#')
if row.volume != None:
vol_lbl = "{:02X}".format(row.volume)
if row.instrument != None:
inst_lbl = "{:02X}".format(row.instrument)
if len(row.effects) > 0:
fx0 = row.effects[0]
if fx0.code == dmf.EffectCode.EMPTY:
fx0_lbl = "--"
else:
fx0_lbl = "{:02X}".format(fx0.code.value)
if fx0.value == None:
fx0_lbl += "--"
else:
fx0_lbl += "{:02X}".format(fx0.value)
print("|{0}{1} {2}{3} {4}".format(note_lbl, oct_lbl, vol_lbl, inst_lbl, fx0_lbl), end='')
print("|")
parser = argparse.ArgumentParser(description='Convert DMF modules and SFX to an MLM driver compatible format')
parser.add_argument('dmf_module_paths', type=str, nargs='*', help="The paths to the input DMF files")
parser.add_argument('--sfx-directory', type=Path, help="Path to folder containing .raw files (Only absolute paths; Must be 18500Hz 16bit mono)")
parser.add_argument('--sfx-header', type=Path, help="Where to save the generated SFX c header (Only absolute paths)")
args = parser.parse_args()
dmf_modules = []
sfx_samples = None
if args.sfx_directory != None:
print("Parsing SFX... ", end='', flush=True)
sfx_samples = sfx.SFXSamples(args.sfx_directory)
print("OK")
if args.sfx_header != None:
print("Generating SFX Header... ", end='', flush=True)
c_header = sfx_samples.generate_c_header()
print("OK")
print(f"Saving SFX Header as '{args.sfx_header}'... ", end='', flush=True)
with open(args.sfx_header, "w") as file:
file.write(c_header)
print("OK")
for i in range(len(args.dmf_module_paths)):
with open(args.dmf_module_paths[i], "rb") as file:
print(f"Parsing '{args.dmf_module_paths[i]}'... ", end='', flush=True)
mod = dmf.Module(file.read())
print("OK")
print(f"Optimizing '{args.dmf_module_paths[i]}'... ", end='', flush=True)
mod.patch_for_mzs()
mod.optimize()
print("OK")
dmf_modules.append(mod)
mlm_sdata = mzs.SoundData()
print(f"Converting DMFs... ", end='', flush=True)
mlm_sdata.add_dmfs(dmf_modules)
print("OK")
if sfx_samples != None:
print(f"Converting SFX... ", end='', flush=True)
mlm_sdata.add_sfx(sfx_samples, False)
print("OK")
#print_df_info(dmf_modules[0], [0, 4, 7])
#print_info(mlm_sdata)
print(f"Compiling... ", end='', flush=True)
mlm_compiled_sdata = mlm_sdata.compile_sdata()
mlm_compiled_vrom = mlm_sdata.compile_vrom()
print("OK")
with open("m1_sdata.bin", "wb") as file:
file.write(mlm_compiled_sdata)
with open("vrom.bin", "wb") as file:
	file.write(mlm_compiled_vrom)

# --- File: python/8.Making-a-POST-Request.py (repo: 17nikhil/codecademy, license: Apache-2.0) ---
# Using the Requests library, you can make a POST request by using the requests.post() method. You aren't just GETting data with a POST - you can pass your own data into the request as well, like so:
#
# requests.post("http://placekitten.com/", data="myDataToPost")
# We're going to make the same request as the one shown on line 2 through line 5. Request header lines (line 3 and line 4) are usually created automatically, so we don't have to worry about them. The body of the request on line 5 is what we will need to add to our POST.
#
# Instructions
# We created the body of the request as a dictionary on line 9. Call requests.post() on the URL http://codecademy.com/learn-http/ and pass the argument data=body, as in the example above, to create the POST request; set this result equal to a new variable named response.
########## Example request #############
# POST /learn-http HTTP/1.1
# Host: www.codecademy.com
# Content-Type: text/html; charset=UTF-8
# Name=Eric&Age=26
import requests
body = {'Name': 'Eric', 'Age': '26'}
# Make the POST request here, passing body as the data:
response = requests.post('http://codecademy.com/learn-http/', data=body)
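
# A small follow-up sketch (added, not part of the original exercise): the
# Response object returned by requests.post() exposes the result, e.g.:
#   print(response.status_code)
#   print(response.text)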

# --- File: python/zephyr/models/__init__.py (repo: r-pad/zephyr, license: MIT) ---
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from functools import partial
from .linear import MLP, LogReg
from .pointnet import PointNet
from .pointnet2 import PointNet2SSG
from .pointnet3 import PointNet3SSG
from .dgcnn import DGCNN
# from .masked_conv import ConvolutionalPoseModel
from .point_mlp import PointMLP
from pytorch_lightning.core.lightning import LightningModule
def getModel(model_name, args, mode="train"):
if args.resume_path is None or mode == 'train':
if model_name == 'mlp':
model = MLP(args.dim_agg, args)
if model_name == "pmlp":
model = PointMLP(args.dim_point, args)
elif model_name[:2] == 'lg':
model = LogReg(args.dim_agg, args)
elif model_name == "pn":
model = PointNet(args.dim_point, args)
elif model_name == "pn2":
model = PointNet2SSG(args.dim_point, args, num_class=1)
elif model_name == "pn3":
model = PointNet3SSG(args.dim_point, args, num_class=1)
elif model_name == "dgcnn":
model = DGCNN(args.dim_point, args, num_class=1)
# elif model_name == "maskconv":
# model = ConvolutionalPoseModel(args)
else:
raise Exception("Unknown model name:", model_name)
else:
if model_name == 'mlp':
model = MLP.load_from_checkpoint(args.resume_path, args.dim_agg, args)
elif model_name == "pmlp":
model = PointMLP.load_from_checkpoint(args.resume_path, args.dim_point, args)
elif model_name[:2] == 'lg':
model = LogReg.load_from_checkpoint(args.resume_path, args.dim_agg, args)
elif model_name == "pn":
model = PointNet.load_from_checkpoint(args.resume_path, args.dim_point, args)
elif model_name == "pn2":
model = PointNet2SSG.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
elif model_name == "pn3":
model = PointNet3SSG.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
elif model_name == "dgcnn":
model = DGCNN.load_from_checkpoint(args.resume_path, args.dim_point, args, num_class=1)
# elif model_name == "maskconv":
# model = ConvolutionalPoseModel.load_from_checkpoint(args.resume_path, args)
else:
raise Exception("Unknown model name:", model_name)
if not args.pretrained_pnfeat is None:
model.loadPretrainedFeat(args.pretrained_pnfeat)
return model
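
# Note (added): `args` is expected to carry the hyperparameters used above
# (dim_agg or dim_point, resume_path, pretrained_pnfeat); when resume_path is
# set and mode != "train", the model is restored from that Lightning
# checkpoint instead of being constructed from scratch.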

# --- File: golpy/main.py (repo: dkuska/golpy, license: MIT) ---
import golpy.controller.controller as controller
import golpy.view.view as view
import golpy.model.gamemodel as model
import golpy.eventmanager.eventmanager as eventm
import golpy.config as config
import log.log as log
import argparse
def pass_args():
""" Takes Argument from the command line and returns an ArgumentParser"""
parser = argparse.ArgumentParser(description="2D Cellular Automata Viewer supporting multiple formats")
parser.add_argument("-rule", "-r", type=str, default=config.default_rule,
help='String describing the used rule')
parser.add_argument("-mode", "-m", type=str, default=config.default_mode,
help="String describing Game Mode")
parser.add_argument("-size", "-s", type=int, default=config.default_size,
help="Integer describing size of the universe. I.e. -size 200 will correspond to a (200 x 200) cell universe")
parser.add_argument("-topology", "-t", type=str, default=config.default_topology,
help="String describing the topology of the universe. Default being Torus-shaped")
parser.add_argument("-speed", "-sp", type=int, default=config.default_speed,
help="Integer describing the maximum FPS possible for the animation")
parser.add_argument("-windowsize", "-w", type=int, default=config.default_window_size,
help="Integer describing the window size in pixels")
return parser.parse_args()
def run():
args = pass_args()
logger = log.Logger()
event_manager = eventm.EventManager()
game_model = model.GameModel(event_manager, rule_str=args.rule, field_size=(args.size, args.size))
game_view = view.View(event_manager, game_model, size=args.windowsize, tick_rate=args.speed)
game_controller = controller.Controller(event_manager, game_model)
game_model.run()
if __name__ == '__main__':
run()
| 46.142857 | 134 | 0.697626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.293602 |
c07059d11303ea7e0150379f0e66db3230da9433 | 2,899 | py | Python | tests/orca_unit_testing/test_series_str.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
]
| 20 | 2019-12-02T11:49:12.000Z | 2021-12-24T19:34:32.000Z | tests/orca_unit_testing/test_series_str.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
]
| null | null | null | tests/orca_unit_testing/test_series_str.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
]
| 5 | 2019-12-02T12:16:22.000Z | 2021-10-22T02:27:47.000Z | import unittest
import orca
from setup.settings import *
from pandas.util.testing import *
class SeriesStrTest(unittest.TestCase):
def setUp(self):
self.PRECISION = 5
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
@property
def ps(self):
return pd.Series(['Foo', 'ss ', 'sW', 'qa'], name='x')
@property
def os(self):
return orca.Series(self.ps)
@property
def psa(self):
return pd.Series([10, 1, 19, np.nan], index=['a', 'b', 'c', 'd'])
@property
def psb(self):
return pd.Series([-1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
def test_series_str_count(self):
assert_series_equal(self.ps.str.count('a'), self.os.str.count("a").to_pandas(),check_dtype=False)
def test_series_str_startsWith(self):
assert_series_equal(self.ps.str.startswith('Fo'), self.os.str.startswith('Fo').to_pandas(), check_dtype=False)
def test_series_str_endswith(self):
assert_series_equal(self.ps.str.endswith('W'), self.os.str.endswith('W').to_pandas(), check_dtype=False)
def test_series_str_find(self):
assert_series_equal(self.ps.str.find('Fo'), self.os.str.find('Fo').to_pandas(), check_dtype=False)
def test_series_str_get(self):
assert_series_equal(self.ps.str.get(1), self.os.str.get(1).to_pandas(), check_dtype=False)
def test_series_str_just(self):
        # TODO: pandas does not truncate the string when the requested width is smaller than the string length
# assert_series_equal(self.ps.str.ljust(1), self.os.str.ljust(1).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.ljust(10), self.os.str.ljust(10).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.ljust(10,'A'), self.os.str.ljust(10,'A').to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.rjust(10), self.os.str.rjust(10).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.rjust(10, 'A'), self.os.str.rjust(10, 'A').to_pandas(), check_dtype=False)
def test_series_str_is(self):
assert_series_equal(self.ps.str.isalnum(),self.os.str.isalnum().to_pandas())
assert_series_equal(self.ps.str.isalpha(), self.os.str.isalpha().to_pandas())
assert_series_equal(self.ps.str.isdigit(), self.os.str.isdigit().to_pandas())
assert_series_equal(self.ps.str.isspace(), self.os.str.isspace().to_pandas())
assert_series_equal(self.ps.str.islower(), self.os.str.islower().to_pandas())
assert_series_equal(self.ps.str.isupper(), self.os.str.isupper().to_pandas())
assert_series_equal(self.ps.str.istitle(), self.os.str.istitle().to_pandas())
assert_series_equal(self.ps.str.isnumeric(), self.os.str.isnumeric().to_pandas())
assert_series_equal(self.ps.str.isdecimal(), self.os.str.isdecimal().to_pandas())
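# Minimal entry-point sketch so the suite can be run directly; it assumes the
# DolphinDB server referenced by setup.settings (HOST, PORT) is reachable:
if __name__ == '__main__':
    unittest.main()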
| 43.924242 | 118 | 0.675405 | 2,805 | 0.967575 | 0 | 0 | 488 | 0.168334 | 0 | 0 | 283 | 0.09762 |
c0712a7efbb3bde035967d1fd7d0d7a42cb89f4b | 278 | py | Python | inputs/in4.py | mabbaszade/transportation-problem | 64ab4db7f836513c388073b5e2e9c64d7c439fde | ["MIT"] | null | null | null | inputs/in4.py | mabbaszade/transportation-problem | 64ab4db7f836513c388073b5e2e9c64d7c439fde | ["MIT"] | null | null | null | inputs/in4.py | mabbaszade/transportation-problem | 64ab4db7f836513c388073b5e2e9c64d7c439fde | ["MIT"] | null | null | null |
ITERATION_NUM = 10
MAX_POPULATION = 500
CROSSOVER_RATE = 1
MUTATION_RATE = 1
supplies = {
'S1': 20,
'S2': 15,
'S3': 40
}
demands = {
'D1': 20,
'D2': 30,
'D3': 25
}
cost = [[2, 3, 1],
[5, 4, 8],
[5, 6, 8]
]
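# Sanity-check sketch: this instance is balanced (total supply == total demand == 75),
# which classic transportation-problem formulations typically assume.
assert sum(supplies.values()) == sum(demands.values())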
| 12.636364 | 21 | 0.406475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.086331 |
c0712de2126b7958548be086996c8304d5bf2f45 | 89 | py | Python | Project/Connect/apps.py | AashishKhanal69/PeakyBlinders_ADC7_PartII | a4474c02be4ee8f8405b51df2f1d215e56ac192d | ["bzip2-1.0.6"] | null | null | null | Project/Connect/apps.py | AashishKhanal69/PeakyBlinders_ADC7_PartII | a4474c02be4ee8f8405b51df2f1d215e56ac192d | ["bzip2-1.0.6"] | null | null | null | Project/Connect/apps.py | AashishKhanal69/PeakyBlinders_ADC7_PartII | a4474c02be4ee8f8405b51df2f1d215e56ac192d | ["bzip2-1.0.6"] | null | null | null |
from django.apps import AppConfig
class ConnectConfig(AppConfig):
name = 'Connect'
| 14.833333 | 33 | 0.752809 | 52 | 0.58427 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.101124 |
c072ef7683ac75d9de6d4b4eb8e1ab74898e3920 | 914 | py | Python | spatial/edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | ["MIT"] | 1 | 2022-01-02T22:03:09.000Z | 2022-01-02T22:03:09.000Z | spatial/edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | ["MIT"] | null | null | null | spatial/edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | ["MIT"] | null | null | null |
from functools import cached_property
from .vector3 import Vector3
class Edge:
"""An edge created by two points."""
def __init__(self, start: Vector3, end: Vector3) -> None:
self.start = start
self.end = end
def __eq__(self, other: object) -> bool:
"""Return True if this edge is equal to the other."""
if isinstance(other, Edge):
if self.start == other.start and self.end == other.end:
return True
if self.start == other.end and self.end == other.start:
return True
return False
return NotImplemented
@cached_property
def length(self) -> float:
"""Return the length of the edge."""
return self.vector.length()
@cached_property
def vector(self) -> Vector3:
"""Return the edge's vector from start to end."""
return self.end - self.start
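# Usage sketch (assumes Vector3 takes x, y, z components):
# edge = Edge(Vector3(0, 0, 0), Vector3(1, 2, 2))
# edge.length  # 3.0 for this 1-2-2 vector; cached after the first access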
| 26.114286 | 67 | 0.591904 | 843 | 0.922319 | 0 | 0 | 272 | 0.297593 | 0 | 0 | 174 | 0.190372 |
c07358522633a4b5223edee437652e807e46cb27 | 1,054 | py | Python | timer.py | ryanleesmith/race-timer | 3a058e3689c9435751b06909d5b7a14db618d2da | ["MIT"] | null | null | null | timer.py | ryanleesmith/race-timer | 3a058e3689c9435751b06909d5b7a14db618d2da | ["MIT"] | null | null | null | timer.py | ryanleesmith/race-timer | 3a058e3689c9435751b06909d5b7a14db618d2da | ["MIT"] | null | null | null |
from gps import *
import math
import time
import json
import threading
gpsd = None
poller = None
class Poller(threading.Thread):
    """Background thread that continuously drains the gpsd stream."""
def __init__(self):
threading.Thread.__init__(self)
global gpsd
gpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)
self.current_value = None
self.running = True
def run(self):
global gpsd, poller
while poller.running:
gpsd.next()
def timer():
    """Poll gpsd on a background thread and yield the current speed as server-sent events."""
global gpsd, poller
poller = Poller()
try:
poller.start()
while True:
speed = gpsd.fix.speed
if math.isnan(speed):
speed = 0
#print(speed)
#print(gpsd.fix.mode)
#print(gpsd.satellites)
dump = json.dumps({'x': int(round(time.time() * 1000)), 'y': speed})
yield 'event: SPEED\ndata: {}\n\n'.format(dump)
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
print("\nKilling Thread...")
poller.running = False
poller.join()
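# Usage sketch: the generator yields server-sent events ("event: SPEED"), so it can
# back an SSE endpoint; the Flask wiring below is an assumption, not part of this module:
# from flask import Flask, Response
# app = Flask(__name__)
# @app.route('/speed')
# def speed_stream():
#     return Response(timer(), mimetype='text/event-stream')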
| 23.422222 | 80 | 0.555028 | 335 | 0.317837 | 617 | 0.585389 | 0 | 0 | 0 | 0 | 112 | 0.106262 |
c073994563fd9d56aecce3609828c1cbf0a8170a | 31,133 | py | Python | vkwave/bots/addons/easy/easy_handlers.py | amishakov/vkwave | 377d470fc4b84b64516fffcabc6d682bf86b5d7f | ["MIT"] | null | null | null | vkwave/bots/addons/easy/easy_handlers.py | amishakov/vkwave | 377d470fc4b84b64516fffcabc6d682bf86b5d7f | ["MIT"] | null | null | null | vkwave/bots/addons/easy/easy_handlers.py | amishakov/vkwave | 377d470fc4b84b64516fffcabc6d682bf86b5d7f | ["MIT"] | null | null | null |
import json
import random
import warnings
from typing import Any, Callable, Dict, List, Union, Type, Optional, NoReturn
from pydantic import PrivateAttr
from vkwave.bots import BotEvent, BotType, EventTypeFilter, UserEvent
from vkwave.bots.core import BaseFilter
from vkwave.bots.core.dispatching.filters.builtin import get_payload, get_text
from vkwave.bots.core.dispatching.handler.callback import BaseCallback
from vkwave.bots.core.dispatching.handler.cast import caster as callback_caster
from vkwave.bots.core.types.json_types import JSONEncoder
from vkwave.types.bot_events import BotEventType
from vkwave.types.objects import (
BaseBoolInt,
MessagesMessageAttachment,
MessagesMessageAttachmentType,
UsersUser,
)
from vkwave.types.responses import BaseOkResponse, MessagesEditResponse, MessagesSendResponse
from vkwave.types.user_events import EventId
try:
import aiofile
except ImportError:
aiofile = None
class SimpleUserEvent(UserEvent):
    """Wrapper around UserEvent adding convenience shortcuts (answer, reply, edit, ...)."""
def __init__(self, event: UserEvent):
super().__init__(event.object, event.api_ctx)
self.user_data = event.user_data
def __setitem__(self, key: Any, item: Any) -> None:
self.user_data[key] = item
def __getitem__(self, key: Any) -> Any:
return self.user_data[key]
@property
def text(self) -> str:
return get_text(self)
@property
def peer_id(self) -> int:
return self.object.object.peer_id
@property
def from_id(self) -> int:
return self.object.object.message_data.from_id
@property
def user_id(self) -> int:
return self.from_id if self.peer_id > 2e9 else self.peer_id
async def get_user(
self, raw_mode: bool = False, **kwargs
) -> Union["UsersUser", dict]: # getting information about the sender
raw_user = (
await self.api_ctx.api_request("users.get", {"user_ids": self.user_id, **kwargs})
)["response"][0]
return raw_user if raw_mode else UsersUser(**raw_user)
async def answer(
self,
message: Optional[str] = None,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
reply_to: Optional[int] = None,
forward_messages: Optional[List[int]] = None,
forward: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
) -> MessagesSendResponse:
return await self.api_ctx.messages.send(
message=message,
forward=forward,
template=template,
content_source=content_source,
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
reply_to=reply_to,
forward_messages=forward_messages,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
peer_id=self.object.object.peer_id,
random_id=random.randint(-2147483648, 2147483647),
)
async def long_answer(
self,
message: str,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
reply_to: Optional[int] = None,
forward_messages: Optional[List[int]] = None,
forward: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
) -> List[MessagesSendResponse]:
"""
        Shortcut for sending a message longer than 4096 characters, split into chunks
:return: Message IDs
"""
message_ids: List[MessagesSendResponse] = []
for x in range(0, len(message), 4096):
message_id = await self.answer(
message=message[x:x+4096],
forward=forward,
template=template,
content_source=content_source,
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
reply_to=reply_to,
forward_messages=forward_messages,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
)
message_ids.append(message_id)
return message_ids
async def reply(
self,
message: Optional[str] = None,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
forward_messages: Optional[List[int]] = None,
forward: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
) -> MessagesSendResponse:
return await self.api_ctx.messages.send(
message=message,
forward=forward,
template=template,
content_source=content_source,
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
reply_to=self.object.object.message_id,
forward_messages=forward_messages,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
peer_id=self.object.object.peer_id,
random_id=random.randint(-2147483648, 2147483647),
)
async def edit(
self,
message: Optional[str] = None,
return_raw_response: bool = False,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
keep_forward_messages: Optional[BaseBoolInt] = None,
keep_snippets: Optional[BaseBoolInt] = None,
group_id: Optional[int] = None,
dont_parse_links: Optional[bool] = None,
message_id: Optional[int] = None,
conversation_message_id: Optional[int] = None,
template: Optional[str] = None,
keyboard: Optional[str] = None,
) -> MessagesEditResponse:
return await self.api_ctx.messages.edit(
message=message,
peer_id=self.object.object.peer_id,
return_raw_response=return_raw_response,
lat=lat,
long=long,
attachment=attachment,
keep_forward_messages=keep_forward_messages,
keep_snippets=keep_snippets,
group_id=group_id,
dont_parse_links=dont_parse_links,
message_id=message_id or self.object.object.message_id,
conversation_message_id=conversation_message_id,
template=template,
keyboard=keyboard,
)
async def set_activity(
self,
type: Optional[str] = None,
user_id: Optional[int] = None,
group_id: Optional[int] = None,
) -> MessagesSendResponse:
"""
type:
            typing: the user has started typing,
            audiomessage: the user is recording a voice message
"""
return await self.api_ctx.messages.set_activity(
user_id=user_id,
type=type,
peer_id=self.object.object.peer_id,
group_id=group_id,
)
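# Usage sketch inside a user-bot handler (values are illustrative):
# async def handler(event: SimpleUserEvent):
#     await event.set_activity(type="typing")
#     await event.answer("pong")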
def _check_event_type(event_type: str):
if event_type not in (
BotEventType.MESSAGE_NEW,
BotEventType.MESSAGE_EDIT,
BotEventType.MESSAGE_REPLY,
BotEventType.MESSAGE_TYPING_STATE,
BotEventType.MESSAGE_ALLOW,
):
raise RuntimeError("You cant use event.answer() with this event")
class SimpleAttachment(MessagesMessageAttachment):
_event: "SimpleBotEvent" = PrivateAttr()
_data: Optional[bytes] = PrivateAttr()
_allowed_types: List[MessagesMessageAttachmentType] = PrivateAttr()
_url_types: Dict[MessagesMessageAttachmentType, Callable] = PrivateAttr()
def __init__(self, attachment: MessagesMessageAttachment, event: "SimpleBotEvent"):
super().__init__(**attachment.dict())
self._event = event
self._data = None
self._allowed_types = [
MessagesMessageAttachmentType.AUDIO_MESSAGE,
MessagesMessageAttachmentType.DOC,
MessagesMessageAttachmentType.AUDIO,
MessagesMessageAttachmentType.PHOTO,
MessagesMessageAttachmentType.GRAFFITI,
]
self._url_types = {
MessagesMessageAttachmentType.PHOTO: lambda _attachment: _attachment.photo.sizes[
-1
].url,
MessagesMessageAttachmentType.AUDIO_MESSAGE: lambda _attachment: _attachment.audio_message.link_ogg,
MessagesMessageAttachmentType.DOC: lambda _attachment: _attachment.doc.url,
MessagesMessageAttachmentType.AUDIO: lambda _attachment: _attachment.audio.url,
MessagesMessageAttachmentType.GRAFFITI: lambda _attachment: _attachment.graffiti.url,
}
@property
def url(self) -> str:
return self._url_types[self.type](self)
async def download(self) -> Union[NoReturn, bytes]:
if self._data is not None:
return self._data
if self.type not in self._allowed_types:
raise RuntimeError("cannot download this attachment type")
url = self.url
client, token = await self._event.api_ctx.api_options.get_client_and_token()
data = await client.http_client.request_data(method="GET", url=url)
self._data = data
return data
async def save(self, path: str):
attach_data = self._data
if attach_data is None:
attach_data = await self.download()
if aiofile is None:
warnings.warn("aiofile is not installed, saving synchronously")
with open(path, "wb") as f:
f.write(attach_data)
return
async with aiofile.async_open(path, "wb") as afp:
await afp.write(attach_data)
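# Usage sketch (assumes the incoming message carries a downloadable attachment type,
# e.g. a photo or document):
# data = await event.attachments[0].download()  # bytes, cached on the instance
# await event.attachments[0].save("attachment.bin")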
class Attachments(list):
def __init__(self, event: "SimpleBotEvent"):
super().__init__(
[
SimpleAttachment(attachment, event=event)
for attachment in event.object.object.message.attachments
]
)
class SimpleBotEvent(BotEvent):
"""Базовый класс события."""
def __init__(self, event: BotEvent):
super().__init__(event.object, event.api_ctx)
self.user_data = event.user_data
self._attachments: Optional[Attachments] = None
self._payload: Optional[dict] = None
def __setitem__(self, key: Any, item: Any) -> None:
self.user_data[key] = item
def __getitem__(self, key: Any) -> Any:
return self.user_data[key]
@property
def text(self) -> str:
"""Получает текст сообщения
Returns:
str: Текст
"""
return get_text(self)
@property
def peer_id(self) -> int:
"""Получает идентификатор чата
Returns:
int: идентификатор чата
"""
if self.object.type == BotEventType.MESSAGE_EVENT.value:
return self.object.object.peer_id
return self.object.object.message.peer_id
@property
def from_id(self) -> int:
"""Получает идентификатор отправителя
Returns:
int: идентификатор отправителя
"""
if self.object.type == BotEventType.MESSAGE_EVENT.value:
return self.object.object.user_id
return self.object.object.message.from_id
@property
def payload(self) -> Optional[dict]:
"""Получает payload события
Returns:
int: payload события
"""
current_payload = get_payload(self)
if current_payload is None:
return current_payload
if self._payload is None:
self._payload = (
json.loads(current_payload)
if not isinstance(current_payload, dict)
else current_payload
)
return self._payload
@property
def attachments(self) -> Optional[List[SimpleAttachment]]:
"""Получает список вложений
Returns:
Optional[List[SimpleAttachment]]: список вложений
"""
if self.object.object.message.attachments is None:
return None
if self._attachments is None:
self._attachments = Attachments(event=self)
return self._attachments
@property
def user_id(self) -> int:
"""Шорткат для выбора from_id или peer_id
Returns:
int: идентификатор пользователя
"""
return self.from_id if self.peer_id > 2e9 else self.peer_id
async def get_user(self, raw_mode: bool = False, **kwargs) -> Union["UsersUser", dict]:
"""Получение объекта пользователя
Returns:
Union["UsersUser", dict]: Объект пользователя
"""
raw_user = (
await self.api_ctx.api_request("users.get", {"user_ids": self.user_id, **kwargs})
)["response"][0]
return raw_user if raw_mode else UsersUser(**raw_user)
async def edit(
self,
message: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
keep_forward_messages: Optional[BaseBoolInt] = None,
keep_snippets: Optional[BaseBoolInt] = None,
group_id: Optional[int] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
message_id: Optional[int] = None,
conversation_message_id: Optional[int] = None,
template: Optional[str] = None,
keyboard: Optional[str] = None,
) -> MessagesEditResponse:
"""Шорткат для редактирования своего сообщения.
Args:
message (Optional[str]): Текст.
lat (Optional[int]): Широта.
long (Optional[int]): Долгота.
attachment (Optional[str]): Вложения (строка с идентификаторами, разделёнными запятой).
keep_forward_messages (Optional[BaseBoolInt]): — сохранить прикрепленные пересланные сообщения.
keep_snippets (Optional[BaseBoolInt]): 1 — сохранить прикрепленные внешние ссылки (сниппеты).
group_id (Optional[int]): Идентификатор группы.
dont_parse_links (Optional[bool]): 1 — не создавать сниппет ссылки из сообщения.
disable_mentions (Optional[bool]): 1 — отключить уведомление об упоминании в сообщении.
message_id (Optional[int]): Идентификатор сообщения.
conversation_message_id (Optional[int]): Идентификатор сообщения в беседе.
template (Optional[str]): Шаблон.
keyboard (Optional[str]): Клавиатура.
Returns:
MessagesEditResponse: Ответ сервера
"""
_check_event_type(self.object.type)
return await self.api_ctx.messages.edit(
peer_id=self.object.object.message.peer_id,
message=message,
lat=lat,
long=long,
attachment=attachment,
keep_forward_messages=keep_forward_messages,
keep_snippets=keep_snippets,
group_id=group_id,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
message_id=message_id,
conversation_message_id=conversation_message_id,
template=template,
keyboard=keyboard
)
async def reply(
self,
message: Optional[str] = None,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
json_serialize: JSONEncoder = json.dumps,
) -> MessagesSendResponse:
"""Шорткат для отправки ответа на сообщение пользователю, от которого пришло событие
Args:
message (Optional[str]): Текст.
domain (Optional[str]): Короткая ссылка пользователя.
lat (Optional[int]): Широта.
long (Optional[int]): Долгота.
attachment (Optional[str]): Вложения (строка с идентификаторами, разделёнными запятой).
sticker_id (Optional[int]): Идентификатор прикрепляемого стикера.
group_id (Optional[int]): Идентификатор группы.
keyboard (Optional[str]): Клавиатура.
template (Optional[str]): Шаблон (карусель, например).
payload (Optional[str]): Payload.
content_source (Optional[str]): Источник [пользовательского контента](https://vk.com/dev/bots_docs_2?f=3.3.+%D0%A1%D0%BE%D0%BE%D0%B1%D1%89%D0%B5%D0%BD%D0%B8%D1%8F+%D1%81+%D0%BF%D0%BE%D0%BB%D1%8C%D0%B7%D0%BE%D0%B2%D0%B0%D1%82%D0%B5%D0%BB%D1%8C%D1%81%D0%BA%D0%B8%D0%BC+%D0%BA%D0%BE%D0%BD%D1%82%D0%B5%D0%BD%D1%82%D0%BE%D0%BC).
dont_parse_links (Optional[bool]): 1 — не создавать сниппет ссылки из сообщения.
disable_mentions (Optional[bool]): 1 — отключить создание упоминаний.
intent (Optional[str]): Строка, описывающая [интенты](https://vk.com/dev/bots_docs_4?f=7.+%D0%98%D0%BD%D1%82%D0%B5%D0%BD%D1%82%D1%8B).
subscribe_id (Optional[int]): число, которое в будущем будет предназначено для работы с интентами.
expire_ttl (Optional[int]): ???.
silent (Optional[bool]): ???.
json_serialize (JSONEncoder): сериализация.
Returns:
MessagesSendResponse - Ответ сервера
"""
_check_event_type(self.object.type)
forward = {
"is_reply": 1,
"conversation_message_ids": self.object.object.message.conversation_message_id,
"peer_id": self.object.object.message.peer_id,
}
return await self.api_ctx.messages.send(
forward=json_serialize(forward),
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
peer_id=self.object.object.message.peer_id,
message=message,
random_id=0,
template=template,
content_source=content_source,
)
async def answer(
self,
message: Optional[str] = None,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
reply_to: Optional[int] = None,
forward_messages: Optional[List[int]] = None,
forward: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
) -> MessagesSendResponse:
"""Шорткат для отправки сообщения пользователю, от которого пришло событие.
Args:
message (Optional[str]): Текст.
domain (Optional[str]): Короткая ссылка пользователя.
lat (Optional[int]): Широта.
long (Optional[int]): Долгота.
attachment (Optional[str]): Вложения (строка с идентификаторами, разделёнными запятой).
reply_to (Optional[int]): Идентификатор сообщения, на которое нужно ответить.
forward_messages (Optional[List[int]]): Идентификаторы пересылаемых сообщений.
forward (Optional[str]): JSON-объект (подробнее в [документации ВК](https://vk.com/dev/messages.send)).
sticker_id (Optional[int]): Идентификатор прикрепляемого стикера.
group_id (Optional[int]): Идентификатор группы.
keyboard (Optional[str]): Клавиатура.
template (Optional[str]): Шаблон (карусель, например).
payload (Optional[str]): Payload.
content_source (Optional[str]): Источник [пользовательского контента](https://vk.com/dev/bots_docs_2?f=3.3.+%D0%A1%D0%BE%D0%BE%D0%B1%D1%89%D0%B5%D0%BD%D0%B8%D1%8F+%D1%81+%D0%BF%D0%BE%D0%BB%D1%8C%D0%B7%D0%BE%D0%B2%D0%B0%D1%82%D0%B5%D0%BB%D1%8C%D1%81%D0%BA%D0%B8%D0%BC+%D0%BA%D0%BE%D0%BD%D1%82%D0%B5%D0%BD%D1%82%D0%BE%D0%BC).
dont_parse_links (Optional[bool]): 1 — не создавать сниппет ссылки из сообщения.
disable_mentions (Optional[bool]): 1 — отключить создание упоминаний.
intent (Optional[str]): Строка, описывающая [интенты](https://vk.com/dev/bots_docs_4?f=7.+%D0%98%D0%BD%D1%82%D0%B5%D0%BD%D1%82%D1%8B).
subscribe_id (Optional[int]): число, которое в будущем будет предназначено для работы с интентами.
expire_ttl (Optional[int]): ???.
silent (Optional[bool]): ???.
:return: Message ID
"""
_check_event_type(self.object.type)
return await self.api_ctx.messages.send(
forward=forward,
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
reply_to=reply_to,
forward_messages=forward_messages,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
peer_id=self.object.object.message.peer_id,
message=message,
random_id=0,
template=template,
content_source=content_source,
)
async def long_answer(
self,
        message: str,
domain: Optional[str] = None,
lat: Optional[int] = None,
long: Optional[int] = None,
attachment: Optional[str] = None,
reply_to: Optional[int] = None,
forward_messages: Optional[List[int]] = None,
forward: Optional[str] = None,
sticker_id: Optional[int] = None,
group_id: Optional[int] = None,
keyboard: Optional[str] = None,
template: Optional[str] = None,
payload: Optional[str] = None,
content_source: Optional[str] = None,
dont_parse_links: Optional[bool] = None,
disable_mentions: Optional[bool] = None,
intent: Optional[str] = None,
subscribe_id: Optional[int] = None,
expire_ttl: Optional[int] = None,
silent: Optional[bool] = None,
) -> List[MessagesSendResponse]:
"""
        Shortcut for sending a message longer than 4096 characters, split into chunks
:return: Message IDs
"""
_check_event_type(self.object.type)
message_ids: List[MessagesSendResponse] = []
for x in range(0, len(message), 4096):
message_id = await self.answer(
forward=forward,
intent=intent,
subscribe_id=subscribe_id,
expire_ttl=expire_ttl,
silent=silent,
domain=domain,
lat=lat,
long=long,
attachment=attachment,
reply_to=reply_to,
forward_messages=forward_messages,
sticker_id=sticker_id,
group_id=group_id,
keyboard=keyboard,
payload=payload,
dont_parse_links=dont_parse_links,
disable_mentions=disable_mentions,
message=message[x:x+4096],
template=template,
content_source=content_source,
)
message_ids.append(message_id)
return message_ids
async def set_activity(
self,
type: Optional[str] = None,
user_id: Optional[int] = None,
group_id: Optional[int] = None,
) -> MessagesSendResponse:
"""Изменение статуса активности
Args:
type (Optional[str], optional): Тип активности. (`typing` — пользователь начал набирать текст, `audiomessage` — пользователь записывает голосовое сообщение)
user_id (Optional[int], optional): Идентификатор пользователя-получателя.
group_id (Optional[int], optional): Идентификатор группы.
Returns:
MessagesSendResponse: Результат запроса.
"""
_check_event_type(self.object.type)
return await self.api_ctx.messages.set_activity(
user_id=user_id,
type=type,
peer_id=self.object.object.message.peer_id,
group_id=group_id,
)
async def callback_answer(self, event_data: str) -> BaseOkResponse:
"""Ответ на нажатие callback кнопки.
Args:
event_data (Dict[str, str]): [описание данных](https://vk.com/dev/bots_docs_5?f=4.4.%2BCallback-%D0%BA%D0%BD%D0%BE%D0%BF%D0%BA%D0%B8) для ответа на callback
Raises:
RuntimeError: Если вызван, когда событие не MessageEvent типа.
Returns:
BaseOkResponse: Результат запроса
"""
if self.object.type != BotEventType.MESSAGE_EVENT:
raise RuntimeError("You cant use event.callback_answer() with this event")
return await self.api_ctx.messages.send_message_event_answer(
user_id=self.object.object.user_id,
peer_id=self.object.object.peer_id,
event_id=self.object.object.event_id,
event_data=event_data,
)
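# Usage sketch for a MESSAGE_EVENT (callback button) handler; the "show_snackbar"
# payload shape follows the VK documentation linked above:
# await event.callback_answer(json.dumps({"type": "show_snackbar", "text": "Done"}))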
class SimpleBotCallback(BaseCallback):
def __init__(
self,
func: Any,
bot_type: BotType,
event_type: Type[Union[SimpleUserEvent, SimpleBotEvent]]
):
self.bot_type = bot_type
self.func = callback_caster.cast(func)
self.event_type = event_type
async def execute(self, event: Union[UserEvent, BotEvent]) -> Any:
        # Both branches constructed the same event type, so no bot-type check is needed here.
        new_event = self.event_type(event)
return await self.func.execute(new_event)
def __repr__(self):
return f"<SimpleBotCallback {self.func.__name__} bot_type={self.bot_type}>"
def simple_bot_handler(router, event: Optional[Type[SimpleBotEvent]] = None, *filters: BaseFilter):
"""
Handler for all bot events
"""
def decorator(func: Callable[..., Any]):
record = router.registrar.new()
record.with_filters(*filters)
record.handle(SimpleBotCallback(func, BotType.BOT, event or SimpleBotEvent))
router.registrar.register(record.ready())
return func
return decorator
def simple_user_handler(router, *filters: BaseFilter, event: Optional[Type[SimpleUserEvent]] = None):
"""
Handler for all user events
"""
def decorator(func: Callable[..., Any]):
record = router.registrar.new()
record.with_filters(*filters)
record.handle(SimpleBotCallback(func, BotType.USER, event or SimpleUserEvent))
router.registrar.register(record.ready())
return func
return decorator
def simple_bot_message_handler(router, *filters: BaseFilter, event: Optional[Type[SimpleBotEvent]] = None):
"""
Handler only for message events
"""
def decorator(func: Callable[..., Any]):
record = router.registrar.new()
record.with_filters(*filters)
record.filters.append(EventTypeFilter(BotEventType.MESSAGE_NEW))
record.handle(SimpleBotCallback(func, BotType.BOT, event or SimpleBotEvent))
router.registrar.register(record.ready())
return func
return decorator
def simple_user_message_handler(router, *filters: BaseFilter, event: Optional[Type[SimpleUserEvent]] = None):
"""
Handler only for message events
"""
def decorator(func: Callable[..., Any]):
record = router.registrar.new()
record.with_filters(*filters)
record.filters.append(EventTypeFilter(EventId.MESSAGE_EVENT.value))
record.handle(SimpleBotCallback(func, BotType.USER, event or SimpleUserEvent))
router.registrar.register(record.ready())
return func
return decorator
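# Usage sketch for the decorators above (router construction and the text filter
# are assumptions; see the vkwave documentation for full bot setup):
# from vkwave.bots import DefaultRouter, TextFilter
# router = DefaultRouter()
#
# @simple_bot_message_handler(router, TextFilter("ping"))
# async def ping(event: SimpleBotEvent):
#     await event.answer("pong")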
| 37.195938 | 335 | 0.615039 | 29,848 | 0.901181 | 0 | 0 | 2,587 | 0.078108 | 24,244 | 0.731983 | 9,269 | 0.279853 |
c074c692de4483f97d3f233f58a66ad3a9239b2d | 1,337 | py | Python | src/scs_core/osio/data/message_body.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | ["MIT"] | 3 | 2019-03-12T01:59:58.000Z | 2020-09-12T07:27:42.000Z | src/scs_core/osio/data/message_body.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | ["MIT"] | 1 | 2018-04-20T07:58:38.000Z | 2021-03-27T08:52:45.000Z | src/scs_core/osio/data/message_body.py | seoss/scs_core | 0d4323c5697a39eb44a887f179ba5dca3716c1d2 | ["MIT"] | 4 | 2017-09-29T13:08:43.000Z | 2019-10-09T09:13:58.000Z |
"""
Created on 7 Nov 2016
@author: Bruno Beloff ([email protected])
example:
25 June 2016 17:44:28 BST: {"datum":{"conc":92,"dens":184},"measured-at":"2016-06-25T17:41:01+01:00"}
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class MessageBody(JSONable):
"""
    A JSONable wrapper for the string payload of an OpenSensors.io message body.
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, data):
"""
Constructor
"""
self.__data = data # string
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['data'] = self.data
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def data(self):
return self.__data
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "MessageBody:{data:%s}" % self.data
| 25.711538 | 118 | 0.324607 | 934 | 0.698579 | 0 | 0 | 56 | 0.041885 | 0 | 0 | 872 | 0.652206 |
c078fecfd19302ee3b513baaaa01bf856eb712e7 | 24,154 | py | Python | pysnmp/CISCO-FC-PM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CISCO-FC-PM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CISCO-FC-PM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module CISCO-FC-PM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-FC-PM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:40:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
PerfIntervalCount, PerfCurrentCount, PerfTotalCount = mibBuilder.importSymbols("PerfHist-TC-MIB", "PerfIntervalCount", "PerfCurrentCount", "PerfTotalCount")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
iso, Bits, ModuleIdentity, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32, NotificationType, Counter32, Gauge32, IpAddress, Unsigned32, Counter64, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Bits", "ModuleIdentity", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32", "NotificationType", "Counter32", "Gauge32", "IpAddress", "Unsigned32", "Counter64", "TimeTicks")
TextualConvention, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString")
ciscoFcPmMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 99997))
ciscoFcPmMIB.setRevisions(('2005-02-06 00:00',))
if mibBuilder.loadTexts: ciscoFcPmMIB.setLastUpdated('200502060000Z')
if mibBuilder.loadTexts: ciscoFcPmMIB.setOrganization('Cisco Systems, Inc.')
ciscoFcPmMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 0))
ciscoFcPmMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1))
ciscoFcPmMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2))
cfcpmPortPerfStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1))
cfcpmPortErrorStatusBlock = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2))
cfcpmPortPerfStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1, 1), )
if mibBuilder.loadTexts: cfcpmPortPerfStatusTable.setStatus('current')
cfcpmPortPerfStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cfcpmPortPerfStatusEntry.setStatus('current')
cfcpmTimeElapsed = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 899))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmTimeElapsed.setStatus('current')
cfcpmValidIntervals = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmValidIntervals.setStatus('current')
cfcpmInvalidIntervals = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmInvalidIntervals.setStatus('current')
cfcpmTotalPortErrorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1), )
if mibBuilder.loadTexts: cfcpmTotalPortErrorTable.setStatus('current')
cfcpmTotalPortErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cfcpmTotalPortErrorEntry.setStatus('current')
cfcpmtPortRxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 1), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortRxLinkResets.setStatus('current')
cfcpmtPortTxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 2), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortTxLinkResets.setStatus('current')
cfcpmtPortLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 3), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortLinkResets.setStatus('current')
cfcpmtPortRxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 4), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortRxOfflineSequences.setStatus('current')
cfcpmtPortTxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 5), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortTxOfflineSequences.setStatus('current')
cfcpmtPortLinkFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 6), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortLinkFailures.setStatus('current')
cfcpmtPortSynchLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 7), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortSynchLosses.setStatus('current')
cfcpmtPortSignalLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 8), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortSignalLosses.setStatus('current')
cfcpmtPortPrimSeqProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 9), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortPrimSeqProtocolErrors.setStatus('current')
cfcpmtPortInvalidTxWords = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 10), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortInvalidTxWords.setStatus('current')
cfcpmtPortInvalidCRCs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 11), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortInvalidCRCs.setStatus('current')
cfcpmtPortInvalidOrderedSets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 12), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortInvalidOrderedSets.setStatus('current')
cfcpmtPortFramesTooLong = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 13), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortFramesTooLong.setStatus('current')
cfcpmtPortTruncatedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 14), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortTruncatedFrames.setStatus('current')
cfcpmtPortAddressErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 15), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortAddressErrors.setStatus('current')
cfcpmtPortDelimiterErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 16), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortDelimiterErrors.setStatus('current')
cfcpmtPortEncDisparityErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 17), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortEncDisparityErrors.setStatus('current')
cfcpmtPortOtherErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 1, 1, 18), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmtPortOtherErrors.setStatus('current')
cfcpmCurrentPortErrorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2), )
if mibBuilder.loadTexts: cfcpmCurrentPortErrorTable.setStatus('current')
cfcpmCurrentPortErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cfcpmCurrentPortErrorEntry.setStatus('current')
cfcpmcPortRxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 1), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortRxLinkResets.setStatus('current')
cfcpmcPortTxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 2), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortTxLinkResets.setStatus('current')
cfcpmcPortLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 3), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortLinkResets.setStatus('current')
cfcpmcPortRxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 4), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortRxOfflineSequences.setStatus('current')
cfcpmcPortTxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 5), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortTxOfflineSequences.setStatus('current')
cfcpmcPortLinkFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 6), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortLinkFailures.setStatus('current')
cfcpmcPortSynchLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 7), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortSynchLosses.setStatus('current')
cfcpmcPortSignalLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 8), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortSignalLosses.setStatus('current')
cfcpmcPortPrimSeqProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 9), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortPrimSeqProtocolErrors.setStatus('current')
cfcpmcPortInvalidTxWords = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 10), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortInvalidTxWords.setStatus('current')
cfcpmcPortInvalidCRCs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 11), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortInvalidCRCs.setStatus('current')
cfcpmcPortInvalidOrderedSets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 12), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortInvalidOrderedSets.setStatus('current')
cfcpmcPortFramesTooLong = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 13), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortFramesTooLong.setStatus('current')
cfcpmcPortTruncatedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 14), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortTruncatedFrames.setStatus('current')
cfcpmcPortAddressErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 15), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortAddressErrors.setStatus('current')
cfcpmcPortDelimiterErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 16), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortDelimiterErrors.setStatus('current')
cfcpmcPortEncDisparityErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 17), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortEncDisparityErrors.setStatus('current')
cfcpmcPortOtherErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 2, 1, 18), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmcPortOtherErrors.setStatus('current')
cfcpmIntervalPortErrorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3), )
if mibBuilder.loadTexts: cfcpmIntervalPortErrorTable.setStatus('current')
cfcpmIntervalPortErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-FC-PM-MIB", "cfcpmiPortErrorIntervalNumber"))
if mibBuilder.loadTexts: cfcpmIntervalPortErrorEntry.setStatus('current')
cfcpmiPortErrorIntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 96)))
if mibBuilder.loadTexts: cfcpmiPortErrorIntervalNumber.setStatus('current')
cfcpmiPortRxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 2), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortRxLinkResets.setStatus('current')
cfcpmiPortTxLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 3), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortTxLinkResets.setStatus('current')
cfcpmiPortLinkResets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 4), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortLinkResets.setStatus('current')
cfcpmiPortRxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 5), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortRxOfflineSequences.setStatus('current')
cfcpmiPortTxOfflineSequences = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 6), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortTxOfflineSequences.setStatus('current')
cfcpmiPortLinkFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 7), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortLinkFailures.setStatus('current')
cfcpmiPortSynchLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 8), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortSynchLosses.setStatus('current')
cfcpmiPortSignalLosses = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 9), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortSignalLosses.setStatus('current')
cfcpmiPortPrimSeqProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 10), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortPrimSeqProtocolErrors.setStatus('current')
cfcpmiPortInvalidTxWords = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 11), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortInvalidTxWords.setStatus('current')
cfcpmiPortInvalidCRCs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 12), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortInvalidCRCs.setStatus('current')
cfcpmiPortInvalidOrderedSets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 13), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortInvalidOrderedSets.setStatus('current')
cfcpmiPortFramesTooLong = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 14), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortFramesTooLong.setStatus('current')
cfcpmiPortTruncatedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 15), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortTruncatedFrames.setStatus('current')
cfcpmiPortAddressErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 16), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortAddressErrors.setStatus('current')
cfcpmiPortDelimiterErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 17), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortDelimiterErrors.setStatus('current')
cfcpmiPortEncDisparityErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 18), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortEncDisparityErrors.setStatus('current')
cfcpmiPortOtherErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 19), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortOtherErrors.setStatus('current')
cfcpmiPortValidData = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 99997, 1, 2, 3, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfcpmiPortValidData.setStatus('current')
cfcpmMibCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 1))
cfcpmMibGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 2))
cfcpmMibCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 1, 1)).setObjects(("CISCO-FC-PM-MIB", "cfcpmPortStatusGroup"), ("CISCO-FC-PM-MIB", "cfcpmMandatoryGroup"), ("CISCO-FC-PM-MIB", "cfcpmOptionalGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cfcpmMibCompliance = cfcpmMibCompliance.setStatus('current')
cfcpmPortStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 2, 1)).setObjects(("CISCO-FC-PM-MIB", "cfcpmTimeElapsed"), ("CISCO-FC-PM-MIB", "cfcpmValidIntervals"), ("CISCO-FC-PM-MIB", "cfcpmInvalidIntervals"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cfcpmPortStatusGroup = cfcpmPortStatusGroup.setStatus('current')
cfcpmMandatoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 2, 2)).setObjects(("CISCO-FC-PM-MIB", "cfcpmtPortPrimSeqProtocolErrors"), ("CISCO-FC-PM-MIB", "cfcpmcPortPrimSeqProtocolErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortPrimSeqProtocolErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortValidData"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cfcpmMandatoryGroup = cfcpmMandatoryGroup.setStatus('current')
cfcpmOptionalGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 99997, 2, 2, 3)).setObjects(("CISCO-FC-PM-MIB", "cfcpmtPortRxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmtPortTxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmtPortLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmtPortRxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmtPortTxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmtPortLinkFailures"), ("CISCO-FC-PM-MIB", "cfcpmtPortSynchLosses"), ("CISCO-FC-PM-MIB", "cfcpmtPortSignalLosses"), ("CISCO-FC-PM-MIB", "cfcpmtPortInvalidTxWords"), ("CISCO-FC-PM-MIB", "cfcpmtPortInvalidCRCs"), ("CISCO-FC-PM-MIB", "cfcpmtPortInvalidOrderedSets"), ("CISCO-FC-PM-MIB", "cfcpmtPortFramesTooLong"), ("CISCO-FC-PM-MIB", "cfcpmtPortTruncatedFrames"), ("CISCO-FC-PM-MIB", "cfcpmtPortAddressErrors"), ("CISCO-FC-PM-MIB", "cfcpmtPortDelimiterErrors"), ("CISCO-FC-PM-MIB", "cfcpmtPortEncDisparityErrors"), ("CISCO-FC-PM-MIB", "cfcpmtPortOtherErrors"), ("CISCO-FC-PM-MIB", "cfcpmcPortRxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmcPortTxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmcPortLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmcPortRxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmcPortTxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmcPortLinkFailures"), ("CISCO-FC-PM-MIB", "cfcpmcPortSynchLosses"), ("CISCO-FC-PM-MIB", "cfcpmcPortSignalLosses"), ("CISCO-FC-PM-MIB", "cfcpmcPortInvalidTxWords"), ("CISCO-FC-PM-MIB", "cfcpmcPortInvalidCRCs"), ("CISCO-FC-PM-MIB", "cfcpmcPortInvalidOrderedSets"), ("CISCO-FC-PM-MIB", "cfcpmcPortFramesTooLong"), ("CISCO-FC-PM-MIB", "cfcpmcPortTruncatedFrames"), ("CISCO-FC-PM-MIB", "cfcpmcPortAddressErrors"), ("CISCO-FC-PM-MIB", "cfcpmcPortDelimiterErrors"), ("CISCO-FC-PM-MIB", "cfcpmcPortEncDisparityErrors"), ("CISCO-FC-PM-MIB", "cfcpmcPortOtherErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortRxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmiPortTxLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmiPortLinkResets"), ("CISCO-FC-PM-MIB", "cfcpmiPortRxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmiPortTxOfflineSequences"), ("CISCO-FC-PM-MIB", "cfcpmiPortLinkFailures"), ("CISCO-FC-PM-MIB", "cfcpmiPortSynchLosses"), ("CISCO-FC-PM-MIB", "cfcpmiPortSignalLosses"), ("CISCO-FC-PM-MIB", "cfcpmiPortInvalidTxWords"), ("CISCO-FC-PM-MIB", "cfcpmiPortInvalidCRCs"), ("CISCO-FC-PM-MIB", "cfcpmiPortInvalidOrderedSets"), ("CISCO-FC-PM-MIB", "cfcpmiPortFramesTooLong"), ("CISCO-FC-PM-MIB", "cfcpmiPortTruncatedFrames"), ("CISCO-FC-PM-MIB", "cfcpmiPortAddressErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortDelimiterErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortEncDisparityErrors"), ("CISCO-FC-PM-MIB", "cfcpmiPortOtherErrors"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cfcpmOptionalGroup = cfcpmOptionalGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-FC-PM-MIB", cfcpmtPortSynchLosses=cfcpmtPortSynchLosses, cfcpmPortStatusGroup=cfcpmPortStatusGroup, cfcpmtPortFramesTooLong=cfcpmtPortFramesTooLong, cfcpmtPortTxLinkResets=cfcpmtPortTxLinkResets, cfcpmcPortTxOfflineSequences=cfcpmcPortTxOfflineSequences, cfcpmiPortRxOfflineSequences=cfcpmiPortRxOfflineSequences, cfcpmcPortInvalidCRCs=cfcpmcPortInvalidCRCs, cfcpmcPortInvalidOrderedSets=cfcpmcPortInvalidOrderedSets, cfcpmtPortEncDisparityErrors=cfcpmtPortEncDisparityErrors, cfcpmcPortPrimSeqProtocolErrors=cfcpmcPortPrimSeqProtocolErrors, cfcpmTimeElapsed=cfcpmTimeElapsed, cfcpmMibCompliances=cfcpmMibCompliances, cfcpmiPortPrimSeqProtocolErrors=cfcpmiPortPrimSeqProtocolErrors, cfcpmInvalidIntervals=cfcpmInvalidIntervals, cfcpmcPortSynchLosses=cfcpmcPortSynchLosses, cfcpmValidIntervals=cfcpmValidIntervals, cfcpmiPortEncDisparityErrors=cfcpmiPortEncDisparityErrors, cfcpmMibGroups=cfcpmMibGroups, cfcpmcPortRxOfflineSequences=cfcpmcPortRxOfflineSequences, cfcpmMibCompliance=cfcpmMibCompliance, cfcpmPortPerfStatusEntry=cfcpmPortPerfStatusEntry, cfcpmiPortValidData=cfcpmiPortValidData, cfcpmtPortRxOfflineSequences=cfcpmtPortRxOfflineSequences, cfcpmIntervalPortErrorEntry=cfcpmIntervalPortErrorEntry, cfcpmPortErrorStatusBlock=cfcpmPortErrorStatusBlock, ciscoFcPmMIBConform=ciscoFcPmMIBConform, cfcpmcPortSignalLosses=cfcpmcPortSignalLosses, cfcpmOptionalGroup=cfcpmOptionalGroup, cfcpmPortPerfStatusTable=cfcpmPortPerfStatusTable, cfcpmtPortRxLinkResets=cfcpmtPortRxLinkResets, PYSNMP_MODULE_ID=ciscoFcPmMIB, cfcpmTotalPortErrorEntry=cfcpmTotalPortErrorEntry, cfcpmtPortLinkResets=cfcpmtPortLinkResets, cfcpmiPortRxLinkResets=cfcpmiPortRxLinkResets, cfcpmiPortSignalLosses=cfcpmiPortSignalLosses, cfcpmiPortInvalidTxWords=cfcpmiPortInvalidTxWords, cfcpmcPortAddressErrors=cfcpmcPortAddressErrors, cfcpmiPortErrorIntervalNumber=cfcpmiPortErrorIntervalNumber, cfcpmIntervalPortErrorTable=cfcpmIntervalPortErrorTable, cfcpmiPortDelimiterErrors=cfcpmiPortDelimiterErrors, cfcpmPortPerfStatus=cfcpmPortPerfStatus, cfcpmcPortLinkFailures=cfcpmcPortLinkFailures, cfcpmCurrentPortErrorEntry=cfcpmCurrentPortErrorEntry, cfcpmiPortInvalidCRCs=cfcpmiPortInvalidCRCs, cfcpmcPortEncDisparityErrors=cfcpmcPortEncDisparityErrors, cfcpmiPortFramesTooLong=cfcpmiPortFramesTooLong, cfcpmtPortLinkFailures=cfcpmtPortLinkFailures, cfcpmcPortOtherErrors=cfcpmcPortOtherErrors, cfcpmtPortOtherErrors=cfcpmtPortOtherErrors, cfcpmcPortInvalidTxWords=cfcpmcPortInvalidTxWords, cfcpmiPortInvalidOrderedSets=cfcpmiPortInvalidOrderedSets, cfcpmtPortInvalidTxWords=cfcpmtPortInvalidTxWords, cfcpmiPortTxLinkResets=cfcpmiPortTxLinkResets, cfcpmtPortTruncatedFrames=cfcpmtPortTruncatedFrames, ciscoFcPmMIBNotifs=ciscoFcPmMIBNotifs, cfcpmtPortAddressErrors=cfcpmtPortAddressErrors, cfcpmcPortLinkResets=cfcpmcPortLinkResets, cfcpmiPortOtherErrors=cfcpmiPortOtherErrors, cfcpmcPortDelimiterErrors=cfcpmcPortDelimiterErrors, cfcpmCurrentPortErrorTable=cfcpmCurrentPortErrorTable, cfcpmiPortTruncatedFrames=cfcpmiPortTruncatedFrames, cfcpmcPortTxLinkResets=cfcpmcPortTxLinkResets, cfcpmtPortInvalidOrderedSets=cfcpmtPortInvalidOrderedSets, cfcpmMandatoryGroup=cfcpmMandatoryGroup, cfcpmcPortTruncatedFrames=cfcpmcPortTruncatedFrames, ciscoFcPmMIBObjects=ciscoFcPmMIBObjects, cfcpmiPortAddressErrors=cfcpmiPortAddressErrors, cfcpmiPortLinkFailures=cfcpmiPortLinkFailures, cfcpmiPortTxOfflineSequences=cfcpmiPortTxOfflineSequences, cfcpmtPortTxOfflineSequences=cfcpmtPortTxOfflineSequences, cfcpmiPortLinkResets=cfcpmiPortLinkResets, 
cfcpmtPortDelimiterErrors=cfcpmtPortDelimiterErrors, cfcpmtPortSignalLosses=cfcpmtPortSignalLosses, ciscoFcPmMIB=ciscoFcPmMIB, cfcpmtPortInvalidCRCs=cfcpmtPortInvalidCRCs, cfcpmTotalPortErrorTable=cfcpmTotalPortErrorTable, cfcpmtPortPrimSeqProtocolErrors=cfcpmtPortPrimSeqProtocolErrors, cfcpmiPortSynchLosses=cfcpmiPortSynchLosses, cfcpmcPortRxLinkResets=cfcpmcPortRxLinkResets, cfcpmcPortFramesTooLong=cfcpmcPortFramesTooLong)
| 137.238636 | 3,967 | 0.770928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,014 | 0.207585 |
c078ff18aa77981230542dee77a093f9d2cdb667 | 13,841 | py | Python | layer_manager/models.py | lueho/BRIT | 1eae630c4da6f072aa4e2139bc406db4f4756391 | ["MIT"] | null | null | null | layer_manager/models.py | lueho/BRIT | 1eae630c4da6f072aa4e2139bc406db4f4756391 | ["MIT"] | 4 | 2022-03-29T20:52:31.000Z | 2022-03-29T20:52:31.000Z | layer_manager/models.py | lueho/BRIT | 1eae630c4da6f072aa4e2139bc406db4f4756391 | ["MIT"] | null | null | null |
import django.contrib.gis.db.models as gis_models
from django.apps import apps
from django.db import models, connection
from django.urls import reverse
from distributions.models import TemporalDistribution, Timestep
from inventories.models import Scenario, InventoryAlgorithm
from materials.models import SampleSeries, MaterialComponent
from .exceptions import InvalidGeometryType, NoFeaturesProvided, TableAlreadyExists
class LayerField(models.Model):
"""
Holds all field definitions of GIS layers. Used to recreate a dynamically created model in case it is lost from
the apps registry.
"""
field_name = models.CharField(max_length=63)
data_type = models.CharField(max_length=10)
    def data_type_object(self):
        if self.data_type == 'float':
            return models.FloatField()
        elif self.data_type == 'int':
            return models.IntegerField()
        # NOTE: any other data_type (e.g. 'str') falls through and returns None;
        # model_field_type below is the variant that also handles 'str'.
@staticmethod
def model_field_type(data_type: str):
if data_type == 'float':
return models.FloatField(blank=True, null=True)
elif data_type == 'int':
return models.IntegerField(blank=True, null=True)
elif data_type == 'str':
return models.CharField(blank=True, null=True, max_length=200)
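# For example (illustrative): LayerField.model_field_type('str') returns
# models.CharField(blank=True, null=True, max_length=200).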
class LayerManager(models.Manager):
supported_geometry_types = ['Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon', ]
def create_or_replace(self, **kwargs):
results = kwargs.pop('results')
if 'features' not in results or len(results['features']) == 0:
raise NoFeaturesProvided(results)
else:
features = results['features']
fields = {}
# The data types of the fields are detected from their content. Any column that has only null values
# will be omitted completely
if features:
fields_with_unknown_datatype = list(features[0].keys())
for feature in features:
if not fields_with_unknown_datatype:
break
for key, value in feature.items():
if feature[key] and key in fields_with_unknown_datatype:
fields[key] = type(value).__name__
fields_with_unknown_datatype.remove(key)
# At this point there might be fields left out because there were only null values from which the
# data type could be detected. They should be omitted but this information should be logged
# TODO: add omitted columns info to log
kwargs['geom_type'] = fields.pop('geom')
if kwargs['geom_type'] not in self.supported_geometry_types:
raise InvalidGeometryType(kwargs['geom_type'])
kwargs['table_name'] = 'result_of_scenario_' + \
str(kwargs['scenario'].id) + '_algorithm_' + \
str(kwargs['algorithm'].id) + '_feedstock_' + \
str(kwargs['feedstock'].id)
layer, created = super().get_or_create(table_name=kwargs['table_name'], defaults=kwargs)
if created:
layer.add_layer_fields(fields)
feature_collection = layer.update_or_create_feature_collection()
layer.create_feature_table()
else:
if layer.is_defined_by(fields=fields, **kwargs):
feature_collection = layer.get_feature_collection()
feature_collection.objects.all().delete()
else:
layer.delete()
layer = super().create(**kwargs)
layer.add_layer_fields(fields)
feature_collection = layer.update_or_create_feature_collection()
layer.create_feature_table()
layer.delete_aggregated_values()
for feature in features:
feature_collection.objects.create(**feature)
if 'aggregated_values' in results:
layer.add_aggregated_values(results['aggregated_values'])
if 'aggregated_distributions' in results:
layer.add_aggregated_distributions(results['aggregated_distributions'])
return layer, feature_collection
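# Illustrative example of the naming scheme above (hypothetical ids): scenario 3,
# algorithm 7 and feedstock 2 yield the table 'result_of_scenario_3_algorithm_7_feedstock_2'.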
class Layer(models.Model):
"""
Registry of all created layers. This main model holds all meta information about each layer. When a new layer record
is created, another custom model named "features collection" is automatically generated, preserving the original
shape of the gis source dataset as much as required. The feature collection can be used to manage the actual
features of the layer. It will create a separate database table with the name given in "table_name" to store the
features.
"""
name = models.CharField(max_length=56)
geom_type = models.CharField(max_length=20)
table_name = models.CharField(max_length=200)
scenario = models.ForeignKey(Scenario, on_delete=models.CASCADE)
feedstock = models.ForeignKey(SampleSeries, on_delete=models.CASCADE)
algorithm = models.ForeignKey(InventoryAlgorithm, on_delete=models.CASCADE)
layer_fields = models.ManyToManyField(LayerField)
objects = LayerManager()
class Meta:
constraints = [
models.UniqueConstraint(fields=['table_name'], name='unique table_name')
]
def add_aggregated_values(self, aggregates: []):
for aggregate in aggregates:
LayerAggregatedValue.objects.create(name=aggregate['name'],
value=aggregate['value'],
unit=aggregate['unit'],
layer=self)
def add_aggregated_distributions(self, distributions):
for distribution in distributions:
dist = TemporalDistribution.objects.get(id=distribution['distribution'])
aggdist = LayerAggregatedDistribution.objects.create(name=distribution['name'],
distribution=dist,
layer=self)
for dset in distribution['sets']:
distset = DistributionSet.objects.create(
aggregated_distribution=aggdist,
timestep_id=dset['timestep']
)
for share in dset['shares']:
DistributionShare.objects.create(
component_id=share['component'],
average=share['average'],
standard_deviation=0.0, # TODO
distribution_set=distset
)
def add_layer_fields(self, fields: dict):
for field_name, data_type in fields.items():
field, created = LayerField.objects.get_or_create(field_name=field_name, data_type=data_type)
self.layer_fields.add(field)
def as_dict(self):
return {
'name': self.name,
'geom_type': self.geom_type,
'table_name': self.table_name,
'scenario': self.scenario,
'feedstock': self.feedstock,
'inventory_algorithm': self.algorithm,
'layer_fields': [field for field in self.layer_fields.all()],
'aggregated_results': [
{'name': aggregate.name,
'value': int(aggregate.value),
'unit': aggregate.unit}
for aggregate in self.layeraggregatedvalue_set.all()
]
}
def update_or_create_feature_collection(self):
"""
Dynamically creates model connected to this layer instance that is used to handle its features and store them
in a separate custom database table.
"""
# Empty app registry from any previous version of this model
model_name = self.table_name
if model_name in apps.all_models['layer_manager']:
del apps.all_models['layer_manager'][model_name]
attrs = {
'__module__': 'layer_manager.models',
'geom': getattr(gis_models, self.geom_type + 'Field')(srid=4326)
}
# Add all custom columns to model
for field in self.layer_fields.all():
attrs[field.field_name] = LayerField.model_field_type(field.data_type)
# Create model class and assign table_name
model = type(model_name, (models.Model,), attrs)
model._meta.layer = self
model._meta.db_table = self.table_name
return model
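    # Sketch of what the method above generates (hypothetical layer with geom_type
    # 'Point' and a single float field 'amount'):
    #   type(table_name, (models.Model,), {
    #       '__module__': 'layer_manager.models',
    #       'geom': gis_models.PointField(srid=4326),
    #       'amount': models.FloatField(blank=True, null=True),
    #   })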
def create_feature_table(self):
"""
Creates a new table with all given fields from a model
:return:
"""
feature_collection = self.get_feature_collection()
# Check if any table of the name already exists
with connection.cursor() as cursor:
cursor.execute(f"SELECT to_regclass('{feature_collection._meta.db_table}')")
if cursor.fetchone()[0]:
raise TableAlreadyExists
# After cleanup, now create the new version of the result table
with connection.schema_editor() as schema_editor:
schema_editor.create_model(feature_collection)
def feature_table_url(self):
return reverse('scenario_result_map', kwargs={'pk': self.scenario.id, 'algo_pk': self.algorithm.id})
def delete(self, **kwargs):
self.delete_feature_table()
del apps.all_models['layer_manager'][self.table_name]
super().delete()
def delete_feature_table(self):
"""
Deletes a table from a given model
:return:
"""
feature_collection = self.get_feature_collection()
with connection.cursor() as cursor:
cursor.execute(f"SELECT to_regclass('{feature_collection._meta.db_table}')")
if cursor.fetchone()[0] is None:
return
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(feature_collection)
def delete_aggregated_values(self):
LayerAggregatedValue.objects.filter(layer=self).delete()
def get_feature_collection(self):
"""
Returns the feature collection model that is used to manage the features connected to this layer.
"""
# If the model is already registered, return original model
if self.table_name in apps.all_models['layer_manager']:
return apps.all_models['layer_manager'][self.table_name]
else:
return self.update_or_create_feature_collection()
def is_defined_by(self, **kwargs):
fields = {field.field_name: field.data_type for field in self.layer_fields.all()}
comparisons = [
self.table_name == kwargs['table_name'],
self.geom_type == kwargs['geom_type'],
self.scenario == kwargs['scenario'],
self.algorithm == kwargs['algorithm'],
fields == kwargs['fields']
]
return all(comparisons)
class LayerAggregatedValue(models.Model):
"""
Class to hold all aggregated results from a result layer
"""
name = models.CharField(max_length=63)
value = models.FloatField()
unit = models.CharField(max_length=15, blank=True, null=True, default='')
layer = models.ForeignKey(Layer, on_delete=models.CASCADE)
DISTRIBUTION_TYPES = (
('seasonal', 'seasonal'), # Assumes array with length 12 for each month of the year
)
class LayerAggregatedDistribution(models.Model):
"""
Holds desired aggregated distributions for a layer. Intended for seasonal distributions broken down to feedstock
components but any other distribution works as well.
"""
name = models.CharField(max_length=255, null=True)
type = models.CharField(max_length=255, choices=DISTRIBUTION_TYPES, null=True)
distribution = models.ForeignKey(TemporalDistribution, on_delete=models.CASCADE, null=True)
layer = models.ForeignKey(Layer, on_delete=models.CASCADE, null=True)
@property
def shares(self):
return DistributionShare.objects.filter(distribution_set__aggregated_distribution=self)
@property
def components(self):
return MaterialComponent.objects.filter(
id__in=[share['component'] for share in self.shares.values('component').distinct()]
)
@property
def serialized(self):
dist = []
for component in self.components:
component_dist = {
'label': component.name,
'data': {},
'unit': 'Mg/a'
}
# data = {}
            for timestep in self.distribution.timestep_set.all():
                # TODO: find a better way to deal with the fact that there is not a
                # value for every component/timestep combination
                try:
                    share = self.shares.get(component=component, distribution_set__timestep=timestep)
                    component_dist['data'][timestep.name] = share.average
                except DistributionShare.DoesNotExist:
                    pass
# component_dist['data'].append(data)
dist.append(component_dist)
return dist
class DistributionSet(models.Model):
timestep = models.ForeignKey(Timestep, on_delete=models.CASCADE, null=True)
aggregated_distribution = models.ForeignKey(LayerAggregatedDistribution, on_delete=models.CASCADE, null=True)
class DistributionShare(models.Model):
distribution_set = models.ForeignKey(DistributionSet, on_delete=models.CASCADE)
component = models.ForeignKey(MaterialComponent, on_delete=models.CASCADE, null=True)
average = models.FloatField()
standard_deviation = models.DecimalField(decimal_places=2, max_digits=5)
| 40.589443 | 134 | 0.627556 | 13,282 | 0.959613 | 0 | 0 | 1,468 | 0.106062 | 0 | 0 | 3,263 | 0.235749 |
c07be394b73091661999efe65e37d5d6f073209b | 4,205 | py | Python | src/evaluation/regression.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | ["Apache-2.0"] | null | null | null | src/evaluation/regression.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | ["Apache-2.0"] | null | null | null | src/evaluation/regression.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | ["Apache-2.0"] | null | null | null |
from evaluation import MetricScorer
from .formulas import mar, sa, sd, sdar, effect_size, mmre, mdmre, pred25, pred40
from baseline import MARP0
class MAR(MetricScorer):
def setConstants(self):
self.name = "mar"
self.problem = "regression"
self.greater_is_better = False
self.lo = 0
        self.hi = 20000 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return mar(self, y_true, y_pred)
class SDAR(MetricScorer):
def setConstants(self):
self.name = "sdar"
self.problem = "regression"
self.greater_is_better = False
self.lo = 0
        self.hi = 200000 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return sdar(self, y_true, y_pred)
class SA(MetricScorer):
def setConstants(self):
self.name = "sa"
self.problem = "regression"
self.greater_is_better = True
self.lo = 0
        self.hi = 1 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return sa(self, y_true, y_pred)
class SD(MetricScorer):
def setConstants(self):
self.name = "sd"
self.problem = "regression"
self.greater_is_better = True
self.lo = 0
        self.hi = 1 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return sd(self, y_true, y_pred)
class EFFECTSIZE(MetricScorer):
def setConstants(self):
self.name = "effect size"
self.problem = "regression"
self.greater_is_better = True
self.lo = 0
        self.hi = 1 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return effect_size(self, y_true, y_pred)
class MMRE(MetricScorer):
def setConstants(self):
self.name = "mmre"
self.problem = "regression"
self.greater_is_better = False
self.lo = 0
        self.hi = 20000 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return mmre(self, y_true, y_pred)
class MdMRE(MetricScorer):
def setConstants(self):
self.name = "mdmre"
self.problem = "regression"
self.greater_is_better = False
self.lo = 0
        self.hi = 20000 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return mdmre(self, y_true, y_pred)
class PRED25(MetricScorer):
def setConstants(self):
self.name = "pred25"
self.problem = "regression"
self.greater_is_better = True
self.lo = 0
        self.hi = 1 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return pred25(self, y_true, y_pred)
class PRED40(MetricScorer):
def setConstants(self):
self.name = "pred40"
self.problem = "regression"
self.greater_is_better = True
self.lo = 0
        self.hi = 1 # Not really, but the upper bound is infinity
self.baseline = MARP0
self.unifeature = False
self.composite = None
def _score_func(self, y_true, y_pred, X=None, estimator=None):
return pred40(self, y_true, y_pred) | 31.616541 | 81 | 0.617122 | 4,039 | 0.960523 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.127943 |
c07ca44e33380193eabc6f8bec1ebe24f8d013c9 | 8,212 | py | Python | bin/CAD/Abaqus/AbaqusGeometry.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null | bin/CAD/Abaqus/AbaqusGeometry.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null | bin/CAD/Abaqus/AbaqusGeometry.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null |
"""
AbaqusGeometry.py
For use with Abaqus 6.13-1 (Python 2.6.2).
Created by Ozgur Yapar <[email protected]>
Robert Boyles <[email protected]>
- Includes modules which take care of geometrical operations
in the part and assembly level.
"""
import re
import math
from numpy import array, cross, transpose, vstack, dot
from abaqusConstants import *
import numpy.linalg as LA
import string as STR
def regexFriendly(inString):
""" Clean up coordinates read from STEP file, prior to applying regular expressions. """
outString = STR.replace(inString, '\'', '%')
outString = STR.replace(outString, '(', '')
outString = STR.replace(outString, ')', ',')
return outString
def coordinate(stepString):
""" Extract tuple of cartesian coordinates from STEP coordinate string. """
e = re.compile(',\S+,,') # regular expression
coordFind = e.search(stepString) # extract substring containing coordinates
coordList = coordFind.group(0).strip(',').split(',') # separate x, y, and z coordinates by commas
coords = (float(coordList[0]), float(coordList[1]),
float(coordList[2])) # convert coordinate strings to a tuple of floats
return coords # return the coordinate tuple
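# Worked example (hypothetical STEP fragment): after regexFriendly(),
# "CARTESIAN_POINT('',(1.5,2.0,3.25))" becomes "CARTESIAN_POINT%%,1.5,2.0,3.25,,",
# and coordinate() then returns (1.5, 2.0, 3.25).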
# calculates transformation matrix between two coordinate systems as defined in STEP
def get3DTransformArray(fromDir1, fromDir2, toDir1, toDir2):
""" Calculate transformation matrix between two coordinate systems as defined in STEP. """
fromDir1 = array(fromDir1) # convert u1 vector to an array object
fromDir2 = array(fromDir2) # convert u2 vector to an array object
fromDir3 = cross(fromDir1, fromDir2) # extrapolate u3 vector from u1 and u2
toDir1 = array(toDir1) # convert v1 vector to an array object
toDir2 = array(toDir2) # convert v2 vector to an array object
toDir3 = cross(toDir1, toDir2) # extrapolate v3 vector from v1 and v2
inva = LA.inv(transpose(vstack([fromDir1, fromDir2, fromDir3])))
b = transpose(vstack([toDir1, toDir2, toDir3]))
transformArray = dot(b, inva)
return transformArray
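# In matrix terms (sketch): transformArray = B . inv(A), where A has columns
# (u1, u2, u1 x u2) and B has columns (v1, v2, v1 x v2), so each 'from' basis
# vector is mapped onto the corresponding 'to' basis vector.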
def unv(center, planarA, planarB):
""" Use vector operations to get unit normal vector, given a center coordinate and two planar coordinates. """
center = array(center)
planarA = array(planarA)
planarB = array(planarB)
vA = planarA - center
vB = planarB - center
xV = cross(vA, vB)
return xV/LA.norm(xV)
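# Sanity example (hypothetical points): unv((0, 0, 0), (1, 0, 0), (0, 1, 0))
# returns array([0., 0., 1.]), the unit normal of the xy-plane.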
def transCoord(fromCoord, transformArray, translationVector):
""" Transform/translate a cartesian point from one coordinate system to another. """
vprod = dot(transformArray, fromCoord)
vprod = vprod + translationVector
toCoord = tuple(vprod)
return toCoord
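# e.g. (illustrative): transCoord((1, 0, 0), numpy.eye(3), (0, 0, 5)) gives
# (1.0, 0.0, 5.0) -- an identity rotation followed by a +5 shift along z.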
def asmRecursion(asm, subAsms, asmParts):
""" Recursively identifies parts in sub-assemblies, in the order they are imported from STEP. """
parts = []
try:
for child in subAsms[asm]:
if child in subAsms:
parts.extend(asmRecursion(child, subAsms, asmParts))
else:
parts.extend(asmParts[child])
except KeyError:
pass
if asm in asmParts:
parts.extend(asmParts[asm])
return parts
def coordTransform(localTMs, localTVs, asm, subAsms, asmParts, localCoords):
"""
Iterate through sub-assemblies and top-level parts to transform/translate
every datum point to assembly coordinates; uses transCoord()
Note: Ignores top-level datums in highest assembly, which will not exist
in a CyPhy assembly anyway
"""
globalCoords = {} # create dictionary object to hold new point library
if asm in subAsms: # if assembly has sub-assemblies:
for subAsm in subAsms[asm]: # for each sub-assembly in the assembly:
subCoords = coordTransform(localTMs, localTVs, subAsm, # get point library local to sub-assembly
subAsms, asmParts, localCoords)
for part in subCoords.keys(): # for each component in chosen sub-assembly:
globalCoords.update([[part, {}]]) # create new entry in globalCoords
for (point, coord) in subCoords[part].iteritems(): # for each point in part/sub-sub-assembly:
globalCoords[part].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[subAsm], localTVs[subAsm])]])
globalCoords.update([[subAsm, {}]]) # create entry for sub-assembly in globalCoords
for (point, coord) in localCoords[subAsm].iteritems():
# for each point specified at top level of that sub-assembly:
globalCoords[subAsm].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[subAsm], localTVs[subAsm])]])
if asm in asmParts: # if assembly has top-level parts:
for part in asmParts[asm]: # for each top-level part:
globalCoords.update([[part, {}]]) # create new entry in globalCoords
for (point, coord) in localCoords[part].iteritems(): # for each point in part:
globalCoords[part].update([[point.upper(), transCoord( # translate/transform point to globalCoords
array(coord), localTMs[part], localTVs[part])]])
return globalCoords
def myMask(idnums):
""" Produce mask string for getSequenceFromMask(...) from a feature ID or set of IDs. """
try:
idnums = tuple(idnums) # make the input a tuple!
except TypeError: # if input is not iterable:
idnums = (idnums,) # make it a tuple anyway!
powersum = 0 # integer to hold mask number
for num in idnums: # iterating through input IDs:
powersum += 2**num # add 2**ID to powersum
rawmask = hex(powersum)[2:] # convert powermask to hexadecimal
rawmask = STR.rstrip(rawmask, 'L') # strip "long" character, if necessary
if max(idnums) < 32: # if hex number is 8 digits or less:
mask = '[#' + rawmask + ' ]' # create mask
else: # if hex number is >8 digits:
maskpieces = [] # container for fragments of hex string
piececount = int(math.ceil(len(rawmask)/8)) # number of times to split hex string
for i in range(piececount): # for each split needed:
maskpieces.append(rawmask[-8:]) # append last 8 characters of hex string to fragment list
rawmask = rawmask[:-8] # trim last 8 characters from hex string
maskpieces.append(rawmask) # append remaining hex string to fragment list
mask = '[#' + STR.join(maskpieces, ' #') + ' ]' # join fragments, using the correct delimiters, to create mask
return mask
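# Examples (hypothetical feature IDs): myMask(3) -> '[#8 ]' since 2**3 == 0x8;
# myMask((0, 1, 2)) -> '[#7 ]' since 1 + 2 + 4 == 7.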
def toBC(constraint):
""" Translates a degree of freedom as read from the XML to the appropriate SymbolicConstant. """
if constraint == 'FIXED':
return 0
elif constraint == 'FREE':
return UNSET
else:
return float(constraint) | 53.673203 | 121 | 0.565392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,163 | 0.385168 |
c07dacc643d713f89a754dcc9e2a89ae590b2576 | 2,143 | py | Python | analysis/11-compress-jacobians.py | lmjohns3/cube-experiment | ab6d1a9df95efebc369d184ab1c748d73d5c3313 | ["MIT"] | null | null | null | analysis/11-compress-jacobians.py | lmjohns3/cube-experiment | ab6d1a9df95efebc369d184ab1c748d73d5c3313 | ["MIT"] | null | null | null | analysis/11-compress-jacobians.py | lmjohns3/cube-experiment | ab6d1a9df95efebc369d184ab1c748d73d5c3313 | ["MIT"] | null | null | null |
import climate
import glob
import gzip
import io
import lmj.cubes
import logging
import numpy as np
import os
import pandas as pd
import pickle
import theanets
def compress(source, k, activation, **kwargs):
fns = sorted(glob.glob(os.path.join(source, '*', '*_jac.csv.gz')))
logging.info('%s: found %d jacobians', source, len(fns))
# the clipping operation affects about 2% of jacobian values.
dfs = [np.clip(pd.read_csv(fn, index_col='time').dropna(), -10, 10)
for fn in fns]
B, N = 128, dfs[0].shape[1]
logging.info('loaded %s rows of %d-D data from %d files',
sum(len(df) for df in dfs), N, len(dfs))
def batch():
batch = np.zeros((B, N), 'f')
for b in range(B):
a = np.random.randint(len(dfs))
batch[b] = dfs[a].iloc[np.random.randint(len(dfs[a])), :]
return [batch]
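    # batch is passed as the training-data callable below: every call draws one
    # random minibatch of B rows sampled across all loaded jacobian files.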
pca = theanets.Autoencoder([N, (k, activation), (N, 'tied')])
pca.train(batch, **kwargs)
key = '{}_k{}'.format(activation, k)
if 'hidden_l1' in kwargs:
key += '_s{hidden_l1:.4f}'.format(**kwargs)
for df, fn in zip(dfs, fns):
df = pd.DataFrame(pca.encode(df.values.astype('f')), index=df.index)
s = io.StringIO()
df.to_csv(s, index_label='time')
out = fn.replace('_jac', '_jac_' + key)
with gzip.open(out, 'wb') as handle:
handle.write(s.getvalue().encode('utf-8'))
logging.info('%s: saved %s', out, df.shape)
out = os.path.join(source, 'pca_{}.pkl'.format(key))
pickle.dump(pca, open(out, 'wb'))
@climate.annotate(
root='load data files from subject directories in this path',
k=('compress to this many dimensions', 'option', None, int),
activation=('use this activation function', 'option'),
)
def main(root, k=1000, activation='relu'):
for subject in lmj.cubes.Experiment(root).subjects:
compress(subject.root, k, activation,
momentum=0.9,
hidden_l1=0.01,
weight_l1=0.01,
monitors={'hid1:out': (0.01, 0.1, 1, 10)})
if __name__ == '__main__':
climate.call(main)
| 30.614286 | 76 | 0.591227 | 0 | 0 | 0 | 0 | 512 | 0.238917 | 0 | 0 | 422 | 0.19692 |
c07f103a6a6e92a6245209f932b8d90c064fd018 | 21,369 | py | Python | commerce/views.py | zlkca/ehetuan-api | da84cd4429bd33e8fe191327ec267bf105f41453 | ["MIT"] | 1 | 2020-05-27T18:17:01.000Z | 2020-05-27T18:17:01.000Z | commerce/views.py | zlkca/ehetuan-api | da84cd4429bd33e8fe191327ec267bf105f41453 | ["MIT"] | 6 | 2020-06-05T18:14:56.000Z | 2021-09-07T23:53:08.000Z | commerce/views.py | zlkca/ehetuan-api | da84cd4429bd33e8fe191327ec267bf105f41453 | ["MIT"] | null | null | null |
import json
import os
import logging
from datetime import datetime
from django.db.models import Q,Count
from django.http import JsonResponse
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.conf import settings
from rest_framework_jwt.settings import api_settings
from django.core.exceptions import ObjectDoesNotExist#EmptyResultSet, MultipleObjectsReturned
from django.contrib.auth import get_user_model
from commerce.models import Restaurant, Picture, Product, Category, Order, OrderItem, Style, PriceRange, FavoriteProduct
from account.models import Province, City, Address
from utils import to_json, obj_to_json, get_data_from_token
logger = logging.getLogger(__name__)
def processPictures(product, pictures):
    # product --- Product model object
    # pictures --- list of dicts passed from the front end
reindex = False
pic = None
for picture in pictures:
try:
pic = Picture.objects.get(product_id=product.id, index=picture['index'])
except:
pic = None
if pic:
if picture['status'] == 'removed':
reindex = True
rmPicture(pic)
elif picture['status'] == 'changed':
savePicture(product, pic, picture)
pic.save()
else:# new
pic = Picture()
savePicture(product, pic, picture)
if reindex:
reindexPicture(product.id)
def savePicture(product, pic, picture):
# product --- Product model object
# pic --- Picture model object
# picture --- dict from front end
pic.index = picture['index']
pic.name = picture['name']
pic.product = product
pic.image.save(picture['image'].name, picture['image'].file, True)
pic.save()
def getDefaultPicture(pictures):
    # Return the first picture's image name, or '' when no image is available.
    if pictures.count() > 0 and pictures[0].image.name:
        return pictures[0].image.name
    return ''
def rmPicture(pic):
try:
os.remove(pic.image.path)
except:
print('remove image failed')
pic.image.delete()
pic.delete()
def reindexPicture(pid):
# pid --- product id
pics = Picture.objects.filter(product_id=pid).order_by('index')
i = 0
for pic in pics:
pic.index = i
i = i + 1
pic.save()
def saveProduct(params):
_id = params.get('id')
if _id:
item = Product.objects.get(id=_id)
else:
item = Product()
item.name = params.get('name')
item.description = params.get('description')
item.price = params.get('price')
item.currency = params.get('currency')
restaurant_id = params.get('restaurant_id')
try:
item.restaurant = Restaurant.objects.get(id=restaurant_id)
except:
item.restaurant = None
#item.category = category
item.save()
# item.categories.clear()
# Assume there is only one image
# n_pics = int(params.get('n_pictures'))
# pictures = []
# for i in range(n_pics):
# name = params.get('name%s'%i)
# status = params.get('image_status%s'%i)
# image = req.FILES.get('image%s'%i)
# pictures.append({'index':i,'name':name, 'status':status, 'image':image})
#
# self.processPictures(item, pictures)
#
# # select default picture
# pics = Picture.objects.filter(product_id=item.id)
# item.fpath = self.getDefaultPicture(pics)
# item.save()
return item
def find_restaurants_by_location(lat, lng, distance):
query = """SELECT *,
(
3959 *
acos(cos(radians(%s)) *
cos(radians(lat)) *
cos(radians(lng) -
radians(%s)) +
sin(radians(%s)) *
sin(radians(lat )))
) AS distance
FROM commerce_restaurant
HAVING distance < %s
            ORDER BY distance LIMIT 0, 20;"""
    try:
        # Pass the values as query parameters instead of interpolating them into
        # the SQL string, so user-supplied coordinates cannot inject SQL.
        return Restaurant.objects.raw(query, [lat, lng, lat, distance])
    except Exception:
        return None
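# Illustrative call (hypothetical coordinates): find_restaurants_by_location(43.7, -79.4, 25)
# returns up to 20 Restaurant rows within ~25 miles, ordered by distance.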
@method_decorator(csrf_exempt, name='dispatch')
class RestaurantView(View):
def getList(self, req):
lat = req.GET.get('lat')
lng = req.GET.get('lng')
        distance = 25 # miles (the raw SQL in find_restaurants_by_location uses the Earth radius in miles, 3959)
restaurants = []
admin_id = req.GET.get('admin_id')
if admin_id: # need address
try:
item = Restaurant.objects.get(admin_id=admin_id)
restaurant = to_json(item)
restaurant['address'] = self.getAddress(item)
return JsonResponse({'data':[restaurant]})
except Exception:
return JsonResponse({'data':[]})
elif lat and lng: # do not need address
            restaurants = find_restaurants_by_location(lat, lng, distance) or []
else:
try:
restaurants = Restaurant.objects.all()#.annotate(n_products=Count('product'))
except Exception:
return JsonResponse({'data':[]})
rs =[]
for r in restaurants:
rs.append(to_json(r))
return JsonResponse({'data': rs })
def getAddress(self, restaurant):
addr_id = restaurant.address.id
item = None
try:
item = Address.objects.get(id=addr_id)
except:
item = None
return to_json(item)
def get(self, req, *args, **kwargs):
pid = kwargs.get('id')
if pid:
try:
item = Restaurant.objects.get(id=int(pid))
p = obj_to_json(item, False)
p['address'] = self.getAddress(item)
return JsonResponse({'data':p})
except Exception as e:
                print(e)  # Exception objects have no .message attribute in Python 3
return JsonResponse({'data':''})
else: # get list
return self.getList(req)#JsonResponse({'data':''})
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Restaurant.objects.get(id=pid)
instance.delete()
items = Restaurant.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
params = req.POST
authorizaion = req.META['HTTP_AUTHORIZATION']
token = authorizaion.replace("Bearer ", "")
data = get_data_from_token(token)
# if data and data['username']=='admin':
_id = params.get('id')
if _id:
item = Restaurant.objects.get(id=_id)
else:
item = Restaurant()
item.name = params.get('name')
item.description = params.get('description')
item.lat = float(params.get('lat'))
item.lng = float(params.get('lng'))
item.created = item.created if item.created else datetime.now()
addr_id = params.get('address_id')
if(addr_id):
addr = Address.objects.get(id=addr_id)
self.saveAddress(addr, params)
item.address = addr
else:
addr = Address()
self.saveAddress(addr, params)
item.address = addr
item.save()
image_status = params.get('image_status')
if image_status == 'changed':
self.rmPicture(item)
image = req.FILES.get("image")
item.image.save(image.name, image.file, True)
item.save()
return JsonResponse({'data':to_json(item)})
def saveAddress(self, addr1, params):
addr1.street = params.get('street')
addr1.sub_locality = params.get('sub_locality')
addr1.postal_code = params.get('postal_code')
addr1.lat = params.get('lat')
addr1.lng = params.get('lng')
addr1.province = params.get('province')
addr1.city = params.get('city')
addr1.save()
def rmPicture(self, item):
try:
os.remove(item.image.path)
except:
print('remove image failed')
item.image.delete()
@method_decorator(csrf_exempt, name='dispatch')
class CategoryView(View):
def getList(self):
categories = []
try:
categories = Category.objects.all()#.annotate(n_products=Count('product'))
except Exception as e:
logger.error('Get category Exception:%s'%e)
return JsonResponse({'data':[]})
return JsonResponse({'data': to_json(categories)})
def get(self, req, *args, **kwargs):
cid = kwargs.get('id')
if cid:
cid = int(cid)
try:
item = Category.objects.get(id=cid)
return JsonResponse({'data':to_json(item)})
except Exception as e:
return JsonResponse({'data':''})
else:
return self.getList()
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Category.objects.get(id=pid)
instance.delete()
items = Category.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
ubody = req.body.decode('utf-8')
params = json.loads(ubody)
_id = params.get('id')
if _id:
item = Category.objects.get(id=_id)
else:
item = Category()
item.name = params.get('name')
item.description = params.get('description')
# item.status = params.get('status')
item.save()
return JsonResponse({'data':to_json(item)})
@method_decorator(csrf_exempt, name='dispatch')
class ProductListView(View):
def get(self, req, *args, **kwargs):
''' get product list
'''
products = []
cats = req.GET.get('cats')
restaurants = req.GET.get('ms')
colors = req.GET.get('colors')
keyword = req.GET.get('keyword')
kwargs = {}
q = None
if cats:
q = Q(categories__id__in=cats.split(','))
if restaurants:
if q:
q = q | Q(restaurant__id__in=restaurants.split(','))
else:
q = Q(restaurant__id__in=restaurants.split(','))
if colors:
if q:
q = q | Q(color__id__in=colors.split(','))
else:
                q = Q(color__id__in=colors.split(','))  # was filtering by restaurants here by mistake
restaurant_id = req.GET.get('restaurant_id')
category_id = req.GET.get('category_id')
if restaurant_id:
products = Product.objects.filter(restaurant_id=restaurant_id).annotate(n_likes=Count('favoriteproduct'))
elif category_id:
products = Product.objects.filter(category_id=category_id).annotate(n_likes=Count('favoriteproduct'))
elif cats or restaurants or colors:
if keyword:
products = Product.objects.filter(q).filter(Q(name__icontains=keyword)
|Q(categories__name__icontains=keyword)
|Q(restaurant__name__icontains=keyword)
|Q(color__name__icontains=keyword))
else:
products = Product.objects.filter(q)
else:
if keyword:
products = Product.objects.filter(Q(name__icontains=keyword)
|Q(categories__name__icontains=keyword)
|Q(restaurant__name__icontains=keyword)
|Q(color__name__icontains=keyword))
else:
products = Product.objects.filter().annotate(n_likes=Count('favoriteproduct'))
ps = to_json(products)
for p in ps:
try:
pics = Picture.objects.filter(product_id=p['id'])
except:
pics = None
if pics:
p['pictures'] = to_json(pics)
#s = []
# for product in products:
# items = Item.objects.filter(product_id=product.id)
# p = product.to_json()
# p['n_likes'] = product.n_likes
# p['n_items'] = len(items)
# p['items'] = [items[0].to_json()]
# fp = None
# try:
# fp = FavoriteProduct.objects.get(user_id=uid)
# except:
# pass
#
# p['like'] = fp.status if fp else False
# s.append(p)
return JsonResponse({'data':ps})
def post(self, req, *args, **kwargs):
authorizaion = req.META['HTTP_AUTHORIZATION']
token = authorizaion.replace("Bearer ", "")
data = get_data_from_token(token)
for key in req.POST:
params = json.loads(req.POST[key])
index = int(key.replace('info_', ''))
product = saveProduct(params)
image_status = params.get('image_status')
if image_status == 'unchange':
pass
elif image_status == 'changed' or image_status == 'add':
pictures = []
image = req.FILES.get('image%s'%index)
pictures.append({'index':0,'name':'', 'status':image_status, 'image':image})
processPictures(product, pictures)
# select default picture
pics = Picture.objects.filter(product_id=product.id)
product.fpath = getDefaultPicture(pics)
product.save()
return JsonResponse({'data':[]})
@method_decorator(csrf_exempt, name='dispatch')
class ProductFilterView(View):
def get(self, req, *args, **kwargs):
        categories = Category.objects.all()
        styles = Style.objects.all()
        price_ranges = PriceRange.objects.all()
        # QuerySets are not JSON-serializable; convert them before building the response
        return JsonResponse({'categories': to_json(categories), 'styles': to_json(styles), 'price_ranges': to_json(price_ranges)})
@method_decorator(csrf_exempt, name='dispatch')
class ProductView(View):
def get(self, req, *args, **kwargs):
''' get product detail with multiple items
'''
pid = int(kwargs.get('id'))
if pid:
try:
products = Product.objects.filter(id=pid)
except Exception as e:
return JsonResponse({'product':''})
else:
return JsonResponse({'product':''})
product = products[0]
pics = Picture.objects.filter(product_id=product.id)
ps = []
for pic in pics:
ps.append(to_json(pic))
p = to_json(product)
p['pictures'] = ps
return JsonResponse({'data':p})
def delete(self, req, *args, **kwargs):
pid = int(kwargs.get('id'))
if pid:
instance = Product.objects.get(id=pid)
instance.delete()
items = Product.objects.filter().order_by('-updated')
return JsonResponse({'data':to_json(items)})
return JsonResponse({'data':[]})
def post(self, req, *args, **kwargs):
params = req.POST
authorizaion = req.META['HTTP_AUTHORIZATION']
token = authorizaion.replace("Bearer ", "")
data = get_data_from_token(token)
        if data and (data['username'] == 'admin' or data['utype'] == 'business'):  # parentheses needed: 'and' binds tighter than 'or'
item = saveProduct(params)
item.categories.clear()
categories = params.get('categories').split(',')
for cat_id in categories:
try:
category = Category.objects.get(id=cat_id)
except:
category = None
item.categories.add(category)
n_pics = int(params.get('n_pictures'))
pictures = []
for i in range(n_pics):
name = params.get('name%s'%i)
status = params.get('image_status%s'%i)
image = req.FILES.get('image%s'%i)
pictures.append({'index':i,'name':name, 'status':status, 'image':image})
processPictures(item, pictures)
# select default picture
pics = Picture.objects.filter(product_id=item.id)
item.fpath = getDefaultPicture(pics)
item.save()
return JsonResponse({'tokenValid': True,'data':to_json(item)})
return JsonResponse({'tokenValid':False, 'data':''})
@method_decorator(csrf_exempt, name='dispatch')
class OrderView(View):
def getList(self, rid=None):
orders = []
try:
if rid:
orders = Order.objects.filter(restaurant_id=rid).order_by('created')
else:
orders = Order.objects.all().order_by('created')#.annotate(n_products=Count('product'))
r = to_json(orders)
for order in orders:
items = OrderItem.objects.filter(order_id=order.id)
ri = next((x for x in r if x['id'] == order.id), None)
ri['items'] = to_json(items)
ri['user']['username'] = order.user.username
except Exception as e:
logger.error('Get Order Exception:%s'%e)
return JsonResponse({'data':[]})
return JsonResponse({'data': r})
def get(self, req, *args, **kwargs):
cid = kwargs.get('id')
if cid:
cid = int(cid)
try:
item = Order.objects.get(id=cid)
return JsonResponse({'data':to_json(item)})
except Exception as e:
return JsonResponse({'data':''})
else:
rid = req.GET.get('restaurant_id')
return self.getList(rid)
def post(self, req, *args, **kwargs):
authorizaion = req.META['HTTP_AUTHORIZATION']
token = authorizaion.replace("Bearer ", "")
data = get_data_from_token(token)
if data:
uid = data['id']
ubody = req.body.decode('utf-8')
d = json.loads(ubody)
            # Expected payload, e.g.:
            # {'orders': [{'restaurant_id': 2,
            #              'items': [{'pid': 1, 'name': '土豆排骨', 'price': '12.000', 'restaurant_id': 2, 'quantity': 4},
            #                        {'pid': 2, 'name': '泡椒豆腐', 'price': '12.000', 'restaurant_id': 2, 'quantity': 2}]}],
            #  'user_id': 7}
orders = d.get("orders")
for data in orders:
rid = data['restaurant_id']
items = data['items']
order = Order()
try:
restaurant = Restaurant.objects.get(id=rid)
user = get_user_model().objects.get(id=uid)
order.restaurant = restaurant
order.user = user
order.save()
except Exception as e:
print(e)
if order.id:
for item in items:
orderItem = OrderItem()
orderItem.order = order
orderItem.product = Product.objects.get(id=item['pid'])
orderItem.quantity = item['quantity']
orderItem.product_name = orderItem.product.name
orderItem.price = orderItem.product.price
orderItem.save()
return JsonResponse({'success': True})
return JsonResponse({'success':False})
@method_decorator(csrf_exempt, name='dispatch')
class FavoriteProductView(View):
def get(self, req, *args, **kwargs):
uid = req.GET.get('user_id')
ps = Product.objects.annotate(n_likes=Count('favoriteproduct'))
favorites = []
for p in ps:
product = p.to_json()
product['n_likes'] = p.n_likes
fp = None
try:
                fp = FavoriteProduct.objects.get(user_id=uid, product_id=p.id)  # look up the favorite for this product, not just the user
except:
pass
product['favorate'] = fp.status if fp else False
favorites.append(product)
return JsonResponse({'favorites':favorites})
def post(self, req, *args, **kwargs):
ubody = req.body.decode('utf-8')
d = json.loads(ubody)
uid = d.get("user_id")
pid = d.get("product_id")
try:
like = FavoriteProduct.objects.get(user_id=uid, product_id=pid)
like.delete()
except ObjectDoesNotExist:
like = FavoriteProduct()
like.product = Product.objects.get(id=pid)
like.user = get_user_model().objects.get(id=uid)
like.status = True
like.save()
return JsonResponse({'success':'true'})
| 36.15736 | 126 | 0.530675 | 16,697 | 0.780781 | 0 | 0 | 17,033 | 0.796493 | 0 | 0 | 3,989 | 0.186533 |
c07fe33cae576add35e02a5f464a4a05467459e8 | 5,666 | py | Python | api/views.py | huatxu/erasmusbackend | d8f86ee857a292a133106e75e9c920b905b5b10d | ["MIT"] | null | null | null | api/views.py | huatxu/erasmusbackend | d8f86ee857a292a133106e75e9c920b905b5b10d | ["MIT"] | null | null | null | api/views.py | huatxu/erasmusbackend | d8f86ee857a292a133106e75e9c920b905b5b10d | ["MIT"] | null | null | null |
from django.shortcuts import render
from api.models import Comida, Cerveza, Titulo, TipoComida
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import serializers
import csv
import os
class CervezaSerializer(serializers.ModelSerializer):
class Meta:
model = Cerveza
fields = ['id', 'nombre', 'estilo', 'pais', 'pais_ingles', 'alcohol', 'color', 'amargor', 'descripcion', 'descripcion_ingles', 'disponible', 'imagen', 'artesanal', 'tipo', 'recomendada', 'formato', 'precio', 'formato_2', 'precio_2', 'formato_3', 'precio_3', 'sin_gluten', 'aparece', 'barril']
class ComidaList(APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, format=None):
comidas = Comida.objects.filter(disponible=True, tipo__aparece=True).order_by('tipo__orden', 'orden', 'nombre')
serializer = ComidaSerializer(comidas, many=True)
return Response(serializer.data)
class ComidaSerializer(serializers.ModelSerializer):
tipo = serializers.SerializerMethodField('get_tipo')
def get_tipo(self, obj):
return obj.tipo.nombre + '-' + obj.tipo.nombre_ingles
class Meta:
model = Comida
fields = ('id', 'nombre', 'nombre_ingles', 'descripcion', 'descripcion_ingles', 'tipo', 'precio', 'precio_2', 'altramuces', 'apio', 'cacahuete', 'crustaceo', 'gluten', 'huevo', 'lacteos', 'moluscos', 'mostaza', 'nueces', 'pescado', 'sesamo', 'soja', 'sulfitos', 'disponible')
class TituloSerializer(serializers.ModelSerializer):
class Meta:
model = Titulo
fields = ['titulo_1', 'titulo_1_ingles', 'titulo_2', 'titulo_2_ingles']
class CervezaList(APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, format=None):
cervezas = Cerveza.objects.all()
serializer = CervezaSerializer(cervezas, many=True)
titulos = Titulo.objects.first()
titulosSerializer = TituloSerializer(titulos)
return Response({"titulos": titulosSerializer.data, "cervezas": serializer.data})
def cast_bool(entry):
try:
if not entry:
return False
trues = ['sí', 'si']
return entry.lower() in trues
except Exception:
return False
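# Illustrative behaviour: cast_bool('Sí') -> True, cast_bool('no') -> False,
# cast_bool('') -> False.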
def cast_price(entry):
result = entry
result = result.replace('€', '')
result = result.replace(',', '.')
result = result.strip()
if result:
return float(result)
return 0.0
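# Illustrative behaviour: cast_price('3,50 €') -> 3.5, cast_price('') -> 0.0.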
def load_csv():
with open(f'{os.path.dirname(os.path.abspath(__file__))}/carta-cervezas.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
try:
cerveza = Cerveza.objects.create(
nombre=row['Nombre'],
estilo = row['Estilo'],
pais = row['País'],
pais_ingles = row['País Ingles'],
alcohol = row['Alcohol'],
color = row['Color'],
amargor = row['Amargor'],
descripcion = row['Descripcion'],
descripcion_ingles = row['Descripcion ingles'],
disponible = cast_bool(row['Disponible']),
imagen = row['Imagen'],
artesanal = cast_bool(row['Artesanal']),
tipo = row['Tipo'],
recomendada = cast_bool(row['Recomendada']),
formato = row['Formato'],
precio = cast_price(row['Precio']),
formato_2 = row['formato 2'],
precio_2 = cast_price(row['precio 2']),
formato_3 = row['formato 3'],
precio_3 = cast_price(row['precio 3']),
sin_gluten = cast_bool(row['Sin gluten']),
aparece = cast_bool(row['Aparece']),
barril = cast_bool(row['Barril'])
)
cerveza.save()
except Exception:
pass
with open(f'{os.path.dirname(os.path.abspath(__file__))}/carta-comida.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
try:
comida = Comida.objects.create(
nombre=row['Nombre'],
nombre_ingles=row['Nombre ingles'],
descripcion=row['Descripcion'],
descripcion_ingles=row['Descripcion ingles'],
tipo=row['Tipo'],
precio=cast_price(row['Precio']),
precio_2=cast_price(row['precio 2']),
altramuces=cast_bool(row['Altramuces']),
apio=cast_bool(row['Apio']),
cacahuete=cast_bool(row['Cacahuete']),
crustaceo=cast_bool(row['Crustaceo']),
gluten=cast_bool(row['Gluten']),
huevo=cast_bool(row['Huevo']),
lacteos=cast_bool(row['Lacteos']),
moluscos=cast_bool(row['Moluscos']),
mostaza=cast_bool(row['Mostaza']),
nueces=cast_bool(row['Nueces']),
pescado=cast_bool(row['Pescado']),
sesamo=cast_bool(row['Sesamo']),
soja=cast_bool(row['Soja']),
sulfitos=cast_bool(row['Sulfitos']),
disponible=cast_bool(row['Disponible'])
)
comida.save()
except Exception as exc:
pass | 39.901408 | 300 | 0.55683 | 1,858 | 0.327632 | 0 | 0 | 0 | 0 | 0 | 0 | 1,292 | 0.227826 |
c081b2d11a5b435dcb1b7be483e436c803475836 | 232 | gyp | Python | binding.gyp | sony/node-win-usbdev | bcdbd277419f1e34b1778390ec1624ccce63068d | ["Apache-2.0"] | 3 | 2017-06-28T12:00:36.000Z | 2021-11-08T12:34:26.000Z | binding.gyp | sony/node-win-usbdev | bcdbd277419f1e34b1778390ec1624ccce63068d | ["Apache-2.0"] | 1 | 2018-02-16T04:32:55.000Z | 2018-02-16T04:32:55.000Z | binding.gyp | sony/node-win-usbdev | bcdbd277419f1e34b1778390ec1624ccce63068d | ["Apache-2.0"] | 3 | 2017-07-31T23:19:07.000Z | 2022-03-25T17:02:51.000Z |
{
"targets": [
{
"target_name": "usb_dev",
"sources": [ "usb_dev.cc" ],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
"libraries": [
"-lsetupapi"
]
}
]
}
| 15.466667 | 42 | 0.37069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.517094 |
c083ad9611b00848cfe7baab07a7e05df20d4b0d | 1,011 | py | Python | insert_table.py | Cassiel60/python | 3f451e398a8705a5859d347d5fcdcfd9a5671e1c | ["MIT"] | null | null | null | insert_table.py | Cassiel60/python | 3f451e398a8705a5859d347d5fcdcfd9a5671e1c | ["MIT"] | null | null | null | insert_table.py | Cassiel60/python | 3f451e398a8705a5859d347d5fcdcfd9a5671e1c | ["MIT"] | 1 | 2019-12-19T00:34:02.000Z | 2019-12-19T00:34:02.000Z |
'''
Table AD contains RS and ADID; table Parkinson contains RS and PDID; the
variant table needs the matching PDID filled in for each ADID.
One way: the explicit loop below.
Another way: a pandas merge (see the commented sketch at the end of the file).
'''
import sys, re
import pandas as pd
varfil1=r'C:\Users\BAIOMED07\Desktop\AD_Database_20170629.xls'
varfil2=r'C:\Users\BAIOMED07\Desktop\parkinson_TOTAL.xls'
varfil3=r'C:\Users\BAIOMED07\Desktop\alleles_IonXpress_066.txt'
df1=pd.read_excel(varfil1)
print(df1.head(1))
df2=pd.read_excel(varfil2)
print(df2.head(1))
df=df1[df1['dbSNP'].isin(df2['dbSNP'])]
print(df.head(2))
df.to_excel('1.xlsx',index=0)
df3=pd.read_csv(varfil3,sep='\t')
df3['pkiq']='-'
for index,row in df2.iterrows():
rs=row['dbSNP']
row1=df1[df1['dbSNP']==rs]
if not len(row1): continue # when drug locus is not in row1
#import pdb; pdb.set_trace()
uniq=row1['UniqueID'].values.tolist()[0]
row2=df3[df3['Allele Name']==uniq]
df3.loc[row2.index,'pkiq']=row['UniqueID']
print(df3.head(1))
res_1=df3[df3['Allele Name'].isin(df['UniqueID'])]
res_1.to_excel('2.xlsx',index=0)
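# Sketch of the merge alternative mentioned in the docstring (assumed column
# names, untested against the real spreadsheets):
# mapped = df2.merge(df1[['dbSNP', 'UniqueID']], on='dbSNP', suffixes=('_pd', '_ad'))
# df3 = df3.merge(mapped[['UniqueID_ad', 'UniqueID_pd']], how='left',
#                 left_on='Allele Name', right_on='UniqueID_ad')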
| 25.275 | 87 | 0.723046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.486647 |
c08a254cca4494b2d1aa73495456b23d2cb83ea5 | 390 | py | Python | 1_Ejemplo_practico_ECG/utils.py | IEEE-UPIBI/Comunicacion-Serial-Python-Arduino | 806916a5d47e8d29933e1402296e2ca6d5d5a79e | ["MIT"] | null | null | null | 1_Ejemplo_practico_ECG/utils.py | IEEE-UPIBI/Comunicacion-Serial-Python-Arduino | 806916a5d47e8d29933e1402296e2ca6d5d5a79e | ["MIT"] | 1 | 2021-04-23T23:20:42.000Z | 2021-04-23T23:20:42.000Z | 2_Ejemplo_practico_SensorMPU6050/utils.py | IEEE-UPIBI/Comunicacion-Serial-Python-Arduino | 806916a5d47e8d29933e1402296e2ca6d5d5a79e | ["MIT"] | null | null | null |
import serial
import time
### FUNCTIONS ####
#### SERIAL COMMUNICATION ####
def arduino_communication(COM="COM5", BAUDRATE=9600, TIMEOUT=1):
    """ Initializes the connection with the Arduino board. """
    arduino = None  # stays None if the port cannot be opened
    try:
        arduino = serial.Serial(COM, BAUDRATE, timeout=TIMEOUT)
        time.sleep(2)  # give the board time to reset after the port opens
    except serial.SerialException:
        print("Connection error on the serial port")
    return arduino
| 16.956522 | 64 | 0.628205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.348718 |
c08b025b2f074208a6371fa035f6cf38f392405a | 3,595 | py | Python | trav_lib/visualize.py | thwhitfield/trav_lib | 46185f5545d958eba1538c769a98d07908dd0d19 | ["MIT"] | null | null | null | trav_lib/visualize.py | thwhitfield/trav_lib | 46185f5545d958eba1538c769a98d07908dd0d19 | ["MIT"] | null | null | null | trav_lib/visualize.py | thwhitfield/trav_lib | 46185f5545d958eba1538c769a98d07908dd0d19 | ["MIT"] | null | null | null |
"""Classes and functions used for data visualization"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def plot_correlation_matrix_heat_map(df, label, qty_fields=10):
    """Plot a heat map of the correlation matrix for the qty_fields columns most
    correlated with label (label is moved to the first row/column)."""
df = pd.concat([df[label],df.drop(label,axis=1)],axis=1)
correlation_matrix = df.corr()
index = correlation_matrix.sort_values(label, ascending=False).index
correlation_matrix = correlation_matrix[index].sort_values(label,ascending=False)
fig,ax = plt.subplots()
fig.set_size_inches((10,10))
sns.heatmap(correlation_matrix.iloc[:qty_fields,:qty_fields],annot=True,fmt='.2f',ax=ax)
# Code added due to bug in matplotlib 3.1.1
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + .5, top - .5)
return(fig,ax)
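# Typical call (illustrative): fig, ax = plot_correlation_matrix_heat_map(df, 'SalePrice')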
def plot_log_hist(s,bin_factor=1,min_exp=None):
"""Plot 2 histograms with log x scales, one for positive values & one for negative values.
Bin_factor is used to scale how many bins to use (1 is default and corresponds to
one bin per order of magnitude. Higher than 1 will skew the bins away from even powers of
10).
Parameters
----------
s: pandas series (generally using df[col])
Series or column of dataframe to analyze
bin_factor: int
Default 1, used to scale how many bins to use
min_exp: int
The minimum exponent to use in creating bins & plotting.
This can be set manually for cases where you want a specific
minimum value to be shown.
Returns
-------
fig, (ax1,ax2): matplotlib fig and ax objects
"""
# Split series into positive & negative components
s_pos = s[s >= 0]
s_neg = s[s < 0].abs()
# Not the best way to deal with this, but this was the easiest solution for now.
# TODO Fix this code to deal with no negative values or no positive values more appropriately
if s_neg.shape[0] == 0:
s_neg.loc[0] = 1
if s_pos.shape[0] == 0:
s_pos.loc[0] = 1
    # Calculate an appropriate min_exp if none is provided
    if min_exp is None:
threshold = s_pos.shape[0] - (s_pos==0).sum()
for i in range(10):
n_betw = s_pos[s_pos!=0].between(0,10**-i).sum()
if not (n_betw / threshold) > .1:
min_exp = -i
break
# Clip values to the 10**min_exp so that they are included in the histograms (if
# this isn't done then values which are 0 will be excluded from the histogram)
s_pos = s_pos.clip(lower=10**min_exp)
s_neg = s_neg.clip(lower=10**min_exp)
# Calculate the lowest integer which encompases all the positive and negative values
pos_max = int(np.ceil(np.log10(max(s_pos))))
neg_max = int(np.ceil(np.log10(max(s_neg))))
# Use that for both negative & positive values
plot_max = max(pos_max,neg_max)
# Create the bins (bin spacing is logarithmic)
bins = np.logspace(min_exp,plot_max,(plot_max+1)*bin_factor)
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,sharey=True)
fig.set_size_inches((10,5))
s_neg.hist(bins=bins,ax=ax1)
ax1.set_xscale('log')
ax1.set_title('Distribution of Negative Values')
ax1.set_xlabel('Negative values')
s_pos.hist(bins=bins,ax=ax2)
ax2.set_xscale('log')
ax2.set_title('Distribution of Positive Values')
ax2.set_xlabel('Positive Values')
# Invert axis so that values are increasingly negative from right to left.
# Decrease the spacing between the two subplots
ax1.invert_xaxis()
plt.subplots_adjust(wspace=.02)
return(fig,(ax1,ax2)) | 35.594059 | 97 | 0.662309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,704 | 0.473992 |
c08cb3b6fdb628373adc1c5e8da4f386b0294fba | 1,828 | py | Python | test/test_configeditor.py | ta-assistant/Admin-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | ["MIT"] | 1 | 2021-07-22T15:43:02.000Z | 2021-07-22T15:43:02.000Z | test/test_configeditor.py | ta-assistant/Admin-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | ["MIT"] | 28 | 2021-05-15T08:18:21.000Z | 2021-08-02T06:12:30.000Z | test/test_configeditor.py | ta-assistant/TA-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | ["MIT"] | null | null | null |
import unittest
import os, sys, inspect, json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from lib.file_management.configeditor import ConfigEditor
from lib.file_management.file_management_lib import DirManagement
class TestSendData(unittest.TestCase):
def setUp(self) -> None:
self.path = os.path.join(parentdir,"ta")
DirManagement.create_dir(self.path)
workdata = {
"workDraft": {
"outputDraft": [
"ID",
"param1",
"param2",
"comment"
],
"fileDraft": "{ID}_test.py"
},
"scores": [
{
"ID": "6310545000",
"param1": "100",
"param2": "print('hello')",
"comment": "good"
}]
}
with open(os.path.join(self.path, "work.json"), "w") as create:
json.dump(workdata, create)
self.con = ConfigEditor('testWork2', parentdir)
self.con.writeconfig()
return super().setUp()
def test_writeconfig(self):
"""
return None
"""
self.assertIsNone(self.con.writeconfig())
def test_readconfig(self):
"""
        return dict
"""
self.assertIs(type(self.con.readconfig()), dict)
def test_ishaveconfig(self):
"""
return None
"""
self.assertIsNone(self.con.ishaveconfig())
def test_checkdata(self):
"""
return None
"""
self.assertIsNone(self.con.checkdata())
def tearDown(self) -> None:
"""
        return None
"""
DirManagement.remove_dir(os.path.join(parentdir,"ta"))
return super().tearDown()
if __name__ == '__main__':
unittest.main() | 25.041096 | 86 | 0.561816 | 1,450 | 0.793217 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.202954 |
c08e3ff69724d52b478b9cfd81ca7910c42f6c6e | 7,390 | py | Python | chemreg/compound/migrations/0001_vega_sprint.py | Chemical-Curation/chemcurator | bcd7fab84e407f06502e6873c38820724d4e54e7 | ["MIT"] | 1 | 2020-10-05T18:02:24.000Z | 2020-10-05T18:02:24.000Z | chemreg/compound/migrations/0001_vega_sprint.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | ["MIT"] | 207 | 2020-01-30T19:17:44.000Z | 2021-02-24T19:45:29.000Z | chemreg/compound/migrations/0001_vega_sprint.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | ["MIT"] | null | null | null |
# Generated by Django 3.0.3 on 2020-11-18 06:06
import chemreg.common.utils
import chemreg.common.validators
import chemreg.compound.models
import chemreg.compound.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def fwd_create_illdefined_querystructuretype(apps, schema_editor):
QueryStructureType = apps.get_model("compound", "QueryStructureType")
db_alias = schema_editor.connection.alias
QueryStructureType.objects.using(db_alias).create(
name="ill-defined",
label="Ill defined",
short_description="Ill defined",
long_description="Ill defined",
)
def rev_create_illdefined_querystructuretype(apps, schema_editor):
QueryStructureType = apps.get_model("compound", "QueryStructureType")
db_alias = schema_editor.connection.alias
QueryStructureType.objects.using(db_alias).filter(name="ill-defined").delete()
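# The two functions above are wired into the migration below via
# migrations.RunPython, so the seeded 'ill-defined' QueryStructureType row is
# removed again when the migration is rolled back.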
class Migration(migrations.Migration):
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="BaseCompound",
fields=[
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"id",
models.CharField(
default=chemreg.compound.utils.build_cid,
max_length=50,
primary_key=True,
serialize=False,
unique=True,
),
),
("structure", models.TextField()),
("qc_note", models.TextField(blank=True, default="")),
(
"created_by",
models.ForeignKey(
default=chemreg.common.utils.get_current_user_pk,
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="basecompound_created_by_set",
to=settings.AUTH_USER_MODEL,
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_compound.basecompound_set+",
to="contenttypes.ContentType",
),
),
(
"replaced_by",
models.ForeignKey(
default=None,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="replaces",
to="compound.BaseCompound",
),
),
(
"updated_by",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="basecompound_updated_by_set",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ["pk"],},
),
migrations.CreateModel(
name="DefinedCompound",
fields=[
(
"basecompound_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="compound.BaseCompound",
),
),
("inchikey", models.CharField(max_length=29, null=True)),
],
options={"abstract": False, "base_manager_name": "objects",},
bases=("compound.basecompound",),
),
migrations.CreateModel(
name="QueryStructureType",
fields=[
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"name",
models.SlugField(
max_length=49, primary_key=True, serialize=False, unique=True
),
),
("label", models.CharField(max_length=99, unique=True)),
("short_description", models.CharField(max_length=499)),
("long_description", models.TextField()),
("deprecated", models.BooleanField(default=False)),
(
"created_by",
models.ForeignKey(
default=chemreg.common.utils.get_current_user_pk,
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="querystructuretype_created_by_set",
to=settings.AUTH_USER_MODEL,
),
),
(
"updated_by",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="querystructuretype_updated_by_set",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ["pk"], "abstract": False,},
),
migrations.RunPython(
fwd_create_illdefined_querystructuretype,
rev_create_illdefined_querystructuretype,
),
migrations.CreateModel(
name="IllDefinedCompound",
fields=[
(
"basecompound_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="compound.BaseCompound",
),
),
(
"query_structure_type",
models.ForeignKey(
default=chemreg.compound.models.get_illdefined_qst,
on_delete=django.db.models.deletion.PROTECT,
to="compound.QueryStructureType",
validators=[chemreg.common.validators.validate_deprecated],
),
),
],
options={
"verbose_name": "ill-defined compound",
"abstract": False,
"base_manager_name": "objects",
},
bases=("compound.basecompound",),
),
]
| 38.092784 | 85 | 0.472666 | 6,449 | 0.872666 | 0 | 0 | 0 | 0 | 0 | 0 | 1,070 | 0.14479 |
c08e8f408c1440f68bb49f4c21e145acaad7cc8e | 3,466 | py | Python | TwitterCode/crawler.py | aghriss/CS5502_project | 68403f38ef26067360cb22404cdabe0d0543097a | [
"MIT"
]
| null | null | null | TwitterCode/crawler.py | aghriss/CS5502_project | 68403f38ef26067360cb22404cdabe0d0543097a | [
"MIT"
]
| null | null | null | TwitterCode/crawler.py | aghriss/CS5502_project | 68403f38ef26067360cb22404cdabe0d0543097a | [
"MIT"
]
| null | null | null | '''
Twitter Crawler to get tweets and user data
'''
import tweepy
import json
import os
import time
def get_counts_quantile(tweets, q=0.5):
    # Incomplete in the original; here we assume the quantile is taken over
    # retweet counts (a standard tweepy Status attribute).
    counts = sorted(t.retweet_count for t in tweets)
    if not counts:
        return None
    return counts[int(q * (len(counts) - 1))]
def save_tweet(result):
"""Function to save tweepy result status"""
pass
def save_user(result_set):
"""Function to save tweepy set fo result statuses"""
pass
class TweetCrawler():
def __init__(self, credentials_path, save_path, location_id=None):
assert os.path.exists(save_path)
assert os.path.exists(credentials_path)
self.save_path = save_path
        self.location_id = location_id if location_id else 23424977  # default WOEID: United States
        try:
            with open(credentials_path, "r") as f:
                creds = json.load(f)
            self.api = tweepy.API(tweepy.AppAuthHandler(creds['API_KEY'], creds['SECRET_KEY']))
        except Exception as exc:
            raise RuntimeError("Auth error: check credentials and connection") from exc
def crawl(self):
location, trends = self.get_trends()
for trend in trends:
query = trend['query']
trending = self.get_trending_tweets(query)
non_trending = self.get_untrending_tweets(query)
            # NOTE: `store` is assumed to be supplied by a subclass or caller;
            # it is not defined on TweetCrawler itself.
            self.store(trending, trending=True)
            self.store(non_trending, trending=False)
def get_trends(self,):
trends_dict = self.api.trends_place(self.location_id)[0]
location_name = trends_dict['locations'][0]['name']
non_empty_trends = list(filter(lambda x: x['tweet_volume'] is not None,
trends_dict['trends']))
print("Retrieved %i for location: %s"%(len(non_empty_trends), location_name))
return location_name, non_empty_trends
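    # Each trend dict from the Twitter trends endpoint is assumed to carry at
    # least 'name', 'query' and 'tweet_volume' keys (API v1.1 behaviour).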
def get_trending_tweets(self, query):
popular_tweets = self.api.search(query, count=500, result_type="popular")
tuples = []
for popular in popular_tweets:
user_timeline = self.api.user_timeline(popular.author.id, count=200)
tuples.append([popular, user_timeline])
return tuples
def get_untrending_tweets(self, query):
popular_tweets = self.api.search(query, count=500, result_type="recent")
tuples = []
for popular in popular_tweets:
user_timeline = self.get_user(popular.author.id)
tuples.append([popular, user_timeline])
return tuples
def get_user(self, user_id):
time.sleep(0.1)
return self.api.user_timeline(user_id, count=200)
def save_user(self, user):
print("Saving user %s"%user.id_str)
json.dump(user._json, open(os.path.join(self.save_path, "user_"+user.id_str+".json"), 'w'))
def save_tweet(self, tweet):
print("Saving tweet %s"%tweet.id_str)
json.dump(tweet._json, open(os.path.join(self.save_path, "tweet_"+ tweet.id_str+".json"), 'w'))
def rate_status(self):
state = self.api.rate_limit_status()
limits = state['resources']['statuses']
return {'tweet':limits['/statuses/show/:id']['remaining'],
'users': limits['/statuses/user_timeline']['remaining']}
def get_tweet(self, tweet_id):
time.sleep(0.1)
return self.api.get_status(tweet_id)
# Example usage (assumes twitter_credentials.json holds API_KEY and SECRET_KEY):
# crawler = TweetCrawler("twitter_credentials.json", './data')
# crawler.crawl()
| 32.698113 | 103 | 0.611656 | 2,995 | 0.864108 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.159838 |
c08e9da0f8073946d9eb1f38656fc0912b347134 | 2,206 | py | Python | instruments/swap.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
]
| 1 | 2021-10-04T03:15:50.000Z | 2021-10-04T03:15:50.000Z | instruments/swap.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
]
| null | null | null | instruments/swap.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
]
| null | null | null | from irLib.instruments.instrument import instrument
from irLib.helpers.schedule import period
from irLib.instruments.legs import fixLeg, floatLeg
class swap(instrument):
def __init__(self, tradeDate, spotLag=period(0, 'day'), position='long', *legs):
super().__init__(tradeDate, spotLag, position)
self.legs = legs
def setPricingEngine(self, discountCurve):
self.discountCurve = discountCurve
self.pricingEngine = self.discountCurve
for leg in self.legs:
leg.setPricingEngine(discountCurve)
def calculateNPV(self, day):
super().calculateNPV()
NPV = 0
for leg in self.legs:
NPV += leg.calculateNPV(day)
return NPV * self.longShort
def isExpired(self, day):
return all([leg.isExpired(day) for leg in self.legs])
class vanillaSwap(swap):
def __init__(self, tradeDate, payer, fixSchedule, floatSchedule, floatingCurve, discountCurve=None, spotLag=period(0, 'day'), swapRate=None):
assert payer in ('payer', 'receiver'), 'payer or receiver?'
self.payer = payer
self.position = 'long' if self.payer == 'payer' else 'short'
self.floatLeg = floatLeg(
tradeDate, floatingCurve, floatSchedule, spotLag)
self.fixLeg = fixLeg(tradeDate, 1., fixSchedule, spotLag)
super().__init__(tradeDate, spotLag, self.position, self.fixLeg, self.floatLeg)
if swapRate is None:
assert discountCurve is not None, 'need discount curve to determine swap rate'
super().setPricingEngine(discountCurve)
self.dayCount = self.discountCurve.dayCount
self.tenor = self.dayCount.getYearFrac(min(self.floatLeg.schedule.startDate, self.fixLeg.schedule.startDate),
max(self.floatLeg.schedule.terminationDate, self.fixLeg.schedule.terminationDate))
self.swapRate = self.floatLeg.calculateNPV(
self.tradeDate) / self.fixLeg.calculateNPV(self.tradeDate)
else:
self.swapRate = swapRate
self.fixLeg.rate = self.swapRate
self.fixLeg.position = 'short'
self.fixLeg.longShort = -1
| 41.622642 | 145 | 0.655938 | 2,054 | 0.931097 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.05621 |
c08f4ab3e25ce0f369e7d00947095aeb1fb9b437 | 21,083 | py | Python | skhubness/neighbors/lsh.py | VarIr/scikit-hubness | 6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3 | [
"BSD-3-Clause"
]
| 33 | 2019-08-05T12:29:19.000Z | 2022-03-08T18:48:28.000Z | skhubness/neighbors/lsh.py | AndreasPhilippi/scikit-hubness | 6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3 | [
"BSD-3-Clause"
]
| 84 | 2019-07-12T09:05:42.000Z | 2022-03-31T08:50:15.000Z | skhubness/neighbors/lsh.py | AndreasPhilippi/scikit-hubness | 6eaeedda2c4b52bb7bf2553b3c5b04a076287ae3 | [
"BSD-3-Clause"
]
| 9 | 2019-09-26T11:03:04.000Z | 2021-07-01T08:43:11.000Z | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# PEP 563: Postponed Evaluation of Annotations
from __future__ import annotations
from functools import partial
import multiprocessing as mp
from typing import Tuple, Union
import warnings
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances, pairwise_distances
from sklearn.metrics.pairwise import cosine_distances
from sklearn.utils.validation import check_is_fitted, check_array, check_X_y
try:
import puffinn
except ImportError:
puffinn = None # pragma: no cover
try:
import falconn
except ImportError:
falconn = None # pragma: no cover
from tqdm.auto import tqdm
from .approximate_neighbors import ApproximateNearestNeighbor
from ..utils.check import check_n_candidates
__all__ = ['FalconnLSH', 'PuffinnLSH', ]
class PuffinnLSH(BaseEstimator, ApproximateNearestNeighbor):
""" Wrap Puffinn LSH for scikit-learn compatibility.
Parameters
----------
n_candidates: int, default = 5
Number of neighbors to retrieve
metric: str, default = 'euclidean'
Distance metric, allowed are "angular", "jaccard".
Other metrics are partially supported, such as 'euclidean', 'sqeuclidean'.
In these cases, 'angular' distances are used to find the candidate set
of neighbors with LSH among all indexed objects, and (squared) Euclidean
distances are subsequently only computed for the candidates.
memory: int, default = None
Max memory usage. If None, determined heuristically.
recall: float, default = 0.90
Probability of finding the true nearest neighbors among the candidates
n_jobs: int, default = 1
Number of parallel jobs
verbose: int, default = 0
Verbosity level. If verbose > 0, show tqdm progress bar on indexing and querying.
Attributes
----------
valid_metrics:
List of valid distance metrics/measures
"""
valid_metrics = ["angular", "cosine", "euclidean", "sqeuclidean", "minkowski",
"jaccard",
]
metric_map = {'euclidean': 'angular',
'sqeuclidean': 'angular',
'minkowski': 'angular',
'cosine': 'angular',
}
def __init__(self, n_candidates: int = 5,
metric: str = 'euclidean',
memory: int = None,
recall: float = 0.9,
n_jobs: int = 1,
verbose: int = 0,
):
if puffinn is None: # pragma: no cover
raise ImportError(f'Please install the `puffinn` package, before using this class:\n'
f'$ git clone https://github.com/puffinn/puffinn.git\n'
f'$ cd puffinn\n'
f'$ python3 setup.py build\n'
f'$ pip install .\n') from None
super().__init__(n_candidates=n_candidates,
metric=metric,
n_jobs=n_jobs,
verbose=verbose,
)
self.memory = memory
self.recall = recall
def fit(self, X, y=None) -> PuffinnLSH:
""" Build the puffinn LSH index and insert data from X.
Parameters
----------
X: np.array
Data to be indexed
y: any
Ignored
Returns
-------
self: Puffinn
An instance of Puffinn with a built index
"""
if y is None:
X = check_array(X)
else:
X, y = check_X_y(X, y)
self.y_train_ = y
if self.metric not in self.valid_metrics:
warnings.warn(f'Invalid metric "{self.metric}". Using "euclidean" instead')
self.metric = 'euclidean'
try:
self._effective_metric = self.metric_map[self.metric]
except KeyError:
self._effective_metric = self.metric
# Larger memory means many iterations (time-recall trade-off)
memory = max(np.multiply(*X.shape) * 8 * 500, 1024**2)
if self.memory is not None:
memory = max(self.memory, memory)
# Construct the index
index = puffinn.Index(self._effective_metric,
X.shape[1],
memory,
)
disable_tqdm = False if self.verbose else True
for v in tqdm(X, desc='Indexing', disable=disable_tqdm):
index.insert(v.tolist())
index.rebuild()
self.index_ = index
self.n_indexed_ = X.shape[0]
self.X_indexed_norm_ = np.linalg.norm(X, ord=2, axis=1).reshape(-1, 1)
return self
def kneighbors(self, X=None, n_candidates=None, return_distance=True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve k nearest neighbors.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
n_candidates: int or None, optional, default = None
Number of neighbors to retrieve.
If None, use the value passed during construction.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
"""
check_is_fitted(self, 'index_')
index = self.index_
if n_candidates is None:
n_candidates = self.n_candidates
n_candidates = check_n_candidates(n_candidates)
# For compatibility reasons, as each sample is considered as its own
# neighbor, one extra neighbor will be computed.
if X is None:
n_query = self.n_indexed_
X = np.array([index.get(i) for i in range(n_query)])
search_from_index = True
else:
X = check_array(X)
n_query = X.shape[0]
search_from_index = False
dtype = X.dtype
# If chosen metric is not among the natively supported ones, reorder the neighbors
reorder = True if self.metric not in ('angular', 'cosine', 'jaccard') else False
# If fewer candidates than required are found for a query,
# we save index=-1 and distance=NaN
neigh_ind = -np.ones((n_query, n_candidates),
dtype=np.int32)
if return_distance or reorder:
neigh_dist = np.empty_like(neigh_ind,
dtype=dtype) * np.nan
metric = 'cosine' if self.metric == 'angular' else self.metric
disable_tqdm = False if self.verbose else True
if search_from_index: # search indexed against indexed
for i in tqdm(range(n_query),
desc='Querying',
disable=disable_tqdm,
):
# Find the approximate nearest neighbors.
# Each of the true `n_candidates` nearest neighbors
# has at least `recall` chance of being found.
ind = index.search_from_index(i, n_candidates, self.recall, )
neigh_ind[i, :len(ind)] = ind
if return_distance or reorder:
X_neigh_denormalized = \
X[ind] * self.X_indexed_norm_[ind].reshape(len(ind), -1)
neigh_dist[i, :len(ind)] = pairwise_distances(X[i:i+1, :] * self.X_indexed_norm_[i],
X_neigh_denormalized,
metric=metric,
)
else: # search new query against indexed
for i, x in tqdm(enumerate(X),
desc='Querying',
disable=disable_tqdm,
):
# Find the approximate nearest neighbors.
# Each of the true `n_candidates` nearest neighbors
# has at least `recall` chance of being found.
ind = index.search(x.tolist(),
n_candidates,
self.recall,
)
neigh_ind[i, :len(ind)] = ind
if return_distance or reorder:
X_neigh_denormalized =\
np.array([index.get(i) for i in ind]) * self.X_indexed_norm_[ind].reshape(len(ind), -1)
neigh_dist[i, :len(ind)] = pairwise_distances(x.reshape(1, -1),
X_neigh_denormalized,
metric=metric,
)
if reorder:
sort = np.argsort(neigh_dist, axis=1)
neigh_dist = np.take_along_axis(neigh_dist, sort, axis=1)
neigh_ind = np.take_along_axis(neigh_ind, sort, axis=1)
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
class FalconnLSH(ApproximateNearestNeighbor):
"""Wrapper for using falconn LSH
Falconn is an approximate nearest neighbor library,
that uses multiprobe locality-sensitive hashing.
Parameters
----------
n_candidates: int, default = 5
Number of neighbors to retrieve
radius: float or None, optional, default = None
Retrieve neighbors within this radius.
Can be negative: See Notes.
metric: str, default = 'euclidean'
Distance metric, allowed are "angular", "euclidean", "manhattan", "hamming", "dot"
num_probes: int, default = 50
The number of buckets the query algorithm probes.
The higher number of probes is, the better accuracy one gets,
but the slower queries are.
n_jobs: int, default = 1
Number of parallel jobs
verbose: int, default = 0
Verbosity level. If verbose > 0, show tqdm progress bar on indexing and querying.
Attributes
----------
valid_metrics:
List of valid distance metrics/measures
Notes
-----
From the falconn docs: radius can be negative, and for the distance function
'negative_inner_product' it actually makes sense.
"""
valid_metrics = ['euclidean', 'l2', 'minkowski', 'squared_euclidean', 'sqeuclidean',
'cosine', 'neg_inner', 'NegativeInnerProduct']
def __init__(self, n_candidates: int = 5, radius: float = 1., metric: str = 'euclidean', num_probes: int = 50,
n_jobs: int = 1, verbose: int = 0):
if falconn is None: # pragma: no cover
raise ImportError(f'Please install the `falconn` package, before using this class:\n'
f'$ pip install falconn') from None
super().__init__(n_candidates=n_candidates,
metric=metric,
n_jobs=n_jobs,
verbose=verbose,
)
self.num_probes = num_probes
self.radius = radius
def fit(self, X: np.ndarray, y: np.ndarray = None) -> FalconnLSH:
""" Setup the LSH index from training data.
Parameters
----------
X: np.array
Data to be indexed
y: any
Ignored
Returns
-------
self: FalconnLSH
An instance of LSH with a built index
"""
X = check_array(X, dtype=[np.float32, np.float64])
if self.metric in ['euclidean', 'l2', 'minkowski']:
self.metric = 'euclidean'
distance = falconn.DistanceFunction.EuclideanSquared
elif self.metric in ['squared_euclidean', 'sqeuclidean']:
self.metric = 'sqeuclidean'
distance = falconn.DistanceFunction.EuclideanSquared
elif self.metric in ['cosine', 'NegativeInnerProduct', 'neg_inner']:
self.metric = 'cosine'
distance = falconn.DistanceFunction.NegativeInnerProduct
else:
warnings.warn(f'Invalid metric "{self.metric}". Using "euclidean" instead')
self.metric = 'euclidean'
distance = falconn.DistanceFunction.EuclideanSquared
# Set up the LSH index
lsh_construction_params = falconn.get_default_parameters(*X.shape,
distance=distance)
lsh_index = falconn.LSHIndex(lsh_construction_params)
lsh_index.setup(X)
self.X_train_ = X
self.y_train_ = y
self.index_ = lsh_index
return self
def kneighbors(self, X: np.ndarray = None,
n_candidates: int = None,
return_distance: bool = True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve k nearest neighbors.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
n_candidates: int or None, optional, default = None
Number of neighbors to retrieve.
If None, use the value passed during construction.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
"""
check_is_fitted(self, ["index_", 'X_train_'])
# Check the n_neighbors parameter
if n_candidates is None:
n_candidates = self.n_candidates
elif n_candidates <= 0:
raise ValueError(f"Expected n_neighbors > 0. Got {n_candidates:d}")
else:
if not np.issubdtype(type(n_candidates), np.integer):
raise TypeError(f"n_neighbors does not take {type(n_candidates)} value, enter integer value")
if X is not None:
            X = check_array(X, dtype=self.X_train_.dtype)
            query_is_train = False
n_retrieve = n_candidates
else:
query_is_train = True
X = self.X_train_
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_retrieve = n_candidates + 1
# Configure the LSH query objects (one per parallel worker)
query = self.index_.construct_query_pool(num_probes=self.num_probes,
num_query_objects=self.n_jobs)
if return_distance:
if self.metric == 'euclidean':
distances = partial(euclidean_distances, squared=False)
elif self.metric == 'sqeuclidean':
distances = partial(euclidean_distances, squared=True)
elif self.metric == 'cosine':
distances = cosine_distances
else:
raise ValueError(f'Internal error: unrecognized metric "{self.metric}"')
# Allocate memory for neighbor indices (and distances)
n_objects = X.shape[0]
neigh_ind = np.empty((n_objects, n_candidates), dtype=np.int32)
if return_distance:
neigh_dist = np.empty_like(neigh_ind, dtype=X.dtype)
# If verbose, show progress bar on the search loop
disable_tqdm = False if self.verbose else True
if self.n_jobs > 1:
def pquery(ix):
i, x = ix
return i, np.array(query.find_k_nearest_neighbors(x, k=n_retrieve))
with mp.pool.ThreadPool(processes=self.n_jobs) as pool:
i_knn = list(tqdm(pool.imap_unordered(func=pquery,
iterable=enumerate(X),
chunksize=10),
disable=False if self.verbose else True,
total=X.shape[0],
unit='vectors',
desc='LSH query'))
for i, knn in tqdm(i_knn, desc='Collecting results', disable=disable_tqdm):
if query_is_train:
knn = knn[1:]
neigh_ind[i, :knn.size] = knn
if return_distance:
neigh_dist[i, :knn.size] = distances(X[i].reshape(1, -1), self.X_train_[knn])
# LSH may yield fewer neighbors than n_neighbors.
# We set distances to NaN, and indices to -1
if knn.size < n_candidates:
neigh_ind[i, knn.size:] = -1
if return_distance:
neigh_dist[i, knn.size:] = np.nan
else:
for i, x in tqdm(enumerate(X),
desc='LSH',
disable=disable_tqdm,
):
knn = np.array(query.find_k_nearest_neighbors(x, k=n_retrieve))
if query_is_train:
knn = knn[1:]
neigh_ind[i, :knn.size] = knn
if return_distance:
neigh_dist[i, :knn.size] = distances(x.reshape(1, -1), self.X_train_[knn])
# LSH may yield fewer neighbors than n_neighbors.
# We set distances to NaN, and indices to -1
if knn.size < n_candidates:
neigh_ind[i, knn.size:] = -1
if return_distance:
neigh_dist[i, knn.size:] = np.nan
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
def radius_neighbors(self, X: np.ndarray = None,
radius: float = None,
return_distance: bool = True) -> Union[Tuple[np.array, np.array], np.array]:
""" Retrieve neighbors within a certain radius.
Parameters
----------
X: np.array or None, optional, default = None
Query objects. If None, search among the indexed objects.
radius: float or None, optional, default = None
Retrieve neighbors within this radius.
Can be negative: See Notes.
return_distance: bool, default = True
If return_distance, will return distances and indices to neighbors.
Else, only return the indices.
Notes
-----
From the falconn docs: radius can be negative, and for the distance function
'negative_inner_product' it actually makes sense.
"""
check_is_fitted(self, ["index_", 'X_train_'])
# Constructing a query object
query = self.index_.construct_query_object()
query.set_num_probes(self.num_probes)
if return_distance:
if self.metric == 'euclidean':
distances = partial(euclidean_distances, squared=False)
elif self.metric == 'sqeuclidean':
distances = partial(euclidean_distances, squared=True)
elif self.metric == 'cosine':
distances = cosine_distances
else:
raise ValueError(f'Internal error: unrecognized metric "{self.metric}"')
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr', dtype=self.X_train_.dtype)
else:
query_is_train = True
X = self.X_train_
if radius is None:
radius = self.radius
# LSH uses squared Euclidean internally
if self.metric == 'euclidean':
radius *= radius
# Add a small number to imitate <= threshold
radius += 1e-7
# Allocate memory for neighbor indices (and distances)
n_objects = X.shape[0]
neigh_ind = np.empty(n_objects, dtype='object')
if return_distance:
neigh_dist = np.empty_like(neigh_ind)
# If verbose, show progress bar on the search loop
disable_tqdm = False if self.verbose else True
for i, x in tqdm(enumerate(X),
desc='LSH',
disable=disable_tqdm,
):
knn = np.array(query.find_near_neighbors(x, threshold=radius))
if len(knn) == 0:
knn = np.array([], dtype=int)
else:
if query_is_train:
knn = knn[1:]
neigh_ind[i] = knn
if return_distance:
if len(knn):
neigh_dist[i] = distances(x.reshape(1, -1), self.X_train_[knn]).ravel()
else:
neigh_dist[i] = np.array([])
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
| 39.481273 | 120 | 0.54774 | 20,221 | 0.959114 | 0 | 0 | 0 | 0 | 0 | 0 | 7,549 | 0.358061 |
c09039628dfca0497559485ef917b2eee5612ab1 | 11,859 | py | Python | virtualreality/calibration/manual_color_mask_calibration.py | sahasam/hobo_vr | 0cf5824c91719055156ec23cf8dda2d921be948a | [
"MIT"
]
| null | null | null | virtualreality/calibration/manual_color_mask_calibration.py | sahasam/hobo_vr | 0cf5824c91719055156ec23cf8dda2d921be948a | [
"MIT"
]
| null | null | null | virtualreality/calibration/manual_color_mask_calibration.py | sahasam/hobo_vr | 0cf5824c91719055156ec23cf8dda2d921be948a | [
"MIT"
]
| null | null | null | """
pyvr calibrate.
Usage:
pyvr calibrate [options]
Options:
-h, --help
-c, --camera <camera> Source of the camera to use for calibration [default: 0]
-r, --resolution <res> Input resolution in width and height [default: -1x-1]
-n, --n_masks <n_masks> Number of masks to calibrate [default: 1]
-l, --load_from_file <file> Load previous calibration settings [default: ranges.pickle]
-s, --save <file> Save calibration settings to a file [default: ranges.pickle]
"""
import logging
import pickle
import sys
from copy import copy
from pathlib import Path
from typing import Optional, List
import cv2
from docopt import docopt
from virtualreality import __version__
class ColorRange(object):
def __init__(self,
color_num,
hue_center=0,
hue_range=180,
sat_center=0,
sat_range=180,
val_center=0,
val_range=180
):
self.color_num = color_num
self.hue_center = hue_center
self.hue_range = hue_range
self.sat_center = sat_center
self.sat_range = sat_range
self.val_center = val_center
self.val_range = val_range
class CalibrationData(object):
def __init__(self, width=1, height=1, auto_exposure=0.25, exposure=0, saturation=50, num_colors=4):
self.width = width
self.height = height
self.exposure = exposure
self.saturation = saturation
self.num_colors = num_colors
self.color_ranges: List[ColorRange] = []
color_dist = 180 // num_colors
for color in range(num_colors):
self.color_ranges.append(ColorRange(color, *[color * color_dist, color_dist] * 3))
@classmethod
def load_from_file(cls, load_file: str = str(Path(__file__).parent) + "ranges.pickle") -> Optional[
'CalibrationData']:
"""Load the calibration data from a file."""
try:
with open(load_file, "rb") as file:
ranges = pickle.load(file)
return ranges
except FileNotFoundError as fe:
logging.warning(f"Could not load calibration file '{load_file}'.")
def save_to_file(self, save_file: str = str(Path(__file__).parent) + "ranges.pickle") -> None:
with open(save_file, "wb") as file:
pickle.dump(self, file)
def colordata_to_blob(colordata, mapdata):
'''
translates CalibrationData object to BlobTracker format masks
:colordata: CalibrationData object
:mapdata: a map dict with key representing the mask name and value representing the mask number
'''
out = {}
for key, clr_range_index in mapdata.items():
temp = colordata.color_ranges[clr_range_index]
out[key] = {
'h':(temp.hue_center, temp.hue_range),
's':(temp.sat_center, temp.sat_range),
'v':(temp.val_center, temp.val_range),
}
return out
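# Hypothetical example: map two mask names onto calibrated color slots.
#   masks = colordata_to_blob(calib, {'blue': 0, 'green': 1})
#   # -> {'blue': {'h': (hc, hr), 's': (sc, sr), 'v': (vc, vr)}, 'green': {...}}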
def load_mapdata_from_file(path):
'''
loads mapdata from file, for use in colordata_to_blob
'''
with open(path, 'rb') as file:
return pickle.load(file)
def save_mapdata_to_file(path, mapdata):
'''
save mapdata to file, for use in colordata_to_blob
'''
with open(path, "wb") as file:
pickle.dump(mapdata, file)
def list_supported_capture_properties(cap: cv2.VideoCapture):
"""List the properties supported by the capture device."""
# thanks: https://stackoverflow.com/q/47935846/782170
supported = list()
for attr in dir(cv2):
if attr.startswith("CAP_PROP") and cap.get(getattr(cv2, attr)) != -1:
supported.append(attr)
return supported
def get_color_mask(hsv, color_range: ColorRange):
color_low = [
color_range.hue_center - color_range.hue_range,
color_range.sat_center - color_range.sat_range,
color_range.val_center - color_range.val_range,
]
color_high = [
color_range.hue_center + color_range.hue_range,
color_range.sat_center + color_range.sat_range,
color_range.val_center + color_range.val_range,
]
color_low_neg = copy(color_low)
color_high_neg = copy(color_high)
    for c in range(3):
        c_max = 180 if c == 0 else 255  # OpenCV hue tops out at 180; sat/val at 255
if color_low_neg[c] < 0:
color_low_neg[c] = c_max + color_low_neg[c]
color_high_neg[c] = c_max
color_low[c] = 0
elif color_high_neg[c] > c_max:
color_low_neg[c] = 0
color_high_neg[c] = color_high_neg[c] - c_max
color_high[c] = c_max
mask1 = cv2.inRange(hsv, tuple(color_low), tuple(color_high))
mask2 = cv2.inRange(hsv, tuple(color_low_neg), tuple(color_high_neg))
mask = cv2.bitwise_or(mask1, mask2)
return mask
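# Example (hypothetical values): a red mask whose hue window wraps below 0,
# which the wrap-around logic above splits into two inRange calls.
#   mask = get_color_mask(hsv, ColorRange(0, hue_center=0, hue_range=10,
#                                         sat_center=200, sat_range=55,
#                                         val_center=200, val_range=55))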
def _set_default_camera_properties(vs, cam, vs_supported, frame_width, frame_height):
if "CAP_PROP_FOURCC" not in vs_supported:
logging.warning(f"Camera {cam} does not support setting video codec.")
else:
vs.set(cv2.CAP_PROP_FOURCC, cv2.CAP_OPENCV_MJPEG)
if "CAP_PROP_AUTO_EXPOSURE" not in vs_supported:
logging.warning(f"Camera {cam} does not support turning on/off auto exposure.")
else:
vs.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
if "CAP_PROP_EXPOSURE" not in vs_supported:
logging.warning(f"Camera {cam} does not support directly setting exposure.")
else:
vs.set(cv2.CAP_PROP_EXPOSURE, -7)
if "CAP_PROP_EXPOSURE" not in vs_supported:
logging.warning(f"Camera {cam} does not support directly setting exposure.")
else:
vs.set(cv2.CAP_PROP_EXPOSURE, -7)
if "CAP_PROP_FRAME_HEIGHT" not in vs_supported:
logging.warning(f"Camera {cam} does not support requesting frame height.")
else:
vs.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
if "CAP_PROP_FRAME_WIDTH" not in vs_supported:
logging.warning(f"Camera {cam} does not support requesting frame width.")
else:
vs.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
def manual_calibration(
cam=0, num_colors_to_track=4, frame_width=-1, frame_height=-1, load_file="", save_file="ranges.pickle"
):
"""Manually calibrate the hsv ranges and camera settings used for blob tracking."""
vs = cv2.VideoCapture(cam)
vs.set(cv2.CAP_PROP_EXPOSURE, -7)
vs_supported = list_supported_capture_properties(vs)
_set_default_camera_properties(vs, cam, vs_supported, frame_width, frame_height)
cam_window = f"camera {cam} input"
cv2.namedWindow(cam_window)
if "CAP_PROP_EXPOSURE" in vs_supported:
cv2.createTrackbar(
"exposure", cam_window, 0, 16, lambda x: vs.set(cv2.CAP_PROP_EXPOSURE, x - 8),
)
if "CAP_PROP_SATURATION" in vs_supported:
cv2.createTrackbar(
"saturation", cam_window, 0, 100, lambda x: vs.set(cv2.CAP_PROP_SATURATION, x),
)
else:
logging.warning(f"Camera {cam} does not support setting saturation.")
ranges = None
if load_file:
ranges = CalibrationData.load_from_file(load_file)
if ranges is None:
ranges = CalibrationData(width=frame_width, height=frame_height, num_colors=num_colors_to_track)
tracker_window_names = []
for color in range(num_colors_to_track):
tracker_window_names.append(f"color {color}")
cv2.namedWindow(tracker_window_names[color])
cv2.createTrackbar(
"hue center", tracker_window_names[color], ranges.color_ranges[color].hue_center, 180, lambda _: None,
)
cv2.createTrackbar(
"hue range", tracker_window_names[color], ranges.color_ranges[color].hue_range, 180, lambda _: None,
)
cv2.createTrackbar(
"sat center", tracker_window_names[color], ranges.color_ranges[color].sat_center, 255, lambda _: None,
)
cv2.createTrackbar(
"sat range", tracker_window_names[color], ranges.color_ranges[color].sat_range, 255, lambda _: None,
)
cv2.createTrackbar(
"val center", tracker_window_names[color], ranges.color_ranges[color].val_center, 255, lambda _: None,
)
cv2.createTrackbar(
"val range", tracker_window_names[color], ranges.color_ranges[color].val_range, 255, lambda _: None,
)
while 1:
ret, frame = vs.read()
if frame is None:
break
blurred = cv2.GaussianBlur(frame, (3, 3), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
exposure = cv2.getTrackbarPos("exposure", cam_window)
saturation = cv2.getTrackbarPos("saturation", cam_window)
ranges.exposure = exposure - 8
ranges.saturation = saturation
for color in range(num_colors_to_track):
hue_center = cv2.getTrackbarPos("hue center", tracker_window_names[color])
hue_range = cv2.getTrackbarPos("hue range", tracker_window_names[color])
sat_center = cv2.getTrackbarPos("sat center", tracker_window_names[color])
sat_range = cv2.getTrackbarPos("sat range", tracker_window_names[color])
val_center = cv2.getTrackbarPos("val center", tracker_window_names[color])
val_range = cv2.getTrackbarPos("val range", tracker_window_names[color])
ranges.color_ranges[color].hue_center = hue_center
ranges.color_ranges[color].hue_range = hue_range
ranges.color_ranges[color].sat_center = sat_center
ranges.color_ranges[color].sat_range = sat_range
ranges.color_ranges[color].val_center = val_center
ranges.color_ranges[color].val_range = val_range
mask = get_color_mask(hsv, ranges.color_ranges[color])
res = cv2.bitwise_and(hsv, hsv, mask=mask)
cv2.imshow(tracker_window_names[color], res)
cv2.imshow(cam_window, frame)
k = cv2.waitKey(1) & 0xFF
if k in [ord("q"), 27]:
break
for color in range(num_colors_to_track):
hue_center = cv2.getTrackbarPos("hue center", tracker_window_names[color])
hue_range = cv2.getTrackbarPos("hue range", tracker_window_names[color])
sat_center = cv2.getTrackbarPos("sat center", tracker_window_names[color])
sat_range = cv2.getTrackbarPos("sat range", tracker_window_names[color])
val_center = cv2.getTrackbarPos("val center", tracker_window_names[color])
val_range = cv2.getTrackbarPos("val range", tracker_window_names[color])
print(f"hue_center[{color}]: {hue_center}")
print(f"hue_range[{color}]: {hue_range}")
print(f"sat_center[{color}]: {sat_center}")
print(f"sat_range[{color}]: {sat_range}")
print(f"val_center[{color}]: {val_center}")
print(f"val_range[{color}]: {val_range}")
if save_file:
ranges.save_to_file(save_file)
print(f'ranges saved to list in "{save_file}".')
print("You can use this in the pyvr tracker using the --calibration-file argument.")
vs.release()
cv2.destroyAllWindows()
def main():
"""Calibrate entry point."""
# allow calling from both python -m and from pyvr:
    argv = sys.argv[1:]
    if not argv or argv[0] != "calibrate":
        argv = ["calibrate"] + argv
args = docopt(__doc__, version=f"pyvr version {__version__}", argv=argv)
width, height = args["--resolution"].split("x")
if args["--camera"].isdigit():
cam = int(args["--camera"])
else:
cam = args["--camera"]
manual_calibration(
cam=cam,
num_colors_to_track=int(args["--n_masks"]),
frame_width=int(width),
frame_height=int(height),
load_file=args["--load_from_file"],
save_file=args["--save"],
)
| 35.827795 | 114 | 0.646935 | 1,688 | 0.142339 | 0 | 0 | 446 | 0.037609 | 0 | 0 | 2,651 | 0.223543 |
c091621b5f0a091f64683171c4c8e2bb52a88c66 | 155 | py | Python | lambda_handlers/validators/__init__.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
]
| null | null | null | lambda_handlers/validators/__init__.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
]
| null | null | null | lambda_handlers/validators/__init__.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
]
| null | null | null | from .jsonschema_validator import JSONSchemaValidator as jsonschema # noqa
from .marshmallow_validator import MarshmallowValidator as marshmallow # noqa
| 51.666667 | 78 | 0.858065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.077419 |
c091c64c9f6b764d68bafb5d1ad27be880d8e240 | 227 | py | Python | xonsh/aliases/dir.py | yjpark/dotfiles | ae9ad72eb2e2a4d3da4c600d24782720229d1a4b | [
"MIT"
]
| 7 | 2015-12-18T04:33:01.000Z | 2019-09-17T06:09:51.000Z | xonsh/aliases/dir.py | yjpark/dotfiles | ae9ad72eb2e2a4d3da4c600d24782720229d1a4b | [
"MIT"
]
| 1 | 2016-05-12T15:32:47.000Z | 2016-05-12T15:32:47.000Z | xonsh/aliases/dir.py | yjpark/dotfiles | ae9ad72eb2e2a4d3da4c600d24782720229d1a4b | [
"MIT"
]
| 4 | 2016-11-29T04:06:19.000Z | 2019-12-26T14:32:46.000Z | aliases['cd-'] = 'cd -'
aliases['cl'] = 'cd (ls -1Ft | head -1)'
aliases['..'] = 'cd ..'
aliases['...'] = 'cd ../..'
aliases['....'] = 'cd ../../..'
aliases['.....'] = 'cd ../../../..'
aliases['......'] = 'cd ../../../../..'
| 22.7 | 40 | 0.339207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.590308 |
c09416ca42570e30634d8a60a3175bf1c430d092 | 1,894 | py | Python | database.py | tzoch/dropbox-bot | 2bf36e2d4146bf8c00169362f9767ed059643787 | [
"MIT"
]
| 3 | 2016-03-08T04:43:22.000Z | 2020-08-25T20:07:28.000Z | database.py | tzoch/dropbox-bot | 2bf36e2d4146bf8c00169362f9767ed059643787 | [
"MIT"
]
| null | null | null | database.py | tzoch/dropbox-bot | 2bf36e2d4146bf8c00169362f9767ed059643787 | [
"MIT"
]
| null | null | null | #! /usr/bin/python
'''
Class to handle database connections and queries for
Dropbox Mirror Bot
'''
import sqlite3
class Database(object):
def __init__(self, database=":memory:"):
self._database = database
c = self.cursor()
query = '''CREATE TABLE IF NOT EXISTS dropbox_submissions (
processed_id INTEGER PRIMARY KEY ASC,
submission_id VARCHAR(10) UNIQUE)'''
c.execute(query)
self.conn.commit()
query = '''CREATE TABLE IF NOT EXISTS dropbox_images (
id INTEGER PRIMARY KEY ASC,
imgur_id VARCHAR(10),
deletehash VARCHAR(40))'''
c.execute(query)
self.conn.commit()
@property
def conn(self):
if not hasattr(self, '_connection'):
self._connection = sqlite3.connect(self._database)
return self._connection
def cursor(self):
return self.conn.cursor()
def close(self):
self.conn.close()
def is_processed(self, submission_id):
'''Return true if the submission has already been processed'''
c = self.cursor()
query = '''SELECT submission_id FROM dropbox_submissions
WHERE submission_id = (?)'''
c.execute(query, (submission_id,))
if c.fetchone():
return True
return False
def mark_as_processed(self, submission_id):
c = self.cursor()
query = '''INSERT INTO dropbox_submissions (submission_id)
VALUES (?)'''
        c.execute(query, (submission_id,))
self.conn.commit()
def log_image(self, img_id, img_deletehash):
c = self.cursor()
query = '''INSERT INTO dropbox_images (imgur_id, deletehash)
VALUES (?, ?)'''
c.execute(query, (img_id, img_deletehash))
self.conn.commit()
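# Minimal usage sketch:
#   db = Database("bot.db")
#   if not db.is_processed("abc123"):
#       db.mark_as_processed("abc123")
#   db.close()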
| 30.063492 | 70 | 0.577614 | 1,776 | 0.937698 | 0 | 0 | 169 | 0.089229 | 0 | 0 | 796 | 0.420275 |
c09416d25e79ddab37c35127443d972589822576 | 796 | py | Python | tests/starter/test_starter_admin_email.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
]
| 17 | 2015-02-10T07:10:29.000Z | 2021-05-14T22:24:45.000Z | tests/starter/test_starter_admin_email.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
]
| 459 | 2015-03-31T18:24:23.000Z | 2022-03-30T19:44:40.000Z | tests/starter/test_starter_admin_email.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
]
| 9 | 2015-04-18T16:57:31.000Z | 2020-10-30T11:49:13.000Z | import unittest
from mock import patch
from starter.starter_AdminEmail import starter_AdminEmail
from tests.activity.classes_mock import FakeLogger
from tests.classes_mock import FakeLayer1
import tests.settings_mock as settings_mock
class TestStarterAdminEmail(unittest.TestCase):
def setUp(self):
self.fake_logger = FakeLogger()
self.starter = starter_AdminEmail(settings_mock, logger=self.fake_logger)
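    # FakeLayer1 (patched in below) stands in for boto's SWF Layer1 client,
    # so these tests make no real AWS calls.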
@patch("boto.swf.layer1.Layer1")
def test_start(self, fake_conn):
fake_conn.return_value = FakeLayer1()
self.assertIsNone(self.starter.start(settings_mock))
@patch("boto.swf.layer1.Layer1")
def test_start_workflow(self, fake_conn):
fake_conn.return_value = FakeLayer1()
self.assertIsNone(self.starter.start_workflow())
| 34.608696 | 81 | 0.761307 | 559 | 0.702261 | 0 | 0 | 357 | 0.448492 | 0 | 0 | 48 | 0.060302 |
c09430f692b1d375bbff6b77320ac21d3531ed34 | 15,963 | py | Python | tests/commit/math/test__tensors.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
]
| null | null | null | tests/commit/math/test__tensors.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
]
| null | null | null | tests/commit/math/test__tensors.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
]
| null | null | null | from unittest import TestCase
import numpy as np
import phi
from phi import math
from phi.math import channel, batch
from phi.math._shape import CHANNEL_DIM, BATCH_DIM, shape_stack, spatial
from phi.math._tensors import TensorStack, CollapsedTensor, wrap, tensor, cached
from phi.math.backend import Backend
BACKENDS = phi.detect_backends()
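# Every backend detected at import time (NumPy plus e.g. PyTorch, TensorFlow
# or Jax when installed) is exercised by the loops in the tests below.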
class TestTensors(TestCase):
def test_tensor_from_constant(self):
for backend in BACKENDS:
with backend:
for const in (1, 1.5, True, 1+1j):
tens = math.wrap(const)
self.assertEqual(math.NUMPY, tens.default_backend)
self.assertTrue(isinstance(tens.native(), (int, float, bool, complex)), msg=backend)
math.assert_close(tens, const)
tens = math.tensor(const)
self.assertEqual(backend, math.choose_backend(tens), f'{const} was not converted to the specified backend')
math.assert_close(tens, const)
def test_tensor_from_native(self):
for creation_backend in BACKENDS:
native = creation_backend.ones((4,))
for backend in BACKENDS:
with backend:
tens = math.tensor(native, convert=False)
self.assertEqual(creation_backend, tens.default_backend)
math.assert_close(tens, native)
tens = math.tensor(native)
self.assertEqual(backend, tens.default_backend, f'Conversion failed from {creation_backend} to {backend}')
math.assert_close(tens, native)
def test_tensor_from_tuple_of_numbers(self):
data_tuple = (1, 2, 3)
for backend in BACKENDS:
with backend:
tens = math.tensor(data_tuple, convert=False)
self.assertEqual(math.NUMPY, math.choose_backend(tens))
math.assert_close(tens, data_tuple)
tens = math.tensor(data_tuple)
self.assertEqual(backend, math.choose_backend(tens))
math.assert_close(tens, data_tuple)
def test_tensor_from_tuple_of_tensor_like(self):
native = ([1, 2, 3], math.zeros(channel(vector=3)))
for backend in BACKENDS:
with backend:
tens = wrap(native, batch(stack=2), channel(vector=3))
self.assertEqual(math.NUMPY, math.choose_backend(tens))
self.assertEqual(batch(stack=2) & channel(vector=3), tens.shape)
tens = tensor(native, batch(stack=2), channel(vector=3))
self.assertEqual(backend, math.choose_backend(tens))
self.assertEqual(batch(stack=2) & channel(vector=3), tens.shape)
def test_tensor_from_tensor(self):
ref = math.stack([math.zeros(spatial(x=5)), math.zeros(spatial(x=4))], batch('stack'))
for backend in BACKENDS:
with backend:
tens = math.tensor(ref, convert=False)
self.assertEqual(math.NUMPY, math.choose_backend(tens))
self.assertEqual(2, tens.shape.get_size('stack'))
self.assertEqual(('stack', 'x'), tens.shape.names)
tens = math.tensor(ref)
self.assertEqual(backend, math.choose_backend(tens))
self.assertEqual(backend, math.choose_backend(tens.stack[0]))
self.assertEqual(backend, math.choose_backend(tens.stack[1]))
tens = math.tensor(ref, batch('n1', 'n2'))
self.assertEqual(backend, math.choose_backend(tens))
def test_multi_dim_tensor_from_numpy(self):
v = math.tensor(np.ones([1, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
self.assertEqual((1, 4, 3, 2), v.shape.sizes)
v = math.tensor(np.ones([10, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
self.assertEqual((10, 4, 3, 2), v.shape.sizes)
def test_tensor_from_shape(self):
s = spatial(x=4, y=3)
t = math.tensor(s)
math.assert_close(t, [4, 3])
self.assertEqual(t.shape.get_item_names('dims'), ('x', 'y'))
def test_native_constant_ops(self):
v = math.tensor(np.ones([1, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
math.assert_close(v + 1, 2)
math.assert_close(v * 3, 3)
math.assert_close(v / 2, 0.5)
math.assert_close(v ** 2, 1)
math.assert_close(2 ** v, 2)
math.assert_close(v + [0, 1], [1, 2])
def test_native_native_ops(self):
v = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
d = v.unstack('vector')[0]
math.assert_close(v + d, d + v, 2)
math.assert_close(v * d, d * v, 1)
def test_native_unstack(self):
v = math.ones(batch(batch=10), spatial(x=4, y=3), channel(vector=2))
vx, vy = v.vector.unstack()
self.assertEqual((10, 4, 3), vx.shape.sizes)
self.assertEqual(4, len(v.x.unstack()))
self.assertEqual(10, len(v.batch.unstack()))
def test_native_slice(self):
v = math.ones(batch(batch=10), spatial(x=4, y=3), channel(vector=2))
self.assertEqual((10, 4, 3), v.vector[0].shape.sizes)
self.assertEqual((10, 2, 2), v.y[0:2].x[0].shape.sizes)
def test_stacked_shapes(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
for dim in t0.shape.names:
tensors = t0.unstack(dim)
stacked = math.stack(tensors, t0.shape[dim].with_sizes([None]))
self.assertEqual(set(t0.shape.names), set(stacked.shape.names))
self.assertEqual(t0.shape.volume, stacked.shape.volume)
def test_stacked_native(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
tensors = t0.unstack('vector')
stacked = math.stack(tensors, channel('vector2'))
math.assert_close(stacked, t0)
self.assertEqual((10, 4, 3, 2), stacked.native(stacked.shape).shape)
self.assertEqual((4, 3, 2, 10), stacked.native(order=('x', 'y', 'vector2', 'batch')).shape)
self.assertEqual((2, 10, 3, 4), stacked.native(order=('vector2', 'batch', 'y', 'x')).shape) # this should re-stack since only the stacked dimension position is different
def test_stacked_get(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
tensors = t0.unstack('vector')
stacked = math.stack(tensors, channel('channel'))
self.assertEqual(tensors, stacked.channel.unstack())
assert tensors[0] is stacked.channel[0]
assert tensors[1] is stacked.channel[1:2].channel.unstack()[0]
self.assertEqual(4, len(stacked.x.unstack()))
def test_shape_math(self):
vector = math.ones(spatial(x=4, y=3) & channel(vector=2))
vector *= vector.shape.spatial
math.assert_close(vector.vector[0], 4)
math.assert_close(vector.vector[1], 3)
def test_collapsed(self):
scalar = math.zeros(spatial(x=4, y=3))
math.assert_close(scalar, 0)
self.assertEqual((4, 3), scalar.shape.sizes)
self.assertEqual(4, scalar.y[0].shape.size)
self.assertEqual(0, scalar.y[0].x[0].shape.rank)
self.assertEqual(3, len(scalar.y.unstack()))
def test_collapsed_op2(self):
# Collapsed + Collapsed
a = math.zeros(channel(vector=4))
b = math.ones(batch(batch=3))
c = a + b
self.assertIsInstance(c, CollapsedTensor)
self.assertEqual(c.shape.volume, 12)
self.assertEqual(c._inner.shape.volume, 1)
# Collapsed + Native
n = math.ones(channel(vector=3)) + (0, 1, 2)
math.assert_close(n, (1, 2, 3))
def test_semi_collapsed(self):
scalar = math.ones(spatial(x=4, y=3))
scalar = CollapsedTensor(scalar, scalar.shape._expand(batch(batch=10)))
self.assertEqual((10, 4, 3), scalar.shape.sizes)
self.assertEqual(4, len(scalar.x.unstack()))
self.assertEqual(10, len(scalar.batch.unstack()))
self.assertEqual(0, scalar.y[0].batch[0].x[0].shape.rank)
def test_zeros_nonuniform(self):
nonuniform = shape_stack(batch('stack'), batch(time=1) & spatial(x=3, y=3), spatial(x=3, y=4), channel())
self.assertEqual(math.zeros(nonuniform).shape, nonuniform)
self.assertEqual(math.ones(nonuniform).shape, nonuniform)
self.assertEqual(math.random_normal(nonuniform).shape, nonuniform)
self.assertEqual(math.random_uniform(nonuniform).shape, nonuniform)
def test_close_different_shapes(self):
a = math.ones(channel(vector='x,y'))
b = math.wrap(3)
self.assertFalse(math.close(a, b))
self.assertFalse(math.close(cached(a), b))
math.assert_close(a+2, b)
def test_repr(self):
print("--- Eager ---")
print(repr(math.zeros(batch(b=10))))
print(repr(math.zeros(batch(b=10)) > 0))
print(repr(math.ones(channel(vector=3))))
print(repr(math.ones(batch(vector=3))))
def tracable(x):
print(x)
return x
print("--- Placeholders ---")
for backend in BACKENDS:
if backend.supports(Backend.jit_compile):
with backend:
math.jit_compile(tracable)(math.ones(channel(vector=3)))
def test_tensor_like(self):
class Success(Exception): pass
class MyObjV:
def __init__(self, x):
self.x = x
def __value_attrs__(self):
return 'x',
def __with_tattrs__(self, **tattrs):
math.assert_close(tattrs['x'], 1)
raise Success
class MyObjT:
def __init__(self, x1, x2):
self.x1 = x1
self.x2 = x2
def __variable_attrs__(self):
return 'x1', 'x2'
v = MyObjV(math.wrap(0))
t = MyObjT(math.wrap(0), math.wrap(1))
self.assertIsInstance(v, math.TensorLike)
self.assertIsInstance(t, math.TensorLike)
try:
math.cos(v)
except Success:
pass
try:
math.cos(t)
except AssertionError:
pass
def test_Dict(self):
d1 = math.Dict(a=1, b=math.ones(), c=math.ones(spatial(x=3)))
math.assert_close(d1 * 2, d1 + d1, 2 * d1, 2 / d1)
math.assert_close(0 + d1, d1, d1 - 0, abs(d1), round(d1))
math.assert_close(-d1, 0 - d1)
math.assert_close(d1 // 2, d1 * 0, d1 % 1)
math.assert_close(d1 / 2, d1 * 0.5, 0.5 * d1)
math.assert_close(math.sin(d1 * 0), d1 * 0)
def test_collapsed_non_uniform_tensor(self):
non_uniform = math.stack([math.zeros(spatial(a=2)), math.ones(spatial(a=3))], batch('b'))
e = math.expand(non_uniform, channel('vector'))
assert e.shape.without('vector') == non_uniform.shape
def test_slice_by_item_name(self):
t = math.tensor(spatial(x=4, y=3))
math.assert_close(t.dims['x'], 4)
math.assert_close(t.dims['y'], 3)
math.assert_close(t.dims['y,x'], (3, 4))
math.assert_close(t.dims[('y', 'x')], (3, 4))
math.assert_close(t.dims[spatial('x,y')], (4, 3))
def test_serialize_tensor(self):
t = math.random_normal(batch(batch=10), spatial(x=4, y=3), channel(vector=2))
math.assert_close(t, math.from_dict(math.to_dict(t)))
def test_flip_item_names(self):
t = math.zeros(spatial(x=4, y=3), channel(vector='x,y'))
self.assertEqual(('x', 'y'), t.vector.item_names)
t_ = t.vector.flip()
self.assertEqual(('y', 'x'), t_.vector.item_names)
t_ = t.vector[::-1]
self.assertEqual(('y', 'x'), t_.vector.item_names)
def test_op2_incompatible_item_names(self):
t1 = math.random_normal(channel(vector='x,y,z'))
t2 = math.random_normal(channel(vector='r,g,b'))
self.assertEqual(('r', 'g', 'b'), t2.vector.item_names)
try:
t1 + t2
self.fail("Tensors with incompatible item names cannot be added")
except math.IncompatibleShapes:
pass
t1 + t1
t2_ = t2 + math.random_normal(channel(vector=3))
self.assertEqual(('r', 'g', 'b'), t2_.vector.item_names)
t2_ = math.random_normal(channel(vector=3)) + t2
self.assertEqual(('r', 'g', 'b'), t2_.vector.item_names)
def test_layout_single(self):
a = object()
t = math.layout(a)
self.assertEqual(a, t.native())
def test_layout_list(self):
a = ['a', 'b', 'c']
t = math.layout(a, channel(letters=a))
self.assertEqual(a, t.native())
self.assertEqual('a', t.letters['a'].native())
self.assertEqual('a', t.letters['b, a'].letters['a'].native())
def test_layout_tree(self):
a = [['a', 'b1'], 'b2', 'c']
t = math.layout(a, channel(outer='list,b2,c', inner=None))
self.assertEqual(a, t.native())
self.assertEqual(['a', 'b1'], t.outer['list'].native())
self.assertEqual('a', t.outer['list'].inner[0].native())
self.assertEqual(['a', 'b', 'c'], t.inner[0].native())
self.assertEqual('a', t.inner[0].outer['list'].native())
def test_layout_size(self):
a = [['a', 'b1'], 'b2', 'c']
t = math.layout(a, channel(outer='list,b2,c', inner=None))
self.assertEqual(3, t.shape.get_size('outer'))
self.assertEqual(2, t.outer['list'].shape.get_size('inner'))
self.assertEqual(1, t.outer['c'].shape.get_size('inner'))
def test_layout_dict(self):
a = {'a': 'text', 'b': [0, 1]}
t = math.layout(a, channel('dict,inner'))
self.assertEqual(a, t.native())
self.assertEqual(('a', 'b'), t.shape.get_item_names('dict'))
self.assertEqual(a, t.native())
self.assertEqual('text', t.dict['a'].native())
self.assertEqual('e', t.dict['a'].inner[1].native())
self.assertEqual(1, t.dict['b'].inner[1].native())
self.assertEqual(('e', 1), t.inner[1].native())
def test_layout_dict_conflict(self):
a = [dict(a=1), dict(b=2)]
t = math.layout(a, channel('outer,dict'))
self.assertEqual(None, t.shape.get_item_names('dict'))
self.assertEqual(a, t.native())
self.assertEqual([1, 2], t.dict[0].native())
self.assertEqual(2, t.dict[0].outer[1].native())
def test_layout_None(self):
none = math.layout(None)
self.assertEqual(None, none.native())
l = math.layout([None, None], channel('v'))
self.assertEqual(None, none.v[0].native())
def test_iterate_0d(self):
total = 0.
for value in math.ones():
total += value
self.assertIsInstance(total, float)
self.assertEqual(total, 1)
def test_iterate_1d(self):
total = 0.
for value in math.ones(channel(vector=3)):
total += value
self.assertIsInstance(total, float)
self.assertEqual(total, 3)
def test_iterate_2d(self):
total = 0.
for value in math.ones(channel(v1=2, v2=2)):
total += value
self.assertIsInstance(total, float)
self.assertEqual(total, 4)
def test_iterate_layout(self):
a = [dict(a=1), dict(b=2)]
t = math.layout(a, channel('outer,dict'))
total = []
for d in t:
total.append(d)
self.assertEqual(total, [1, 2])
def test_default_backend_layout(self):
self.assertIsNone(math.layout(None).default_backend)
def test_reduction_properties(self):
t = math.meshgrid(x=2, y=2)
self.assertEqual(0.5, t.mean)
self.assertEqual(0.5, t.std)
self.assertEqual(1, t.max)
self.assertEqual(0, t.min)
self.assertEqual(4, t.sum)
self.assertEqual(False, t.all)
self.assertEqual(True, t.any)
| 40.930769 | 178 | 0.58811 | 15,616 | 0.978262 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.055065 |
c094fb0ecd2841945312d32ccd2105e21bce1b3b | 1,202 | py | Python | index/admin.py | KolibriSolutions/BepMarketplace | c47d252fd744cde6b927e37c34d7a103c6162be5 | [
"BSD-3-Clause"
]
| 1 | 2019-06-29T15:24:24.000Z | 2019-06-29T15:24:24.000Z | index/admin.py | KolibriSolutions/BepMarketplace | c47d252fd744cde6b927e37c34d7a103c6162be5 | [
"BSD-3-Clause"
]
| 2 | 2020-01-12T17:47:33.000Z | 2020-01-12T17:47:45.000Z | index/admin.py | KolibriSolutions/BepMarketplace | c47d252fd744cde6b927e37c34d7a103c6162be5 | [
"BSD-3-Clause"
]
| 2 | 2019-06-29T15:24:26.000Z | 2020-01-08T15:15:03.000Z | # Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from django.contrib import admin
from django.shortcuts import reverse
from django.utils.html import format_html
from .models import Track, Broadcast, FeedbackReport, UserMeta, Term, UserAcceptedTerms
class UserMetaAdmin(admin.ModelAdmin):
search_fields = ['User__username', 'Fullname', 'User__email', 'User__username']
list_filter = ('User__groups', 'Cohort', 'EnrolledBEP', 'EnrolledExt')
list_display = ['Fullname', 'User', 'user_link']
def user_link(self, obj):
url = reverse('admin:auth_user_change', args=[obj.User.id])
return format_html("<a href='{}'>{}</a>", url, obj)
class UserAcceptedTermsAdmin(admin.ModelAdmin):
search_fields = ['User__username']
class FeedbackReportAdmin(admin.ModelAdmin):
list_filter = ['Status']
admin.site.register(Term)
admin.site.register(UserAcceptedTerms, UserAcceptedTermsAdmin)
admin.site.register(UserMeta, UserMetaAdmin)
admin.site.register(Broadcast)
admin.site.register(FeedbackReport, FeedbackReportAdmin)
admin.site.register(Track)
| 33.388889 | 102 | 0.757903 | 568 | 0.472546 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.306156 |
c0955acbb8e9da3cf689449a7886da397be4ac74 | 9,025 | py | Python | stacks/firecares_web.py | FireCARES/firecares-ansible | f650798a7a343626402f6d46782651a2f7a5f086 | [
"MIT"
]
| 1 | 2017-02-12T13:38:57.000Z | 2017-02-12T13:38:57.000Z | stacks/firecares_web.py | FireCARES/firecares-ansible | f650798a7a343626402f6d46782651a2f7a5f086 | [
"MIT"
]
| 16 | 2015-10-21T13:00:54.000Z | 2021-09-23T23:21:07.000Z | stacks/firecares_web.py | FireCARES/firecares-ansible | f650798a7a343626402f6d46782651a2f7a5f086 | [
"MIT"
]
| 4 | 2015-08-19T00:52:54.000Z | 2017-02-13T08:14:10.000Z | from datetime import datetime
from pytz import timezone
from troposphere import Ref, Template, Parameter, GetAZs, Output, Join, GetAtt, autoscaling, ec2, elasticloadbalancing as elb
t = Template()
t.add_description("Create FireCARES Webserver Load Balancer, Auto-Scaling group and Celery beat VM")
base_ami = "ami-7646e460"
now = datetime.utcnow().replace(tzinfo=timezone('UTC')).isoformat()
key_name = t.add_parameter(Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH access to the instances",
Type="AWS::EC2::KeyPair::KeyName",
ConstraintDescription="Must be the name of an existing EC2 KeyPair."
))
ami = t.add_parameter(Parameter(
"baseAmi",
Description="Name of the AMI to use",
Type="String",
ConstraintDescription="Must be the name of an existing AMI.",
Default=base_ami
))
beatami = t.add_parameter(Parameter(
"beatAmi",
Description="Name of the beat AMI",
Type="String",
ConstraintDescription="Must be the name of an existing AMI."
))
web_capacity = t.add_parameter(Parameter(
"WebServerCapacity",
Default="2",
Description="The initial number of WebServer instances",
Type="Number",
ConstraintDescription="must be between 1 and 5 EC2 instances.",
MinValue="1",
MaxValue="5",
))
commit = t.add_parameter(Parameter(
"CommitHash",
Description="Commit hash used for building the web VM",
Type="String"
))
beat_instance_class = t.add_parameter(Parameter(
"BeatInstanceClass",
Default="t2.large",
Description="Celery beat EC2 instance type",
Type="String",
ConstraintDescription="must be a valid EC2 instance type.",
AllowedValues=[
"t1.micro",
"t2.nano",
"t2.micro",
"t2.small",
"t2.medium",
"t2.large",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"m4.large",
"m4.xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.10xlarge",
"c1.medium",
"c1.xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c4.large",
"c4.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"g2.2xlarge",
"g2.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"i2.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"d2.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"hi1.4xlarge",
"hs1.8xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
]
))
web_instance_class = t.add_parameter(Parameter(
"WebInstanceClass",
Default="t2.small",
Description="WebServer EC2 instance type",
Type="String",
ConstraintDescription="must be a valid EC2 instance type.",
AllowedValues=[
"t1.micro",
"t2.nano",
"t2.micro",
"t2.small",
"t2.medium",
"t2.large",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"m4.large",
"m4.xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.10xlarge",
"c1.medium",
"c1.xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c4.large",
"c4.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"g2.2xlarge",
"g2.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"i2.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"d2.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"hi1.4xlarge",
"hs1.8xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
]
))
environment = t.add_parameter(Parameter(
"Environment",
Description="Stack environment (e.g. prod, dev, int)",
Type="String",
MinLength="1",
MaxLength="12",
Default="dev",
))
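# Classic ELB with app-cookie stickiness; the HTTPS listener terminates TLS with
# an ACM certificate and both listeners forward to instance port 80.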
load_balancer = t.add_resource(elb.LoadBalancer(
"LoadBalancer",
CrossZone=True,
AvailabilityZones=GetAZs(""),
LoadBalancerName=Join('-', ['fc', Ref(environment), Ref(commit)]),
AppCookieStickinessPolicy=[
{
"PolicyName": "AppCookieBasedPolicy",
"CookieName": "sticky"
}
],
Listeners=[
{
"LoadBalancerPort": "80",
"InstancePort": "80",
"Protocol": "HTTP"
},
{
"LoadBalancerPort": "443",
"InstancePort": "80",
"Protocol": "HTTPS",
"SSLCertificateId": "arn:aws:acm:us-east-1:164077527722:certificate/a8085d69-3f7b-442e-baa6-70f3bd9b4981",
"PolicyNames": [
"AppCookieBasedPolicy"
]
}
]
))
web_sg = t.add_resource(ec2.SecurityGroup(
"WebServers",
GroupDescription=Join(' - ', ["FireCARES webserver group", Ref(environment), Ref(commit)]),
SecurityGroupIngress=[
ec2.SecurityGroupRule("ELBAccess",
IpProtocol="tcp",
FromPort="80",
ToPort="80",
SourceSecurityGroupOwnerId=GetAtt(load_balancer, "SourceSecurityGroup.OwnerAlias"),
SourceSecurityGroupName=GetAtt(load_balancer, "SourceSecurityGroup.GroupName")
),
ec2.SecurityGroupRule("JenkinsAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="54.173.150.226/32"),
ec2.SecurityGroupRule("TylerAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="73.173.214.176/32"),
ec2.SecurityGroupRule("JoeAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="65.254.97.100/32"),
ec2.SecurityGroupRule("JoeAccess2", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="108.66.75.162/32"),
ec2.SecurityGroupRule("JoeAccess3", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="71.86.4.190/32"),
ec2.SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="75.133.14.178/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="47.215.167.239/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="54.87.125.141/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="54.167.99.192/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="52.205.224.226/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="52.206.122.170/32"),
ec2.SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="52.202.117.147/32")
],
))
launch_configuration = t.add_resource(autoscaling.LaunchConfiguration(
"WebServerLaunchConfiguration",
ImageId=Ref(ami),
InstanceType=Ref(web_instance_class),
KeyName=Ref(key_name),
SecurityGroups=[Ref(web_sg)]
))
beat = t.add_resource(ec2.Instance(
"BeatInstance",
ImageId=Ref(beatami),
InstanceType=Ref(beat_instance_class),
KeyName=Ref(key_name),
SecurityGroups=[Ref(web_sg)],
Tags=[
ec2.Tag("environment", Ref(environment)),
ec2.Tag("Name", Join('-', ['celerybeat', Ref(environment), Ref(commit)])),
ec2.Tag("Group", Join('-', ['celerybeat', Ref(environment)]))
]
))
autoscaling_group = t.add_resource(autoscaling.AutoScalingGroup(
"WebserverAutoScale",
AvailabilityZones=['us-east-1b', 'us-east-1c'],
DesiredCapacity=Ref(web_capacity),
MinSize="1",
MaxSize="5",
Tags=[
autoscaling.Tag("environment", Ref(environment), True),
autoscaling.Tag("Name", Join('-', ['web-server', Ref(environment), Ref(commit)]), True),
autoscaling.Tag("Group", Join('-', ['web-server', Ref(environment)]), True)
],
LoadBalancerNames=[Ref(load_balancer)],
HealthCheckType="EC2",
LaunchConfigurationName=Ref(launch_configuration)
))
t.add_output([
Output(
"stackURL",
Description="Stack url",
Value=Join("", [GetAtt(load_balancer, 'DNSName')]),
)
])
t.add_output([
Output(
"WebServerSecurityGroup",
Description="Web server security group.",
Value=Join("", [GetAtt(web_sg, 'GroupId')]),
)
])
t.add_output([
Output(
"AMI",
Description="Web server ami image group.",
Value=Ref(ami),
)
])
if __name__ == '__main__':
    print(t.to_json())
| 29.493464 | 125 | 0.582493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,445 | 0.381717 |
c095ea2cd17b98f861280b8dd90e12ab34027235 | 513 | py | Python | solutions/unitReview/gcd.py | mrparkonline/python3_while | 3b24be84d16230e2b923276dca4c943f4c5ad26d | [
"MIT"
]
| null | null | null | solutions/unitReview/gcd.py | mrparkonline/python3_while | 3b24be84d16230e2b923276dca4c943f4c5ad26d | [
"MIT"
]
| null | null | null | solutions/unitReview/gcd.py | mrparkonline/python3_while | 3b24be84d16230e2b923276dca4c943f4c5ad26d | [
"MIT"
]
| null | null | null | # GCD Program
from math import gcd
# input
num1 = int(input('Enter a number: '))
num2 = int(input('Enter another number: '))
# processing & output
divisor = 1
upper_limit = min(num1, num2)
gcd_answer = 0
#print(num1, 'and', num2, 'share these factors:')
print('GCD of', num1, 'and', num2, 'is:')
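# Trial division: try every candidate up to min(num1, num2); the largest one
# that divides both inputs is kept as the answer.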
while divisor <= upper_limit:
if num1 % divisor == 0 and num2 % divisor == 0:
gcd_answer = divisor
divisor += 1
# end of while loop
print(gcd_answer)
print('Math Module GCD:', gcd(num1,num2)) | 22.304348 | 51 | 0.651072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.364522 |
c096a0804783d5b361fcbd0253a74c4dbbc3423f | 447 | py | Python | topk/gen_count.py | ririripley/recipes | 04267c68a7424326b4aa8dd14b1a879b59ab887c | [
"BSD-3-Clause"
]
| 1,418 | 2015-01-07T09:40:09.000Z | 2022-03-29T08:37:02.000Z | topk/gen_count.py | algoideas/recipes | 77bc5cb180e49edb31983938386ef23f752e2d2f | [
"BSD-3-Clause"
]
| 22 | 2015-02-17T17:31:18.000Z | 2022-02-08T07:00:29.000Z | topk/gen_count.py | algoideas/recipes | 77bc5cb180e49edb31983938386ef23f752e2d2f | [
"BSD-3-Clause"
]
| 854 | 2015-01-03T11:56:10.000Z | 2022-03-31T08:50:28.000Z | #!/usr/bin/python
import random
word_len = 5
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
output = open('word_count', 'w')
words = set()
N = 1000*1000
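# Draw N random 5-letter words; the set folds duplicates, so the printed
# count falls slightly short of N.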
for x in range(N):
arr = [random.choice(alphabet) for i in range(word_len)]
words.add(''.join(arr))
print(len(words))
for word in words:
output.write(word)
output.write('\t')
output.write(str(random.randint(1, 2*N)))
output.write('\n')
| 20.318182 | 75 | 0.686801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.237136 |
c09740c69f29292cd8143f6167d141bb98d730a6 | 728 | py | Python | notification/views.py | ChristopherOloo/KilimoQAPortal | c905a42282bbce70b5477862185ad332185307ce | [
"MIT"
]
| 67 | 2022-01-05T18:59:23.000Z | 2022-03-18T13:13:39.000Z | notification/views.py | ChristopherOloo/KilimoQAPortal | c905a42282bbce70b5477862185ad332185307ce | [
"MIT"
]
| 3 | 2022-01-10T10:03:23.000Z | 2022-03-11T16:58:38.000Z | notification/views.py | ChristopherOloo/KilimoQAPortal | c905a42282bbce70b5477862185ad332185307ce | [
"MIT"
]
| 4 | 2022-01-08T17:39:19.000Z | 2022-02-28T07:40:16.000Z | from django.shortcuts import render
from .models import PrivRepNotification,Notification
from django.http import JsonResponse, HttpResponseRedirect, HttpResponse
def read_All_Notifications(request):
notifics = Notification.objects.filter(noti_receiver=request.user).order_by('-date_created')
for objs in notifics:
objs.is_read = True
objs.save()
# return HttpResponse(status=204)
return JsonResponse({'action': 'readedAll'})
def read_All_Priv_Notifications(request):
notifications = PrivRepNotification.objects.filter(for_user=request.user)
for obj in notifications:
obj.is_read = True
obj.save()
return JsonResponse({'action':'readedAllPrivNotifications'})
| 26.962963 | 96 | 0.747253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.141484 |
c0984d7ef444e29454bfa97d1cc9a76bb27bd524 | 1,243 | py | Python | sortedListToBST.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
]
| null | null | null | sortedListToBST.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
]
| null | null | null | sortedListToBST.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
]
| null | null | null | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sortedListToBST(self, head):
if not head:
return None
pre_mid, mid = self.sortedListToBSTHelper(head)
root = TreeNode(mid.val)
if pre_mid != None:
pre_mid.next = None
else:
head = None
root.left = self.sortedListToBST(head)
root.right = self.sortedListToBST(mid.next)
return root
def sortedListToBSTHelper(self, head):
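        # Fast/slow pointers: when `fast` reaches the end of the list, `slow`
        # sits on the middle node and `prev` on the node just before it.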
prev = None
slow = head
fast = head.next
while fast != None and fast.next != None:
prev = slow
slow = slow.next
fast = fast.next.next
return prev, slow
head = ListNode(1)
p1 = ListNode(2)
p2 = ListNode(3)
p3 = ListNode(4)
p4 = ListNode(5)
p5 = ListNode(6)
p6 = ListNode(7)
head.next = p1
p1.next = p2
p2.next = p3
p3.next = p4
p4.next = p5
p5.next = p6
test = Solution()
print(test.sortedListToBST(head).val)
| 21.067797 | 55 | 0.584875 | 905 | 0.728077 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.057924 |
c099053af9b9ff299e9a4615defe7551e4d4dfdc | 1,105 | py | Python | train.py | divyanshrm/Polyth-Net-Classification-of-Polythene-Bags-Using-Deep-Dearning | f52c0887cb12cf1322a37d1042917be5d679c725 | [
"MIT"
]
| null | null | null | train.py | divyanshrm/Polyth-Net-Classification-of-Polythene-Bags-Using-Deep-Dearning | f52c0887cb12cf1322a37d1042917be5d679c725 | [
"MIT"
]
| null | null | null | train.py | divyanshrm/Polyth-Net-Classification-of-Polythene-Bags-Using-Deep-Dearning | f52c0887cb12cf1322a37d1042917be5d679c725 | [
"MIT"
]
| null | null | null | import tensorflow as tf
import tensorflow.keras as k
import numpy as np
import pandas as pd
from load_and_augment import load_and_augment_data
from modelconfig import modelconfig
from compile_model import compile_model_adam
import compile_model
if __name__=='__main__':
path=r'/content/drive/My Drive/data'
testing_path=r'/content/drive/My Drive/test/'
training_gen,val_gen,test_gen=load_and_augment_data(path,testing_path)
model=modelconfig(0.25)
model=compile_model_adam(model,0.001,1.2)
cb=tf.keras.callbacks.EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=0, mode='auto')
history=model.fit_generator(generator=training_gen,steps_per_epoch=25,epochs=100,validation_data=val_gen, validation_steps=10,callbacks=[cb])
training=pd.DataFrame(history.history)
training.to_csv('training_statistics.csv',index=False)
    evaluation_test=model.evaluate_generator(test_gen)
print('test accuracy= {} and f1={}'.format(evaluation_test[1],evaluation_test[2]))
model.save('model_polythene.h5')
| 42.5 | 143 | 0.729412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.147511 |
c09b58cc8746669f100104bd829d92eb5454df67 | 1,548 | py | Python | fb2_get_list.py | kawaiigamer/python-tools | 68fd75299657811fef36339732c80539ccad386e | [
"Unlicense"
]
| null | null | null | fb2_get_list.py | kawaiigamer/python-tools | 68fd75299657811fef36339732c80539ccad386e | [
"Unlicense"
]
| null | null | null | fb2_get_list.py | kawaiigamer/python-tools | 68fd75299657811fef36339732c80539ccad386e | [
"Unlicense"
]
| null | null | null | import os
import glob
import codecs
from typing import List
def dirs(root_dit: str) -> List[str]:
return next(os.walk(root_dit))[1]
def select_directory_from_list(directories: List[str]) -> str:
for i in range(0, len(directories)):
print("(%d) %s" % (i, directories[i]))
while True:
try:
return directories[int(input('Directory to check(number)_->'))]
except Exception as e:
print("Wrong input: %s" % e)
continue
def text_between(_str: str, begin: str, end: str) -> str:
start = _str.find(begin)
stop = _str.find(end)
if start != -1 and stop != -1:
return _str[start+len(begin):stop]
else:
return ""
def f2b_print_data_list():
checking_directory = select_directory_from_list(dirs('.'))
f2b_files = glob.glob("%s/*.fb2" % checking_directory)
counter = 0
for f2b_file in f2b_files:
try:
text = codecs.open(f2b_file, 'r', encoding='utf8').read()
counter += 1
print("%d. %s - %s %s %s" %
(counter,
text_between(text, "<book-title>", "</book-title>"),
text_between(text, "<first-name>", "</first-name>"),
text_between(text, "<middle-name>", "</middle-name>"),
text_between(text, "<last-name>", "</last-name>")
))
except Exception as e:
print("Exception while parsing %s: %s" % (f2b_file, e))
if __name__ == "__main__":
f2b_print_data_list()
| 29.207547 | 75 | 0.554264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.166667 |
c09c7f0c8e41ed1996a2664259286c39cad5f12c | 2,403 | py | Python | simplecaptcha/fields.py | Kromey/django-simplecaptcha | ad462f8742be19b1e87103f097853d41e21d0e0a | [
"MIT"
]
| 5 | 2015-11-12T06:31:08.000Z | 2017-03-09T06:45:46.000Z | simplecaptcha/fields.py | Kromey/django-simplecaptcha | ad462f8742be19b1e87103f097853d41e21d0e0a | [
"MIT"
]
| null | null | null | simplecaptcha/fields.py | Kromey/django-simplecaptcha | ad462f8742be19b1e87103f097853d41e21d0e0a | [
"MIT"
]
| null | null | null | import time
from django import forms
from django.core.exceptions import ValidationError
from .widgets import CaptchaWidget
from .settings import DURATION
class CaptchaField(forms.MultiValueField):
"""A field that contains and validates a simple catcha question
WARNING: If you use this field directly in your own forms, you may be
    caught by surprise by the fact that Django forms rely upon class objects
    rather than instance objects for their fields. This means that your captcha
will not be updated when you instantiate a new form, and you'll end up
asking your users the same question over and over -- largely defeating the
purpose of a captcha! To solve this, either use the @decorator instead, or
be sure to call upon the widget to update its captcha question.
"""
widget = CaptchaWidget
def __init__(self, *args, **kwargs):
"""Sets up the MultiValueField"""
fields = (
forms.CharField(),
forms.CharField(),
forms.CharField(),
)
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
"""Validates the captcha answer and returns the result
If no data is provided, this method will simply return None. Otherwise,
it will validate that the provided answer and timestamp hash to the
supplied hash value, and that the timestamp is within the configured
time that captchas are considered valid.
"""
if data_list:
# Calculate the hash of the supplied values
hashed = self.widget.hash_answer(answer=data_list[0], timestamp=data_list[1])
# Current time
timestamp = time.time()
if float(data_list[1]) < timestamp - DURATION:
raise ValidationError("Captcha expired, please try again", code='invalid')
elif hashed != data_list[2]:
raise ValidationError("Incorrect answer", code='invalid')
# Return the supplied answer
return data_list[0]
else:
return None
@property
def label(self):
"""The captcha field's label is the captcha question itself"""
return self.widget._question
@label.setter
def label(self, value):
"""The question is generated by the widget and cannot be externally set"""
pass
| 35.338235 | 90 | 0.650853 | 2,241 | 0.932584 | 0 | 0 | 275 | 0.11444 | 0 | 0 | 1,275 | 0.530587 |
c09e72d5be2ef0cef0c360e31efc8610a74ed555 | 4,940 | py | Python | skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
]
| 3 | 2021-11-21T17:21:12.000Z | 2021-12-10T21:19:57.000Z | skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
]
| 16 | 2021-10-06T11:20:35.000Z | 2022-02-02T11:44:28.000Z | skills_taxonomy_v2/analysis/sentence_classifier/notebooks/Skills Classifier 1.0 - Doccano Baseline Classifier.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
]
| 1 | 2021-10-04T12:27:20.000Z | 2021-10-04T12:27:20.000Z | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Existing skill tags data
# 1. Look at data
# 2. Build a simple baseline classifier
#
# Karlis tagged 50 job adverts, marking where skills were mentioned. Can we train something to identify whether a sentence is about skills or not?
#
# Would be helpful for taking out the junk.
# +
from sklearn.linear_model import LogisticRegression
import json
import random
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
accuracy_score,
classification_report,
f1_score,
precision_score,
recall_score,
)
# -
# ### Import data
with open(
"../../../../inputs/karlis_ojo_manually_labelled/OJO_test_labelling_April2021_jobs.jsonl",
"r",
) as file:
jobs_data = [json.loads(line) for line in file]
jobs_data[0].keys()
with open(
"../../../../inputs/karlis_ojo_manually_labelled/OJO_test_labelling_April2021_labels.json",
"r",
) as file:
labels_data = json.load(file)
label_type_dict = {label_type["id"]: label_type["text"] for label_type in labels_data}
label_type_dict
# ### Restructuring to have a look
# +
all_job_tags_text = {}
for job_id, job_info in enumerate(jobs_data):
text = job_info["text"]
annotations = job_info["annotations"]
job_tags_text = {}
for label_number, label_type in label_type_dict.items():
job_tags_text[label_type] = [
text[label["start_offset"] : label["end_offset"]]
for label in annotations
if label["label"] == label_number
]
all_job_tags_text[job_id] = job_tags_text
# -
job_id = 1
print(jobs_data[job_id]["text"])
print("\n")
print(all_job_tags_text[job_id]["SKILL"])
print(all_job_tags_text[job_id]["SKILL-RELATED"])
# ## Create a basic classifier
# Label sentences as containing skills (1) or not (0)
#
# The method assumes sentences are split on full stops, so it will run into problems if a skill span contains a full stop.
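# (e.g. a skill span containing an abbreviation like "M.Sc." would be split across two sentences)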
def label_sentences(job_id):
annotations = jobs_data[job_id]["annotations"]
skill_spans = [
(label["start_offset"], label["end_offset"])
for label in annotations
if label["label"] in [1, 5]
]
sentences = jobs_data[job_id]["text"].split(".")
# Indices of where sentences start and end
sentences_ix = []
for i, sentence in enumerate(sentences):
if i == 0:
start = 0
else:
start = sentences_ix[i - 1][1] + 1
sentences_ix.append((start, start + len(sentence)))
# Find which sentences contain skills
sentences_label = [0] * len(sentences)
for (skill_start, skill_end) in skill_spans:
for i, (sent_s, sent_e) in enumerate(sentences_ix):
if sent_s <= skill_start and sent_e >= skill_end:
sentences_label[i] = 1
return sentences, sentences_label
# Testing
job_id = 2
sentences, sentences_label = label_sentences(job_id)
print(all_job_tags_text[job_id]["SKILL"])
print(all_job_tags_text[job_id]["SKILL-RELATED"])
print([sentences[i] for i, label in enumerate(sentences_label) if label == 1])
print([sentences[i] for i, label in enumerate(sentences_label) if label == 0])
# Create training dataset
X = []
y = []
for job_id in range(len(jobs_data)):
sentences, sentences_label = label_sentences(job_id)
for sentence, sentence_label in zip(sentences, sentences_label):
X.append(sentence)
y.append(sentence_label)
# +
# Random shuffle data points
shuffle_index = list(range(len(X)))
random.Random(42).shuffle(shuffle_index)
X = [X[i] for i in shuffle_index]
y = [y[i] for i in shuffle_index]
# Split test/train set
train_split = 0.75
len_train = round(len(X) * train_split)
X_train = X[0:len_train]
y_train = y[0:len_train]
X_test = X[len_train:]
y_test = y[len_train:]
# -
print(len(X))
print(len(y_train))
print(len(y_test))
vectorizer = CountVectorizer(
analyzer="word",
token_pattern=r"(?u)\b\w+\b",
ngram_range=(1, 2),
stop_words="english",
)
X_train_vect = vectorizer.fit_transform(X_train)
model = MultinomialNB()
model = model.fit(X_train_vect, y_train)
X_test_vect = vectorizer.transform(X_test)
y_test_pred = model.predict(X_test_vect)
print(classification_report(y_test, y_test_pred))
# +
# LogisticRegression
model = LogisticRegression(max_iter=1000, class_weight="balanced")
model = model.fit(X_train_vect, y_train)
X_test_vect = vectorizer.transform(X_test)
y_test_pred = model.predict(X_test_vect)
print(classification_report(y_test, y_test_pred))
# -
| 26.417112 | 130 | 0.696964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,458 | 0.295142 |
c0a105c9215ae6c27a0f573e79372161fa79223f | 3,054 | py | Python | exhibitor/migrations/0001_initial.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
]
| 19 | 2016-04-09T10:13:26.000Z | 2020-06-21T23:14:16.000Z | exhibitor/migrations/0001_initial.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
]
| 13 | 2017-01-14T20:42:45.000Z | 2019-08-10T22:48:44.000Z | exhibitor/migrations/0001_initial.py | Make-Munich/SaBoT | cabc7e2f5e0f7166d94d2ef683f75d8d3be02834 | [
"MIT"
]
| 9 | 2016-04-09T12:52:48.000Z | 2018-08-16T19:08:16.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-11 13:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Exhibitor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createDate', models.DateField(auto_now_add=True)),
('modifyDate', models.DateField(auto_now=True)),
('projectName', models.CharField(max_length=128, verbose_name='Project name')),
('logo', models.ImageField(blank=True, upload_to=b'exhibitors/logos', verbose_name='Project logo')),
('homepage', models.URLField(blank=True, verbose_name='Project homepage url')),
('descriptionDE', models.TextField(blank=True, verbose_name='Description text of your project (German)')),
('descriptionEN', models.TextField(blank=True, verbose_name='Description text of your project (English)')),
('boothPreferedLocation', models.PositiveIntegerField(choices=[(1, b'Mensa'), (2, b'First Floor'), (0, b'No preference')], default=0, verbose_name='Do you have a preferred location for your booth?')),
('boothNumTables', models.PositiveIntegerField(blank=True, null=True, verbose_name='How many tables do you need (roughly 1.20m x 0.80m)?')),
('boothNumChairs', models.PositiveIntegerField(blank=True, null=True, verbose_name='How many chairs do you need?')),
('boothComment', models.TextField(blank=True, verbose_name='Here you have the chance to leave us further comments regarding your booth:')),
('accepted', models.BooleanField(default=False, editable=False)),
('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='exhibitors', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ExhibitorParticipants',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isAdmin', models.BooleanField(default=False)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exhibitor.Exhibitor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='exhibitor',
name='participants',
field=models.ManyToManyField(blank=True, editable=False, related_name='exhibitorparticipation', through='exhibitor.ExhibitorParticipants', to=settings.AUTH_USER_MODEL),
),
]
| 57.622642 | 216 | 0.655206 | 2,830 | 0.926654 | 0 | 0 | 0 | 0 | 0 | 0 | 834 | 0.273084 |
c0a1254294cd0330d5440776840a312ac8bb2711 | 1,455 | py | Python | pyatlas/tests/programatic_apikey_pytests.py | jasonmimick/pyatlas | 4b0198d0c6f87691175d79629b2689f02e58ec8b | [
"Apache-2.0"
]
| null | null | null | pyatlas/tests/programatic_apikey_pytests.py | jasonmimick/pyatlas | 4b0198d0c6f87691175d79629b2689f02e58ec8b | [
"Apache-2.0"
]
| null | null | null | pyatlas/tests/programatic_apikey_pytests.py | jasonmimick/pyatlas | 4b0198d0c6f87691175d79629b2689f02e58ec8b | [
"Apache-2.0"
]
| null | null | null | import pytest
import pprint
import string
import random
import os
from pyatlas import AtlasClient
#from testutils import *
@pytest.fixture
def public_key():
return "NGKMIHEO"
@pytest.fixture
def private_key():
return "66bcc7de-b0de-4d8d-9695-ef97637c6895"
@pytest.fixture
def client(public_key, private_key):
return AtlasClient(public_key, private_key)
@pytest.fixture
def project_name():
return new_test_project_name()
@pytest.fixture
def org_id():
return "5d371dda553855dd17d4fcf9"
@pytest.fixture
def project(client, project_name, org_id):
print(f'Creating new project for test project_name:{project_name}')
project = client.create_project( project_name, org_id=org_id )
return project
def test_create_apikey(client,project):
project_name=project['content']['name']
print(f'project_name={project_name}')
desc = f"test key for project {project_name}"
key = client.create_apikey(project_name=project_name
,description=desc)
print('-------------------- start generated apikey --------------------')
print(key)
print('-------------------- end generated apikey --------------------')
assert key is not None
## utils
def random_token(N=5):
token=''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))
print(f'token={token}')
return token
def new_test_project_name():
project_name=f'pyatlas-test-{random_token()}'
return project_name
| 25.982143 | 103 | 0.707904 | 0 | 0 | 0 | 0 | 577 | 0.396564 | 0 | 0 | 429 | 0.294845 |
c0a2cc928103a456829c10c8f15fb433031cebc9 | 6,036 | py | Python | etils/epath/abstract_path.py | google/etils | ff4c222cd6ce2f416d66a3cd64b39125f5ad25de | [
"Apache-2.0"
]
| 13 | 2021-12-14T19:18:53.000Z | 2022-03-30T17:09:20.000Z | etils/epath/abstract_path.py | google/etils | ff4c222cd6ce2f416d66a3cd64b39125f5ad25de | [
"Apache-2.0"
]
| 2 | 2022-01-07T01:34:33.000Z | 2022-01-12T01:35:33.000Z | etils/epath/abstract_path.py | google/etils | ff4c222cd6ce2f416d66a3cd64b39125f5ad25de | [
"Apache-2.0"
]
| 1 | 2022-01-04T14:34:30.000Z | 2022-01-04T14:34:30.000Z | # Copyright 2022 The etils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract path."""
from __future__ import annotations
import os
import pathlib
import typing
from typing import Any, AnyStr, Iterator, Optional, Type, TypeVar
from etils.epath.typing import PathLike
T = TypeVar('T')
# Ideally, `Path` should be `abc.ABC`. However, this triggers pytype errors
# when calling `Path()` (can't instantiate an abstract base class).
# Not subclassing ABC also allows Path subclasses to only partially implement
# the Path API (e.g. a read-only path).
def abstractmethod(fn: T) -> T:
return fn
class Path(pathlib.PurePosixPath):
"""Abstract base class for pathlib.Path-like API.
See [pathlib.Path](https://docs.python.org/3/library/pathlib.html)
documentation.
"""
def __new__(cls: Type[T], *args: PathLike) -> T:
"""Create a new path.
```python
path = abcpath.Path()
```
Args:
*args: Paths to create
Returns:
path: The registered path
"""
from etils.epath import register # pylint: disable=g-import-not-at-top
if cls == Path:
if not args:
return register.make_path('.')
root, *parts = args
return register.make_path(root).joinpath(*parts)
else:
return super().__new__(cls, *args)
# ====== Pure paths ======
# py3.9 backport of PurePath.is_relative_to.
def is_relative_to(self, *other: PathLike) -> bool:
"""Return True if the path is relative to another path or False."""
try:
self.relative_to(*other)
return True
except ValueError:
return False
def format(self: T, *args: Any, **kwargs: Any) -> T:
"""Apply `str.format()` to the path."""
return type(self)(os.fspath(self).format(*args, **kwargs)) # pytype: disable=not-instantiable
# ====== Read-only methods ======
@abstractmethod
def exists(self) -> bool:
"""Returns True if self exists."""
raise NotImplementedError
@abstractmethod
def is_dir(self) -> bool:
"""Returns True if self is a dir."""
raise NotImplementedError
def is_file(self) -> bool:
"""Returns True if self is a file."""
return not self.is_dir()
@abstractmethod
def iterdir(self: T) -> Iterator[T]:
"""Iterates over the directory."""
raise NotImplementedError
@abstractmethod
def glob(self: T, pattern: str) -> Iterator[T]:
"""Yielding all matching files (of any kind)."""
# Might be able to implement using `iterdir` (recursivelly for `rglob`).
raise NotImplementedError
def rglob(self: T, pattern: str) -> Iterator[T]:
"""Yielding all matching files recursivelly (of any kind)."""
return self.glob(f'**/{pattern}')
def expanduser(self: T) -> T:
"""Returns a new path with expanded `~` and `~user` constructs."""
if '~' not in self.parts: # pytype: disable=attribute-error
return self
raise NotImplementedError
@abstractmethod
def resolve(self: T, strict: bool = False) -> T:
"""Returns the absolute path."""
raise NotImplementedError
@abstractmethod
def open(
self,
mode: str = 'r',
encoding: Optional[str] = None,
errors: Optional[str] = None,
**kwargs: Any,
) -> typing.IO[AnyStr]:
"""Opens the file."""
raise NotImplementedError
def read_bytes(self) -> bytes:
"""Reads contents of self as bytes."""
with self.open('rb') as f:
return f.read()
def read_text(self, encoding: Optional[str] = None) -> str:
"""Reads contents of self as bytes."""
with self.open('r', encoding=encoding) as f:
return f.read()
# ====== Write methods ======
@abstractmethod
def mkdir(
self,
mode: int = 0o777,
parents: bool = False,
exist_ok: bool = False,
) -> None:
"""Create a new directory at this given path."""
raise NotImplementedError
@abstractmethod
def rmdir(self) -> None:
"""Remove the empty directory at this given path."""
raise NotImplementedError
@abstractmethod
def rmtree(self) -> None:
"""Remove the directory, including all sub-files."""
raise NotImplementedError
@abstractmethod
def unlink(self, missing_ok: bool = False) -> None:
"""Remove this file or symbolic link."""
raise NotImplementedError
def write_bytes(self, data: bytes) -> int:
"""Writes content as bytes."""
with self.open('wb') as f:
return f.write(data)
def write_text(
self,
data: str,
encoding: Optional[str] = None,
errors: Optional[str] = None,
) -> int:
"""Writes content as str."""
if encoding and encoding.lower() not in {'utf8', 'utf-8'}:
raise NotImplementedError(f'Non UTF-8 encoding not supported for {self}')
if errors:
raise NotImplementedError(f'Error not supported for writing {self}')
with self.open('w') as f:
return f.write(data)
def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
"""Create a file at this given path."""
if mode != 0o666:
raise NotImplementedError(f'Only mode=0o666 supported for {self}')
if self.exists():
if exist_ok:
return
else:
raise FileExistsError(f'{self} already exists.')
self.write_text('')
@abstractmethod
def rename(self: T, target: PathLike) -> T:
"""Renames the path."""
@abstractmethod
def replace(self: T, target: PathLike) -> T:
"""Overwrites the destination path."""
@abstractmethod
def copy(self: T, dst: PathLike, overwrite: bool = False) -> T:
"""Copy the current file to the given destination."""
| 28.338028 | 98 | 0.650431 | 4,949 | 0.819914 | 0 | 0 | 1,902 | 0.315109 | 0 | 0 | 2,594 | 0.429755 |
c0a387ecd74cdd18e1dae76a145f773dea75a7b7 | 262 | py | Python | src/syntax/statements/__init__.py | andaviaco/tronido | 08a1619a0e8c10f35ed6710eb6e1f72dc5b87421 | [
"MIT"
]
| null | null | null | src/syntax/statements/__init__.py | andaviaco/tronido | 08a1619a0e8c10f35ed6710eb6e1f72dc5b87421 | [
"MIT"
]
| null | null | null | src/syntax/statements/__init__.py | andaviaco/tronido | 08a1619a0e8c10f35ed6710eb6e1f72dc5b87421 | [
"MIT"
]
| null | null | null | from .ifstat import IfStat
from .returnstat import ReturnStat
from .whilestat import WhileStat
from .breakstat import BreakStat
from .switchstat import SwitchStat
from .casestat import CaseStat
from .forstat import ForStat
from .continuestat import ContinueStat
| 29.111111 | 38 | 0.847328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c0a3c20650d9f2b0b50513762c0375912b29d194 | 2,594 | py | Python | tests/test_action_guest_process_start.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
]
| null | null | null | tests/test_action_guest_process_start.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
]
| 2 | 2019-03-25T18:03:02.000Z | 2019-03-26T13:13:59.000Z | tests/test_action_guest_process_start.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
]
| 1 | 2021-03-05T10:12:21.000Z | 2021-03-05T10:12:21.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from vsphere_base_action_test_case import VsphereBaseActionTestCase
from guest_process_start import StartProgramInGuest
__all__ = [
'StartProgramInGuestTestCase'
]
class StartProgramInGuestTestCase(VsphereBaseActionTestCase):
__test__ = True
action_cls = StartProgramInGuest
@mock.patch('pyVmomi.vim.vm.guest.ProcessManager')
def test_normal(self, mock_process_manager):
        # Vary the arguments value, including passing None; each iteration
        # checks that the ProgramSpec is built from the supplied arguments.
for argdata in (None, 'onearg', 'two arguments'):
(action, mock_vm) = self.mock_one_vm('vm-12345')
mockProcMgr = mock.Mock()
mockProcMgr.StartProgramInGuest = mock.Mock()
mockProcMgr.StartProgramInGuest.return_value = 12345
action.si_content.guestOperationsManager = mock.Mock()
action.si_content.guestOperationsManager.processManager =\
mockProcMgr
mock_process_manager.ProgramSpec.return_value = 'cmdspec'
envvars = ["A=B", "C=D"] if argdata else None
result = action.run(vm_id='vm-12345', username='u',
password='p', command='c',
arguments=argdata, workdir='/tmp',
envvar=envvars)
mock_process_manager.ProgramSpec.assert_called_with(
arguments='' if not argdata else argdata,
envVariables=envvars,
programPath='c',
workingDirectory='/tmp'
)
mockProcMgr.StartProgramInGuest.assert_called_once_with(
mock_vm, action.guest_credentials, 'cmdspec',
)
self.assertEqual(result, 12345)
| 43.233333 | 74 | 0.658443 | 1,663 | 0.641095 | 0 | 0 | 1,539 | 0.593292 | 0 | 0 | 1,057 | 0.407479 |
c0a3f676d422bbdd29b5d1ae6fd198e164330819 | 4,192 | py | Python | src/soda/mutator.py | UCLA-VAST/soda | 1b3994ded643d82ebc2fce7b1eb1d13c70800897 | [
"MIT"
]
| 9 | 2020-05-09T19:52:46.000Z | 2021-09-15T13:45:27.000Z | src/soda/mutator.py | UCLA-VAST/soda | 1b3994ded643d82ebc2fce7b1eb1d13c70800897 | [
"MIT"
]
| 1 | 2021-07-26T08:51:49.000Z | 2021-07-26T08:51:49.000Z | src/soda/mutator.py | UCLA-VAST/soda | 1b3994ded643d82ebc2fce7b1eb1d13c70800897 | [
"MIT"
]
| 1 | 2020-10-28T03:06:44.000Z | 2020-10-28T03:06:44.000Z | from typing import (
Iterable,
Mapping,
MutableMapping,
Optional,
Tuple,
TypeVar,
Union,
)
import collections.abc
import logging
import operator
import types
from haoda import ir
from soda import tensor
import soda.visitor
_logger = logging.getLogger().getChild(__name__)
def shift(obj, offset, excluded=(), op=operator.sub, verbose=False):
"""Shift soda.ir.Ref with the given offset.
All soda.ir.Ref, excluding the given names, will be shifted with the
given offset using the given operator. The operator will be applied pointwise
on the original index and the given offset.
Args:
obj: A haoda.ir.Node or a tensor.Tensor object.
offset: Second operand given to the operator.
excluded: Sequence of names to be excluded from the mutation. Default to ().
op: Shifting operator. Should be either add or sub. Default to sub.
verbose: Whether to log shiftings. Default to False.
Returns:
Mutated obj. If obj is an IR node, it will be a different object than the
input. If obj is a tensor, it will be the same object but with fields
mutated.
"""
if op not in (operator.add, operator.sub):
_logger.warn('shifting with neither + nor -, which most likely is an error')
def visitor(obj, args):
if isinstance(obj, ir.Ref):
if obj.name not in excluded:
new_idx = tuple(op(a, b) for a, b in zip(obj.idx, offset))
if verbose:
_logger.debug('reference %s(%s) shifted to %s(%s)', obj.name,
', '.join(map(str, obj.idx)), obj.name,
', '.join(map(str, new_idx)))
obj.idx = new_idx
if isinstance(obj, ir.Node):
return obj.visit(visitor)
if isinstance(obj, tensor.Tensor):
obj.mutate(visitor)
else:
raise TypeError('argument is not an IR node or a tensor')
return obj
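# For intuition: with the default subtract operator, shifting a reference
# b(1, 2) by offset (1, 1) rewrites the access to b(0, 1); op is applied
# pointwise to each (index, offset) pair.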
def normalize(obj: Union[ir.Node, Iterable[ir.Node]],
references: Optional[Mapping[str, Tuple[int, ...]]] = None):
"""Make the least access index 0.
Works on an ir.Node or an iterable of ir.Nodes. If it is shifted, a different
object is constructed and returned. Otherwise, obj will be returned as-is.
Args:
obj: A node or an iterable of nodes.
Returns:
Normalized node or iterable.
Raises:
TypeError: If argument is not an ir.Node or an iterable of ir.Nodes.
"""
if isinstance(obj, types.GeneratorType):
return normalize(tuple(obj))
norm_idx = soda.visitor.get_normalize_index(obj, references)
shifter = lambda x: shift(x, norm_idx) if any(norm_idx) else x
  if isinstance(obj, collections.abc.Iterable):
return type(obj)(map(shifter, obj)) # type: ignore
if isinstance(obj, ir.Node):
return shifter(obj)
raise TypeError('argument is not an ir.Node or an iterable of ir.Nodes')
NodeT = TypeVar('NodeT', bound=ir.Node)
def replace_expressions(
obj: NodeT,
cses: MutableMapping[NodeT, ir.Ref],
used: Optional[MutableMapping[NodeT, NodeT]] = None,
references: Optional[Mapping[str, Tuple[int, ...]]] = None,
) -> NodeT:
"""Get AST with common subexpression elimination.
Get AST with the given common subexpressions. If used is not None, the used
common subexpressions will be added to used.
Args:
obj: An ir.Node.
cses: Dict mapping normalized common subexpressions to the new ir.Ref.
used: Set of used common subexpressions, or None.
Returns:
The ir.Node as the AST.
"""
def visitor(
obj: NodeT,
args: Tuple[MutableMapping[NodeT, ir.
Ref], Optional[MutableMapping[NodeT, NodeT]]]
) -> NodeT:
cses, used = args
norm_idx = soda.visitor.get_normalize_index(obj, references)
normalized = shift(obj, norm_idx) if any(norm_idx) else obj
if normalized in cses:
if used is not None:
if normalized not in used:
used[normalized] = replace_expressions(
normalized, {k: v for k, v in cses.items() if k != normalized},
used)
new_obj = shift(cses[normalized], norm_idx, op=operator.add)
_logger.debug('replacing %s with %s', obj, new_obj)
return new_obj
return obj
return obj.visit(visitor, (cses, used))
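# Rough intuition (informal notation): if cses maps the normalized expression
# a(0) + b(0) to a reference t(0), then a(1) + b(1) normalizes to the same key
# and is rewritten to t(1), i.e. the stored reference shifted back by the
# normalization offset.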
| 32.246154 | 80 | 0.669132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,741 | 0.415315 |
c0a4975e4ff94754e10e515af8e9f52525f2cf81 | 485 | py | Python | tests/skillmap_parser_test.py | niyue/skillmap | ca1011d5f822134ad1d7c5f7f243da30a0731170 | [
"MIT"
]
| 69 | 2022-02-27T13:05:20.000Z | 2022-03-31T23:12:48.000Z | tests/skillmap_parser_test.py | chandrab/skillmap | 6d07dc5392b0fd66d310db8cb85051cf9e0e93df | [
"MIT"
]
| 1 | 2022-02-27T22:44:36.000Z | 2022-03-02T01:15:52.000Z | tests/skillmap_parser_test.py | chandrab/skillmap | 6d07dc5392b0fd66d310db8cb85051cf9e0e93df | [
"MIT"
]
| 2 | 2022-02-27T23:37:15.000Z | 2022-03-14T12:32:22.000Z | from skillmap.skillmap_parser import SkillMapParser
def test_parse_toml():
parser = SkillMapParser()
skill_map = parser.parse('tests/url_shortener.toml')
assert skill_map
assert skill_map['skillmap']['name'] == "url shortener"
assert skill_map['groups']['webui']['name'] == "web ui"
assert skill_map['groups']['webui']['skills']['url_validator']['name'] == "url validator"
assert skill_map['groups']['webui']['skills']['url_validator']['icon'] == "globe"
| 40.416667 | 93 | 0.682474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.404124 |
c0a71acf6116e8faa1f0455b3919ee53b2e3be9c | 2,923 | py | Python | htdocs/plotting/auto/scripts/p66.py | jamayfieldjr/iem | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | [
"MIT"
]
| 1 | 2019-10-07T17:01:24.000Z | 2019-10-07T17:01:24.000Z | htdocs/plotting/auto/scripts/p66.py | jamayfieldjr/iem | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | [
"MIT"
]
| null | null | null | htdocs/plotting/auto/scripts/p66.py | jamayfieldjr/iem | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | [
"MIT"
]
| null | null | null | """Consec days"""
import calendar
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
PDICT = {'above': 'Temperature At or Above (AOA) Threshold',
'below': 'Temperature Below Threshold'}
PDICT2 = {'high': 'High Temperature',
'low': 'Low Temperature'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
    desc['description'] = """This chart presents the daily frequency of the
    given date having the prescribed number of previous days above or below
    some provided threshold."""
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station:', network='IACLIMATE'),
dict(type='select', name='var', default='high', options=PDICT2,
label='Select which daily variable'),
dict(type='select', name='dir', default='above', options=PDICT,
label='Select temperature direction'),
dict(type='int', name='threshold', default='60',
label='Temperature Threshold (F):'),
dict(type='int', name='days', default='7',
label='Number of Days:')
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
days = ctx['days']
threshold = ctx['threshold']
varname = ctx['var']
mydir = ctx['dir']
table = "alldata_%s" % (station[:2],)
agg = "min" if mydir == 'above' else 'max'
op = ">=" if mydir == 'above' else '<'
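    # Windowed trick: for 'above', every day in the window clears the threshold
    # exactly when the window minimum does; for 'below', when the maximum does.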
df = read_sql("""
with data as (select day,
"""+agg+"""("""+varname+""")
OVER (ORDER by day ASC ROWS BETWEEN %s PRECEDING
and CURRENT ROW) as agg from """ + table + """
where station = %s)
select extract(doy from day) as doy,
sum(case when agg """+op+""" %s then 1 else 0 end)
/ count(*)::float * 100. as freq
from data GROUP by doy ORDER by doy asc
""", pgconn, params=(days - 1, station, threshold), index_col='doy')
fig, ax = plt.subplots(1, 1, sharex=True)
label = "AOA" if mydir == 'above' else 'below'
ax.set_title(("[%s] %s\nFrequency of %s Consec Days"
r" with %s %s %s$^\circ$F "
) % (station, ctx['_nt'].sts[station]['name'],
days, varname.capitalize(), label, threshold))
ax.set_ylabel("Frequency of Days [%]")
ax.set_ylim(0, 100)
ax.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
ax.grid(True)
ax.bar(df.index.values, df['freq'], width=1)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
305, 335, 365))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.set_xlim(0, 366)
return fig, df
if __name__ == '__main__':
plotter(dict())
| 34.388235 | 75 | 0.584673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.422169 |
c0a82c8edb06be2ad657e3733a1e2ee863cd955f | 32 | py | Python | resolwe/rest/__init__.py | plojyon/resolwe | 1bee6f0860fdd087534adf1680e9350d79ab97cf | [
"Apache-2.0"
]
| 27 | 2015-12-07T18:29:12.000Z | 2022-03-16T08:01:47.000Z | resolwe/rest/__init__.py | plojyon/resolwe | 1bee6f0860fdd087534adf1680e9350d79ab97cf | [
"Apache-2.0"
]
| 681 | 2015-12-01T11:52:24.000Z | 2022-03-21T07:43:37.000Z | resolwe/rest/__init__.py | plojyon/resolwe | 1bee6f0860fdd087534adf1680e9350d79ab97cf | [
"Apache-2.0"
]
| 28 | 2015-12-01T08:32:57.000Z | 2021-12-14T00:04:16.000Z | """Resolwe REST API helpers."""
| 16 | 31 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.96875 |
c0a873492ea0286949973b05633bb259a0fc0b1d | 422 | py | Python | requests/requests-custom_authentication.py | all3g/pieces | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
]
| 34 | 2016-10-31T02:05:24.000Z | 2018-11-08T14:33:13.000Z | requests/requests-custom_authentication.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
]
| 2 | 2017-05-11T03:00:31.000Z | 2017-11-01T23:37:37.000Z | requests/requests-custom_authentication.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
]
| 21 | 2016-08-19T09:05:45.000Z | 2018-11-08T14:33:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from requests.auth import AuthBase
class PizzaAuth(AuthBase):
"""Attaches HTTP Pizza Authentication to the given Request object."""
def __init__(self, username):
# setup any auth-related data here.
self.username = username
def __call__(self, r):
# modify and return the request
r.headers['X-Pizza'] = self.username
return r
| 24.823529 | 73 | 0.64455 | 341 | 0.808057 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.436019 |
c0a93dc0b3c06bf5e6cdc0aa43def476e965448d | 866 | py | Python | csv_test.py | mii012345/deep-learning | 660785157446583eefeefa9d5dc25927aab6a9e4 | [
"MIT"
]
| 3 | 2017-06-04T06:59:38.000Z | 2017-06-05T14:01:48.000Z | csv_test.py | mii012345/deep-learning | 660785157446583eefeefa9d5dc25927aab6a9e4 | [
"MIT"
]
| null | null | null | csv_test.py | mii012345/deep-learning | 660785157446583eefeefa9d5dc25927aab6a9e4 | [
"MIT"
]
| null | null | null | import csv
import numpy as np
import pickle
with open('data (2).csv','r') as f:
csv = csv.reader(f)
csvlist = []
for i in csv:
csvlist.append(i)
# from the 6th row onward
mas = []
for i in range(364):
i+=6
a = 0
b = 0
c = 0
date = csvlist[i][0]
weather = csvlist[i][1]
if date[0:10] == "2016/11/1 " or date[0:10] == "2016/11/2 " or date[0:10] == "2016/11/3 " or date[0:9] == "2016/11/4" or date[0:9] == "2016/11/5" or date[0:9] == "2016/11/6" or date[0:9] == "2016/11/7":
continue
if weather == "1" or weather == "2":
a = 1
elif weather == "3" or weather == "4" or weather == "5" or weather == "6":
b = 1
else:
c = 1
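    # One-hot label per day: [codes 1-2, codes 3-6, anything else]
    # (presumably clear / rain / other weather categories).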
w = [a,b,c]
print(date[0:10])
mas.append(w)
mas = np.array(mas)
with open('tenki_num.pkl','wb') as f:
pickle.dump(mas,f)
| 24.055556 | 207 | 0.489607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.170481 |
c0ab2b2d4a6d03d592483deeff8d92956a06c0e7 | 287 | py | Python | src/test_main.py | HenrikPilz/BMEcatConverter | 28c6840fc70a3f04e3eae5fc7be32c7bc779c1da | [
"BSD-3-Clause"
]
| 1 | 2021-03-14T08:20:51.000Z | 2021-03-14T08:20:51.000Z | src/test_main.py | HenrikPilz/BMEcatConverter | 28c6840fc70a3f04e3eae5fc7be32c7bc779c1da | [
"BSD-3-Clause"
]
| 1 | 2021-11-29T09:56:18.000Z | 2021-12-01T22:01:13.000Z | src/test_main.py | HenrikPilz/BMEcatConverter | 28c6840fc70a3f04e3eae5fc7be32c7bc779c1da | [
"BSD-3-Clause"
]
| 2 | 2021-08-30T08:14:34.000Z | 2021-09-28T15:10:23.000Z | import os
import test
import unittest
def tests():
if not os.path.exists("../test_output"):
os.makedirs(os.path.join(os.path.dirname(__file__), "../test_output"), exist_ok=True)
unittest.main(test)
# if __name__ == '__main__':
# data model tests
tests()
| 19.133333 | 94 | 0.648084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.275261 |
c0ab90f34a7bc1c416809bd67bdc787e6a30c4a3 | 99 | py | Python | problem/01000~09999/02857/2857.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/02857/2857.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/02857/2857.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | a=1
for i in range(5):
if 'FBI' in input():
print(i+1,end=' ')
a=0
if a: print('HE GOT AWAY!') | 16.5 | 27 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.222222 |
c0abaf869bbe93d0c4be20bb53db1ca7697f6d3d | 1,971 | py | Python | ntm/ntm.py | clemkoa/ntm | 723d4ebea63f8f9439fd1c56f36e3cb680c8a277 | [
"MIT"
]
| 41 | 2020-05-19T05:48:04.000Z | 2021-11-24T11:31:08.000Z | ntm/ntm.py | clemkoa/ntm | 723d4ebea63f8f9439fd1c56f36e3cb680c8a277 | [
"MIT"
]
| 3 | 2021-06-07T09:00:59.000Z | 2021-12-30T17:21:07.000Z | ntm/ntm.py | clemkoa/ntm | 723d4ebea63f8f9439fd1c56f36e3cb680c8a277 | [
"MIT"
]
| 4 | 2020-12-31T17:39:42.000Z | 2021-12-29T14:11:43.000Z | import torch
from torch import nn
import torch.nn.functional as F
from ntm.controller import Controller
from ntm.memory import Memory
from ntm.head import ReadHead, WriteHead
class NTM(nn.Module):
def __init__(self, vector_length, hidden_size, memory_size, lstm_controller=True):
super(NTM, self).__init__()
self.controller = Controller(lstm_controller, vector_length + 1 + memory_size[1], hidden_size)
self.memory = Memory(memory_size)
self.read_head = ReadHead(self.memory, hidden_size)
self.write_head = WriteHead(self.memory, hidden_size)
self.fc = nn.Linear(hidden_size + memory_size[1], vector_length)
nn.init.xavier_uniform_(self.fc.weight, gain=1)
nn.init.normal_(self.fc.bias, std=0.01)
def get_initial_state(self, batch_size=1):
self.memory.reset(batch_size)
controller_state = self.controller.get_initial_state(batch_size)
read = self.memory.get_initial_read(batch_size)
read_head_state = self.read_head.get_initial_state(batch_size)
write_head_state = self.write_head.get_initial_state(batch_size)
return (read, read_head_state, write_head_state, controller_state)
def forward(self, x, previous_state):
previous_read, previous_read_head_state, previous_write_head_state, previous_controller_state = previous_state
controller_input = torch.cat([x, previous_read], dim=1)
controller_output, controller_state = self.controller(controller_input, previous_controller_state)
# Read
read_head_output, read_head_state = self.read_head(controller_output, previous_read_head_state)
# Write
write_head_state = self.write_head(controller_output, previous_write_head_state)
fc_input = torch.cat((controller_output, read_head_output), dim=1)
state = (read_head_output, read_head_state, write_head_state, controller_state)
        return torch.sigmoid(self.fc(fc_input)), state
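# A minimal usage sketch (sizes assumed; the extra input channel follows the
# copy-task convention of vector_length data bits plus one delimiter bit):
# ntm = NTM(vector_length=8, hidden_size=100, memory_size=(128, 20))
# state = ntm.get_initial_state(batch_size=1)
# output, state = ntm(torch.zeros(1, 9), state)  # one timestep at a time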
| 50.538462 | 118 | 0.742263 | 1,793 | 0.909691 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.006596 |
c0af4a37c3b086f10b2224f1101fb1be4a7fdce1 | 3,468 | py | Python | facebook_business/adobjects/adkeywordstats.py | enricapq/facebook-python-business-sdk | 49c569ac5cf812b1bcb533520c35896b0436fa4c | [
"CNRI-Python"
]
| null | null | null | facebook_business/adobjects/adkeywordstats.py | enricapq/facebook-python-business-sdk | 49c569ac5cf812b1bcb533520c35896b0436fa4c | [
"CNRI-Python"
]
| null | null | null | facebook_business/adobjects/adkeywordstats.py | enricapq/facebook-python-business-sdk | 49c569ac5cf812b1bcb533520c35896b0436fa4c | [
"CNRI-Python"
]
| 1 | 2018-09-24T14:04:48.000Z | 2018-09-24T14:04:48.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
GitHub and we'll fix it in our codegen framework. We'll not be able to accept
pull requests for this class.
"""
class AdKeywordStats(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdKeywordStats = True
super(AdKeywordStats, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
actions = 'actions'
clicks = 'clicks'
cost_per_total_action = 'cost_per_total_action'
cost_per_unique_click = 'cost_per_unique_click'
cpc = 'cpc'
cpm = 'cpm'
cpp = 'cpp'
ctr = 'ctr'
frequency = 'frequency'
id = 'id'
impressions = 'impressions'
name = 'name'
reach = 'reach'
spend = 'spend'
total_actions = 'total_actions'
total_unique_actions = 'total_unique_actions'
unique_actions = 'unique_actions'
unique_clicks = 'unique_clicks'
unique_ctr = 'unique_ctr'
unique_impressions = 'unique_impressions'
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'keywordstats'
_field_types = {
'actions': 'list<AdsActionStats>',
'clicks': 'unsigned int',
'cost_per_total_action': 'float',
'cost_per_unique_click': 'float',
'cpc': 'float',
'cpm': 'float',
'cpp': 'float',
'ctr': 'float',
'frequency': 'float',
'id': 'string',
'impressions': 'unsigned int',
'name': 'string',
'reach': 'unsigned int',
'spend': 'float',
'total_actions': 'unsigned int',
'total_unique_actions': 'unsigned int',
'unique_actions': 'list<AdsActionStats>',
'unique_clicks': 'unsigned int',
'unique_ctr': 'float',
'unique_impressions': 'unsigned int',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
| 35.387755 | 79 | 0.684544 | 1,878 | 0.541522 | 0 | 0 | 176 | 0.05075 | 0 | 0 | 1,992 | 0.574394 |
c0b3ae1a797739b59abdda1942df55aaa68ec172 | 1,198 | py | Python | DQM/TrackingMonitorSource/python/StandaloneTrackMonitor_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQM/TrackingMonitorSource/python/StandaloneTrackMonitor_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQM/TrackingMonitorSource/python/StandaloneTrackMonitor_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
standaloneTrackMonitor = DQMEDAnalyzer('StandaloneTrackMonitor',
moduleName = cms.untracked.string("StandaloneTrackMonitor"),
folderName = cms.untracked.string("highPurityTracks"),
vertexTag = cms.untracked.InputTag("selectedPrimaryVertices"),
puTag = cms.untracked.InputTag("addPileupInfo"),
clusterTag = cms.untracked.InputTag("siStripClusters"),
trackInputTag = cms.untracked.InputTag('selectedTracks'),
offlineBeamSpot = cms.untracked.InputTag('offlineBeamSpot'),
trackQuality = cms.untracked.string('highPurity'),
doPUCorrection = cms.untracked.bool(False),
isMC = cms.untracked.bool(True),
puScaleFactorFile = cms.untracked.string("PileupScaleFactor_run203002.root"),
haveAllHistograms = cms.untracked.bool(False),
verbose = cms.untracked.bool(False),
trackEtaH = cms.PSet(Xbins = cms.int32(60), Xmin = cms.double(-3.0),Xmax = cms.double(3.0)),
trackPtH = cms.PSet(Xbins = cms.int32(100),Xmin = cms.double(0.0),Xmax = cms.double(100.0))
)
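# A possible customization (illustrative only, not part of this cfi): clone the
# module and override selected parameters for a different track collection.
# standaloneTrackMonitorMuon = standaloneTrackMonitor.clone(
#     trackInputTag = 'muonTracks',
#     folderName = 'muonTracks'
# )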
| 59.9 | 104 | 0.687813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.168614 |
c0b40331943eedccfdcfa2dfe402031536f745fe | 7,266 | py | Python | tests/test_spatialvector.py | jungr-ait/spatialmath-python | 140d499e733ed9775762df90d36e4b2c4c2fc6eb | [
"MIT"
]
| 183 | 2020-04-24T02:49:36.000Z | 2022-03-31T16:13:38.000Z | tests/test_spatialvector.py | jungr-ait/spatialmath-python | 140d499e733ed9775762df90d36e4b2c4c2fc6eb | [
"MIT"
]
| 29 | 2020-05-21T04:13:33.000Z | 2022-02-15T12:46:17.000Z | tests/test_spatialvector.py | jungr-ait/spatialmath-python | 140d499e733ed9775762df90d36e4b2c4c2fc6eb | [
"MIT"
]
| 39 | 2020-05-06T11:22:55.000Z | 2022-03-21T14:15:16.000Z |
import unittest
import numpy.testing as nt
import numpy as np
from spatialmath.spatialvector import *
class TestSpatialVector(unittest.TestCase):
def test_list_powers(self):
x = SpatialVelocity.Empty()
self.assertEqual(len(x), 0)
x.append(SpatialVelocity([1, 2, 3, 4, 5, 6]))
self.assertEqual(len(x), 1)
x.append(SpatialVelocity([7, 8, 9, 10, 11, 12]))
self.assertEqual(len(x), 2)
y = x[0]
self.assertIsInstance(y, SpatialVelocity)
self.assertEqual(len(y), 1)
self.assertTrue(all(y.A == np.r_[1, 2, 3, 4, 5, 6]))
y = x[1]
self.assertIsInstance(y, SpatialVelocity)
self.assertEqual(len(y), 1)
self.assertTrue(all(y.A == np.r_[7, 8, 9, 10, 11, 12]))
x.insert(0, SpatialVelocity([20, 21, 22, 23, 24, 25]))
y = x[0]
self.assertIsInstance(y, SpatialVelocity)
self.assertEqual(len(y), 1)
self.assertTrue(all(y.A == np.r_[20, 21, 22, 23, 24, 25]))
y = x[1]
self.assertIsInstance(y, SpatialVelocity)
self.assertEqual(len(y), 1)
self.assertTrue(all(y.A == np.r_[1, 2, 3, 4, 5, 6]))
def test_velocity(self):
a = SpatialVelocity([1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialVelocity)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
a = SpatialVelocity(np.r_[1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialVelocity)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
s = str(a)
self.assertIsInstance(s, str)
self.assertEqual(s.count('\n'), 0)
self.assertTrue(s.startswith('SpatialVelocity'))
r = np.random.rand(6, 10)
a = SpatialVelocity(r)
self.assertIsInstance(a, SpatialVelocity)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 10)
b = a[3]
self.assertIsInstance(b, SpatialVelocity)
self.assertIsInstance(b, SpatialVector)
self.assertIsInstance(b, SpatialM6)
self.assertEqual(len(b), 1)
self.assertTrue(all(b.A == r[:,3]))
s = str(a)
self.assertIsInstance(s, str)
self.assertEqual(s.count('\n'), 9)
def test_acceleration(self):
a = SpatialAcceleration([1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialAcceleration)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
a = SpatialAcceleration(np.r_[1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialAcceleration)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
s = str(a)
self.assertIsInstance(s, str)
self.assertEqual(s.count('\n'), 0)
self.assertTrue(s.startswith('SpatialAcceleration'))
r = np.random.rand(6, 10)
a = SpatialAcceleration(r)
self.assertIsInstance(a, SpatialAcceleration)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialM6)
self.assertEqual(len(a), 10)
b = a[3]
self.assertIsInstance(b, SpatialAcceleration)
self.assertIsInstance(b, SpatialVector)
self.assertIsInstance(b, SpatialM6)
self.assertEqual(len(b), 1)
self.assertTrue(all(b.A == r[:,3]))
s = str(a)
self.assertIsInstance(s, str)
def test_force(self):
a = SpatialForce([1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialForce)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
a = SpatialForce(np.r_[1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialForce)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
s = str(a)
self.assertIsInstance(s, str)
self.assertEqual(s.count('\n'), 0)
self.assertTrue(s.startswith('SpatialForce'))
r = np.random.rand(6, 10)
a = SpatialForce(r)
self.assertIsInstance(a, SpatialForce)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 10)
b = a[3]
self.assertIsInstance(b, SpatialForce)
self.assertIsInstance(b, SpatialVector)
self.assertIsInstance(b, SpatialF6)
self.assertEqual(len(b), 1)
self.assertTrue(all(b.A == r[:, 3]))
s = str(a)
self.assertIsInstance(s, str)
def test_momentum(self):
a = SpatialMomentum([1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialMomentum)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
a = SpatialMomentum(np.r_[1, 2, 3, 4, 5, 6])
self.assertIsInstance(a, SpatialMomentum)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 1)
self.assertTrue(all(a.A == np.r_[1, 2, 3, 4, 5, 6]))
s = str(a)
self.assertIsInstance(s, str)
self.assertEqual(s.count('\n'), 0)
self.assertTrue(s.startswith('SpatialMomentum'))
r = np.random.rand(6, 10)
a = SpatialMomentum(r)
self.assertIsInstance(a, SpatialMomentum)
self.assertIsInstance(a, SpatialVector)
self.assertIsInstance(a, SpatialF6)
self.assertEqual(len(a), 10)
b = a[3]
self.assertIsInstance(b, SpatialMomentum)
self.assertIsInstance(b, SpatialVector)
self.assertIsInstance(b, SpatialF6)
self.assertEqual(len(b), 1)
self.assertTrue(all(b.A == r[:, 3]))
s = str(a)
self.assertIsInstance(s, str)
def test_arith(self):
# just test SpatialVelocity since all types derive from same superclass
r1 = np.r_[1, 2, 3, 4, 5, 6]
r2 = np.r_[7, 8, 9, 10, 11, 12]
a1 = SpatialVelocity(r1)
a2 = SpatialVelocity(r2)
self.assertTrue(all((a1 + a2).A == r1 + r2))
self.assertTrue(all((a1 - a2).A == r1 - r2))
self.assertTrue(all((-a1).A == -r1))
def test_inertia(self):
# constructor
# addition
pass
def test_products(self):
# v x v = a *, v x F6 = a
# a x I, I x a
# v x I, I x v
# twist x v, twist x a, twist x F
pass
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main() | 32.58296 | 90 | 0.579136 | 7,019 | 0.966006 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.050922 |
c0b679addf1f8188d5d6b4db7a0f798d7c5b295a | 3,048 | py | Python | python/meta-regex.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
]
| null | null | null | python/meta-regex.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
]
| null | null | null | python/meta-regex.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
]
| 1 | 2018-10-09T02:03:12.000Z | 2018-10-09T02:03:12.000Z | # \s Returns a match where the string contains a white space character
# \S Returns a match where the string DOES NOT contain a white space character
import re
s1 = '''
---
slug: python-non-greedy-regexes
title: Python non-greedy regexes
summary: How to make Python regexes a little less greedy using the `?` modifier.
cat: Code
date_published: 2019-10-19
date_updated: 2019-10-19
---
# How to make Python regexes a little less greedy
There are these little things that once you learn about them you wonder how you ever did without them. The Python non-greedy modifier definitely falls into that category. I spent far too long fighting greedy matches before I learned about it.
Here was the problem:
```
---
title: This is some title
description: This is the description
---
Some content...
```
This is a simplified version of the metadata that each piece of content on the site has. What the code needs to do is extract the metadata and the content.
This seems straightforward. You might come up with:
```
---\s([\s\S]*)\s---\s([\s\S]*)
```
We can simplify that by getting rid of the extra newlines in the captured text with Python's `.strip()` function, so you end up with:
```
---([\s\S]*)---([\s\S]*)
```
The metadata drops into the first `()` and the content into the second `()` and there are rainbows and unicorns and all is good in the world. Until this happens...
```
---
title: This is some title
description: This is the description
---
Some content...
Item | Description
--- | ---
A | A thing
B | Another thing
Some more content...
```
And now there are tears because it all goes horribly wrong. You see, Python regexes are downright greedy. They try to match as much text as possible, which means your first capture group now runs all the way down to the `---` column markers in the Markdown table. This is where you probably start trying all kinds of variations on your regex to restrict the match to only the metadata. But there's an easy little fix...
```
---([\s\S]*?)---([\s\S]*)
```
The secret is the addition of the `?` operator. Like many operators it has several functions, but when it sits next to `*` it means "don't be so darn greedy".
Here's the actual code where I use it:
``` python
def extract_parts(source):
m = re.search(r'---([\s\S]*?)---([\s\S]*)', source, re.MULTILINE)
metadata = m.group(1)
markdown = m.group(2)
return metadata.strip(), markdown.strip()
```
This little `?` turns out to be hellishly useful. For example:
``` html
<p>Para 1</p><p>Para 2</p>
```
If you only want the first para you could use `<p>.*?</p>`, and you'd only match the first para.
You can test this out with the following code:
``` python
import re
s = "<p>para 1</p><p>para 2</p>"
m = re.search(r'<p>.*</p>', s)
print(m.group(0))
m = re.search(r'<p>.*?</p>', s)
print(m.group(0))
```
Yes. Useful indeed. Once you know about the non-greedy operator you'll wonder how you ever did without it!
'''
# Non-greedy *? for matched delimiters
def extract(source):
m = re.search(r'---([\s\S]*?)---', source, re.MULTILINE)
return m.group(1).strip()
print(extract(s1))
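# Hypothetical comparison (not in the original script): the greedy pattern runs
# past the metadata block and swallows text up to the last '---' in s1, so its
# capture is much longer than the non-greedy one.
def extract_greedy(source):
    m = re.search(r'---([\s\S]*)---', source, re.MULTILINE)
    return m.group(1).strip()

print(len(extract_greedy(s1)) > len(extract(s1)))  # expected: True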
| 26.973451 | 392 | 0.686024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,913 | 0.955709 |
c0b773458653a85f2fb1e0a33ea41844604c6b4f | 3,006 | py | Python | xdl-algorithm-solution/DIN_WITH_MOGUJIE_DATA/script/train.py | xiaobaoding/x-deeplearning | 1280043aba15ff57ac5e973bcce2489c698380d2 | [
"Apache-2.0"
]
| null | null | null | xdl-algorithm-solution/DIN_WITH_MOGUJIE_DATA/script/train.py | xiaobaoding/x-deeplearning | 1280043aba15ff57ac5e973bcce2489c698380d2 | [
"Apache-2.0"
]
| null | null | null | xdl-algorithm-solution/DIN_WITH_MOGUJIE_DATA/script/train.py | xiaobaoding/x-deeplearning | 1280043aba15ff57ac5e973bcce2489c698380d2 | [
"Apache-2.0"
]
| null | null | null | #coding=utf-8
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import time
import math
import random
import argparse
import tensorflow as tf
import numpy
from model import *
from utils import *
import xdl
from xdl.python.training.train_session import QpsMetricsHook, MetricsPrinterHook
#config here
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--seed", help="random seed", default=3)
parser.add_argument("-jt", "--job_type", help="'train' or 'test'", default='train')
parser.add_argument("-m", "--model", help="'din' or 'dien'", default='din_mogujie')
parser.add_argument("-si", "--save_interval", help="checkpoint save interval steps", default=20000)
parser.add_argument("-dr", "--data_dir", help="data dir")
args, unknown = parser.parse_known_args()
seed = args.seed
job_type = args.job_type
model_type = args.model
save_interval = args.save_interval
def get_data_prefix():
return "../data/"
#return args.data_dir
train_file = os.path.join(get_data_prefix(), "train_data.tfrecords")
def train():
if model_type == 'din_mogujie':
model = Model_DIN_MOGUJIE(
EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE,False, train_file,batch_size)
else:
raise Exception('only support din_mogujie and dien')
#data set
with xdl.model_scope('train'):
train_ops = model.build_network()
lr = 0.001
# Adam Adagrad
train_ops.append(xdl.Adam(lr).optimize())
hooks = []
log_format = "[%(time)s] lstep[%(lstep)s] gstep[%(gstep)s] lqps[%(lqps)s] gqps[%(gqps)s] loss[%(loss)s]"
hooks = [QpsMetricsHook(), MetricsPrinterHook(log_format)]
if xdl.get_task_index() == 0:
hooks.append(xdl.CheckpointHook(save_interval))
train_sess = xdl.TrainSession(hooks=hooks)
"""
with xdl.model_scope('test'):
test_ops = model.build_network(
EMBEDDING_DIM, is_train=False)
test_sess = xdl.TrainSession()
"""
model.run(train_ops, train_sess)
def test():
pass
if __name__ == '__main__':
SEED = seed
if SEED is None:
SEED = 3
tf.set_random_seed(SEED)
numpy.random.seed(SEED)
random.seed(SEED)
if job_type == 'train':
train()
elif job_type == 'test':
test()
else:
print('job type must be train or test, do nothing...')
| 30.06 | 112 | 0.663007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,348 | 0.448436 |
c0b94e63b8ef518a54d1b8787a0fbfafc3083387 | 53 | py | Python | script/__init__.py | KaoruNishikawa/nanten_tools | f5af30a40e8d558ae247c8e864fdea5edc0f5fb7 | [
"MIT"
]
| null | null | null | script/__init__.py | KaoruNishikawa/nanten_tools | f5af30a40e8d558ae247c8e864fdea5edc0f5fb7 | [
"MIT"
]
| null | null | null | script/__init__.py | KaoruNishikawa/nanten_tools | f5af30a40e8d558ae247c8e864fdea5edc0f5fb7 | [
"MIT"
]
| null | null | null | __version__ = "0.1.0"
__author__ = "Kaoru Nishikawa"
| 17.666667 | 30 | 0.716981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.45283 |
c0ba497faffdf2c98170646061c1181fdbd7ee74 | 1,542 | py | Python | feature_extract/config.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
]
| null | null | null | feature_extract/config.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
]
| 2 | 2016-12-09T21:16:28.000Z | 2016-12-09T21:29:10.000Z | feature_extract/config.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 21 18:51:11 2016
@author: brady
"""
#################### TRAINING ####################
# POS DIRS
TRAIN_CLEAN = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\clean'
TRAIN_0DB = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\0db'
TRAIN_5DB = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\5db'
TRAIN_10DB = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\10db'
TRAIN_15DB = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\15db'
TRAIN_ALLDB = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\all_data'
TRAIN_AN4 = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\an4_clstk'
TRAIN_MSAK = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\msak0'
TRAIN_FSEW = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\fsew0'
# NEG DIRS
TRAIN_KITCHEN = r'C:\Users\brady\GitHub\MinVAD\data\train\negative\building_106_kitchen\training_segments'
TRAIN_URBAN = r'C:\Users\brady\GitHub\MinVAD\data\train\negative\UrbanSound\data'
# Label Helpers
TRAIN_LABELS = r'C:\Users\brady\GitHub\MinVAD\data\train\positive\clean'
POS_DIRS = [TRAIN_ALLDB, TRAIN_MSAK, TRAIN_FSEW]
NEG_DIRS = [TRAIN_KITCHEN, TRAIN_URBAN]
#################### TESTING ####################
TEST_0DB = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\0db'
TEST_5DB = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\5db'
TEST_10DB = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\10db'
TEST_15DB = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\15db'
TEST_DIRS = [TEST_0DB, TEST_5DB, TEST_10DB, TEST_15DB] | 41.675676 | 106 | 0.732815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,162 | 0.753567 |
c0ba62a6a723e9a0a8608f8d14f5550d2b17eba4 | 6,735 | py | Python | kuka_arm/scripts/IK_server.py | congthanh184/RoboND-Kinematics-Project | 4c14ad5a9461fa5cdf465a04f8f11ff296b00760 | [
"MIT"
]
| null | null | null | kuka_arm/scripts/IK_server.py | congthanh184/RoboND-Kinematics-Project | 4c14ad5a9461fa5cdf465a04f8f11ff296b00760 | [
"MIT"
]
| null | null | null | kuka_arm/scripts/IK_server.py | congthanh184/RoboND-Kinematics-Project | 4c14ad5a9461fa5cdf465a04f8f11ff296b00760 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: Harsh Pandya
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy
class KukaR210:
def __init__(self):
self.alpha0 = self.alpha2 = self.alpha6 = 0
self.alpha1 = self.alpha3 = self.alpha5 = -pi/2
self.alpha4 = pi/2
self.a0 = self.a4 = self.a5 = self.a6 = 0
self.a1 = 0.35
self.a2 = 1.25
self.a3 = -0.054
self.d2 = self.d3 = self.d5 = self.d6 = 0
self.d1 = 0.75
self.d4 = 1.5
self.dg = 0.303
self.d34 = 0.96
self.d45 = 0.54
qz = tf.transformations.rotation_matrix(pi, (0,0,1))
qy = tf.transformations.rotation_matrix(-pi/2, (0,1,0))
self.R_corr = numpy.dot(qz, qy)
def get_dh_transformation(self, alpha, a, d, theta):
xaxis, zaxis = (1, 0, 0), (0, 0, 1)
qx = tf.transformations.rotation_matrix(alpha, xaxis)
qz = tf.transformations.rotation_matrix(theta, zaxis)
ax = tf.transformations.translation_matrix((a, 0, 0))
dz = tf.transformations.translation_matrix((0, 0, d))
T = numpy.dot(numpy.dot(qx, ax), numpy.dot(qz, dz))
return T
# Get joint2 position
def get_joint2_position(self, q1):
T0_1 = self.get_dh_transformation(self.alpha0, self.a0, self.d1, q1)
T1_2 = self.get_dh_transformation(0, self.a1, 0, 0)
T = numpy.dot(T0_1, T1_2)
return T[0:3, 3]
def get_T0_3_inv(self, q1, q2, q3):
T0_1 = self.get_dh_transformation(self.alpha0, self.a0, self.d1, q1)
T1_2 = self.get_dh_transformation(self.alpha1, self.a1, self.d2, q2 - (numpy.pi/2))
T2_3 = self.get_dh_transformation(self.alpha2, self.a2, self.d3, q3)
T0_3 = numpy.dot(numpy.dot(T0_1, T1_2), T2_3)
return numpy.linalg.inv(T0_3)
def get_ee_pose_base(self, position, orientation):
ee_pose = numpy.dot(tf.transformations.translation_matrix((position.x, position.y, position.z)),
tf.transformations.quaternion_matrix((orientation.x, orientation.y, orientation.z, orientation.w)))
return numpy.dot(ee_pose, self.R_corr)
def get_wrist_position(self, ee_base):
return ee_base[0:3, 3] - self.dg * ee_base[0:3, 2]
def vec_len(self, vec):
sqr_len = [pos**2 for pos in vec]
return numpy.sqrt(sum(sqr_len))
def IK(self, ee_position, ee_orientation):
# calculate wrist position from ee position and orientation
ee_base = self.get_ee_pose_base(ee_position, ee_orientation)
wrist_pos = self.get_wrist_position(ee_base)
# calculate theta1 by wrist position
q1 = numpy.arctan2(wrist_pos[1], wrist_pos[0])
        # calculate the triangle side opposite theta3
vec_J2_W = numpy.subtract(wrist_pos, self.get_joint2_position(q1))
side_B = self.vec_len(vec_J2_W)
side_d4_cor = numpy.sqrt(self.d4**2 + self.a3**2)
delta = numpy.arctan2(abs(self.a3), self.d34) - numpy.arctan2(abs(self.a3), self.d4)
# find theta 3 prime which expresses the relative angle with theta 2
c3_prime = (side_B**2 - self.a2**2 - side_d4_cor**2) / (2 * self.a2 * side_d4_cor)
prime3 = numpy.arctan2(numpy.sqrt(1 - (c3_prime**2)), c3_prime)
# find theta2 and theta3
beta = numpy.arctan2(vec_J2_W[2], numpy.sqrt(vec_J2_W[0]**2 + vec_J2_W[1]**2))
        gamma = numpy.arctan2(self.d4 * numpy.sin(prime3), self.d4 * numpy.cos(prime3) + self.a2)
q2 = (numpy.pi/2) - beta - gamma
q3 = prime3 - (numpy.pi/2) - delta
# get T3_6
T0_3_inv = self.get_T0_3_inv(q1, q2, q3)
T3_6 = numpy.dot(T0_3_inv, ee_base)
# calculate theta4, theta5, theta6
q4 = numpy.arctan2( T3_6[2][2], -T3_6[0][2])
q5 = numpy.arctan2( numpy.sqrt(1 - T3_6[1][2]**2), T3_6[1][2])
q6 = numpy.arctan2( -T3_6[1][1], T3_6[1][0])
return (q1, q2, q3, q4, q5, q6)
Kuka = KukaR210()
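# Quick sanity-check sketch (hypothetical, not part of the ROS service): any
# objects exposing x/y/z (and w) attributes can stand in for the pose fields.
# from collections import namedtuple
# Point = namedtuple('Point', 'x y z')
# Quat = namedtuple('Quat', 'x y z w')
# thetas = Kuka.IK(Point(2.0, 0.0, 1.5), Quat(0.0, 0.0, 0.0, 1.0))
# print(thetas)  # (q1, ..., q6) in radians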
def handle_calculate_IK(req):
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
print "No valid poses received"
return -1
else:
### Your FK code here
# Create symbols
#
#
# Create Modified DH parameters
#
#
# Define Modified DH Transformation matrix
#
#
# Create individual transformation matrices
#
#
# Extract rotation matrices from the transformation matrices
#
#
###
joint_trajectory_list = []
# Initialize service response
for x in xrange(0, len(req.poses)):
# IK code starts here
joint_trajectory_point = JointTrajectoryPoint()
# Extract end-effector position and orientation from request
# px,py,pz = end-effector position
# roll, pitch, yaw = end-effector orientation
px = req.poses[x].position.x
py = req.poses[x].position.y
pz = req.poses[x].position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[req.poses[x].orientation.x, req.poses[x].orientation.y,
req.poses[x].orientation.z, req.poses[x].orientation.w])
### Your IK code here
# Compensate for rotation discrepancy between DH parameters and Gazebo
#
#
# Calculate joint angles using Geometric IK method
#
#
###
position = req.poses[x].position
orientation = req.poses[x].orientation
(theta1, theta2, theta3, theta4, theta5, theta6) = Kuka.IK(position, orientation)
# Populate response for the IK request
# In the next line replace theta1,theta2...,theta6 by your joint angle variables
joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
# initialize node and declare calculate_ik service
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
print "Ready to receive an IK request"
rospy.spin()
if __name__ == "__main__":
IK_server()
| 35.634921 | 119 | 0.621678 | 3,850 | 0.571641 | 0 | 0 | 0 | 0 | 0 | 0 | 1,416 | 0.210245 |
c0bafd320c0a8a62b60bbf8a3554bd41b71dc5db | 12,205 | py | Python | bin/ADFRsuite/lib/python2.7/site-packages/radical/utils/profile.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
]
| null | null | null | bin/ADFRsuite/lib/python2.7/site-packages/radical/utils/profile.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
]
| null | null | null | bin/ADFRsuite/lib/python2.7/site-packages/radical/utils/profile.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
]
| 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z |
import os
import csv
import glob
import time
import threading
from .misc import name2env as ru_name2env
from .misc import get_hostname as ru_get_hostname
from .misc import get_hostip as ru_get_hostip
from .read_json import read_json as ru_read_json
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
class Profiler(object):
"""
This class is really just a persistent file handle with a convenience call
(prof()) to write lines with timestamp and events.
Any profiling intelligence is applied when reading and evaluating the
created profiles.
"""
fields = ['time', 'name', 'uid', 'state', 'event', 'msg']
# --------------------------------------------------------------------------
#
def __init__(self, name, env_name=None, path=None):
"""
        Open the file handle, sync the clock, and write timestamp_zero
"""
# use the profiler name as basis for the env check
if not env_name:
env_name = '%s' % ru_name2env(name)
if not path:
path = os.getcwd()
self._path = path
self._name = name
self._enabled = False
# example: for RADICAL_PILOT_COMPONENT, we check
# RADICAL_PILOT_COMPONENT_PROFILE
# RADICAL_PILOT_PROFILE
# RADICAL_PROFILE
# if any of those is set in env, the profiler is enabled
env_elems = env_name.split('_')
if env_elems[-1] == 'PROFILE':
env_elems = env_elems[:-1]
env_check = ''
for elem in env_elems:
env_check += '%s_' % elem
if '%sPROFILE' % env_check in os.environ:
self._enabled = True
break
# FIXME
if 'RADICAL_PILOT_PROFILE' in os.environ:
self._enabled = True
if not self._enabled:
return
# profiler is enabled - sync time and open handle
self._ts_zero, self._ts_abs, self._ts_mode = self._timestamp_init()
try:
os.makedirs(self._path)
except OSError:
pass # already exists
self._handle = open("%s/%s.prof" % (self._path, self._name), 'a')
# write header and time normalization info
self._handle.write("#%s\n" % (','.join(Profiler.fields)))
self._handle.write("%.4f,%s:%s,%s,%s,%s,%s\n" % \
(self.timestamp(), self._name, "", "", "", 'sync abs',
"%s:%s:%s:%s:%s" % (
ru_get_hostname(), ru_get_hostip(),
self._ts_zero, self._ts_abs, self._ts_mode)))
# ------------------------------------------------------------------------------
#
@property
def enabled(self):
return self._enabled
# ------------------------------------------------------------------------------
#
def close(self):
if not self._enabled:
return
if self._enabled:
self.prof("END")
self._handle.close()
# ------------------------------------------------------------------------------
#
def flush(self):
if not self._enabled:
return
if self._enabled:
# see https://docs.python.org/2/library/stdtypes.html#file.flush
self.prof("flush")
self._handle.flush()
os.fsync(self._handle.fileno())
# ------------------------------------------------------------------------------
#
def prof(self, event, uid=None, state=None, msg=None, timestamp=None,
logger=None, name=None):
if not self._enabled:
return
if not timestamp:
timestamp = self.timestamp()
if not name:
name = self._name
# if uid is a list, then recursively call self.prof for each uid given
if isinstance(uid, list):
for _uid in uid:
self.prof(event, _uid, state, msg, timestamp, logger)
return
if logger:
logger("%s (%10s%s) : %s", event, uid, state, msg)
tid = threading.current_thread().name
        if uid is None: uid = ''
        if msg is None: msg = ''
        if state is None: state = ''
self._handle.write("%.4f,%s:%s,%s,%s,%s,%s\n" \
% (timestamp, name, tid, uid, state, event, msg))
# --------------------------------------------------------------------------
#
def _timestamp_init(self):
"""
return a tuple of [system time, absolute time]
"""
# retrieve absolute timestamp from an external source
#
# We first try to contact a network time service for a timestamp, if that
# fails we use the current system time.
try:
import ntplib
ntphost = os.environ.get('RADICAL_UTILS_NTPHOST', '0.pool.ntp.org')
t_one = time.time()
response = ntplib.NTPClient().request(ntphost, timeout=1)
t_two = time.time()
ts_ntp = response.tx_time
ts_sys = (t_one + t_two) / 2.0
return [ts_sys, ts_ntp, 'ntp']
except Exception:
pass
# on any errors, we fall back to system time
t = time.time()
return [t,t, 'sys']
# --------------------------------------------------------------------------
#
def timestamp(self):
return time.time()
# --------------------------------------------------------------------------
#
def timestamp():
return time.time()
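# Typical usage (sketch; the profiler stays disabled unless an enabling
# environment variable such as RADICAL_PROFILE is set):
#
#   prof = Profiler('my_component')
#   prof.prof('work_start', uid='task.0000')
#   ...                                       # do the work
#   prof.prof('work_stop', uid='task.0000')
#   prof.close()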
# ------------------------------------------------------------------------------
#
def read_profiles(profiles):
"""
We read all profiles as CSV files and convert them into lists of dicts.
"""
ret = dict()
for prof in profiles:
rows = list()
with open(prof, 'r') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=Profiler.fields)
for row in reader:
if row['time'].startswith('#'):
# skip header
continue
row['time'] = float(row['time'])
rows.append(row)
ret[prof] = rows
return ret
# ------------------------------------------------------------------------------
#
def combine_profiles(profs):
"""
We merge all profiles and sort by time.
This routine expects all profiles to have a synchronization time stamp.
Two kinds of sync timestamps are supported: absolute (`sync abs`) and
relative (`sync rel`).
Time syncing is done based on 'sync abs' timestamps. We expect one such
absolute timestamp to be available per host (the first profile entry will
contain host information). All timestamps from the same host will be
corrected by the respectively determined NTP offset.
"""
pd_rel = dict() # profiles which have relative time refs
t_host = dict() # time offset per host
p_glob = list() # global profile
t_min = None # absolute starting point of profiled session
c_end = 0 # counter for profile closing tag
# first get all absolute timestamp sync from the profiles, for all hosts
for pname, prof in profs.iteritems():
if not len(prof):
print 'empty profile %s' % pname
continue
if not prof[0]['msg'] or ':' not in prof[0]['msg']:
print 'unsynced profile %s' % pname
continue
t_prof = prof[0]['time']
host, ip, t_sys, t_ntp, t_mode = prof[0]['msg'].split(':')
host_id = '%s:%s' % (host, ip)
if t_min: t_min = min(t_min, t_prof)
else : t_min = t_prof
if t_mode != 'sys':
continue
# determine the correction for the given host
t_sys = float(t_sys)
t_ntp = float(t_ntp)
t_off = t_sys - t_ntp
if host_id in t_host and t_host[host_id] != t_off:
print 'conflicting time sync for %s (%s)' % (pname, host_id)
continue
t_host[host_id] = t_off
# now that we can align clocks for all hosts, apply that correction to all
# profiles
for pname, prof in profs.iteritems():
if not len(prof):
continue
if not prof[0]['msg']:
continue
host, ip, _, _, _ = prof[0]['msg'].split(':')
host_id = '%s:%s' % (host, ip)
if host_id in t_host:
t_off = t_host[host_id]
else:
print 'WARNING: no time offset for %s' % host_id
t_off = 0.0
t_0 = prof[0]['time']
t_0 -= t_min
# correct profile timestamps
for row in prof:
t_orig = row['time']
row['time'] -= t_min
row['time'] -= t_off
# count closing entries
if row['event'] == 'END':
c_end += 1
# add profile to global one
p_glob += prof
# # Check for proper closure of profiling files
# if c_end == 0:
# print 'WARNING: profile "%s" not correctly closed.' % prof
# if c_end > 1:
# print 'WARNING: profile "%s" closed %d times.' % (prof, c_end)
# sort by time and return
p_glob = sorted(p_glob[:], key=lambda k: k['time'])
return p_glob
# ------------------------------------------------------------------------------
#
def clean_profile(profile, sid, state_final, state_canceled):
"""
This method will prepare a profile for consumption in radical.analytics. It
performs the following actions:
- makes sure all events have a `ename` entry
- remove all state transitions to `CANCELLED` if a different final state
is encountered for the same uid
- assignes the session uid to all events without uid
- makes sure that state transitions have an `ename` set to `state`
"""
entities = dict() # things which have a uid
if not isinstance(state_final, list):
state_final = [state_final]
for event in profile:
uid = event['uid' ]
state = event['state']
time = event['time' ]
name = event['event']
# we derive entity_type from the uid -- but funnel
# some cases into the session
if uid:
event['entity_type'] = uid.split('.',1)[0]
else:
event['entity_type'] = 'session'
event['uid'] = sid
uid = sid
if uid not in entities:
entities[uid] = dict()
entities[uid]['states'] = dict()
entities[uid]['events'] = list()
if name == 'advance':
# this is a state progression
assert(state)
assert(uid)
event['event_name'] = 'state'
if state in state_final and state != state_canceled:
# a final state other than CANCELED will cancel any previous
# CANCELED state.
if state_canceled in entities[uid]['states']:
del(entities[uid]['states'][state_canceled])
if state in entities[uid]['states']:
# ignore duplicated recordings of state transitions
# FIXME: warning?
continue
# raise ValueError('double state (%s) for %s' % (state, uid))
entities[uid]['states'][state] = event
else:
# FIXME: define different event types (we have that somewhere)
event['event_name'] = 'event'
entities[uid]['events'].append(event)
# we have evaluated, cleaned and sorted all events -- now we recreate
# a clean profile out of them
ret = list()
for uid,entity in entities.iteritems():
ret += entity['events']
for state,event in entity['states'].iteritems():
ret.append(event)
# sort by time and return
ret = sorted(ret[:], key=lambda k: k['time'])
return ret
# ------------------------------------------------------------------------------
| 28.990499 | 84 | 0.498484 | 5,151 | 0.42204 | 0 | 0 | 62 | 0.00508 | 0 | 0 | 5,162 | 0.422941 |
c0bb7b8a74c23f921be8c3f93658d3fa62727ccc | 5,214 | py | Python | input_fn.py | ilyakhov/pytorch-word2vec | bb9b0ed408a12e3652d2d897330292b7b93c7997 | [
"MIT"
]
| 12 | 2019-05-22T13:08:42.000Z | 2021-07-11T07:12:37.000Z | input_fn.py | ilyakhov/pytorch-word2vec | bb9b0ed408a12e3652d2d897330292b7b93c7997 | [
"MIT"
]
| null | null | null | input_fn.py | ilyakhov/pytorch-word2vec | bb9b0ed408a12e3652d2d897330292b7b93c7997 | [
"MIT"
]
| 1 | 2021-02-20T09:04:19.000Z | 2021-02-20T09:04:19.000Z | import numpy as np
import torch
from torch.utils.data import Dataset
class CBOWDataSet(Dataset):
def __init__(self, corpus,
pipeline='hier_softmax',
nodes_index=None,
turns_index=None,
vocab_size=None,
neg_samples=None,
max_path_len=17,
window_size=6,
device=None,
skip_target=False,
dtype=torch.float32):
"""
:param corpus: the flat list of tokens
:param pipeline: 'hier_softmax'/'neg_sampling'
params for 'hierarchical softmax' pipeline:
:param nodes_index: index of nodes from leaf parent to the root
:param turns_index: the list of 1/-1 indices:
             1 — the leaf is the left child of the corresponding node
            -1 — the leaf is the right child
:param vocab_size: is used for padding
:param max_path_len: length of the longest path from word (leaf)
to the root
params for 'negative sampling' pipeline:
:param neg_samples: the number of negative samples
:param window_size: word context size
:param device: cuda:0/cuda:1/cpu
:param dtype: torch float type
"""
self.window_size = window_size
self.step = window_size // 2
self.left_step = self.step
self.right_step = window_size - self.step
self.corpus = corpus[-self.left_step:] + corpus + \
corpus[:self.right_step]
self.device = device
self.dtype = dtype
self.pipeline = pipeline
if self.pipeline == 'hier_softmax':
self.nodes_index = nodes_index
self.max_path_len = max_path_len
self.turns_index = turns_index
self.vocab_size = vocab_size
self.skip_target = skip_target
elif self.pipeline == 'neg_sampling':
self.np_corpus = np.array(self.corpus)
self.neg_samples = neg_samples
else:
raise NotImplementedError(
f'Pipeline for "pipeline": {self.pipeline}')
def __len__(self):
return len(self.corpus) - self.window_size
def __getitem__(self, item):
if self.pipeline == 'hier_softmax':
return self.__h_getitem(item)
elif self.pipeline == 'neg_sampling':
return self.__n_getitem(item)
else:
raise NotImplementedError(
f'__getitem__ for pipeline: {self.pipeline}')
def __h_getitem(self, i):
"""
Hierarchical softmax pipepline
:param i: item index
:return: torch tensors:
context, target, nodes, mask, turns_coeffs
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
nodes = self.nodes_index[target]
nodes_len = len(nodes)
mask = np.zeros(self.max_path_len)
mask[:nodes_len] = 1
pad_len = self.max_path_len - nodes_len
nodes = np.concatenate([nodes, np.ones(pad_len) * self.vocab_size])
# nodes = np.concatenate([nodes, np.ones(pad_len) * -1])
nodes = torch.tensor(nodes, dtype=torch.long, device=self.device)
turns_coeffs = self.turns_index.get(target)
turns_coeffs = np.concatenate([turns_coeffs, np.zeros(pad_len)])
turns_coeffs = torch.tensor(turns_coeffs, dtype=self.dtype,
device=self.device)
mask = torch.tensor(mask, dtype=self.dtype, device=self.device)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
if self.skip_target is False:
return context, target, nodes, mask, turns_coeffs
else:
return context, nodes, mask, turns_coeffs
def __n_getitem(self, i):
"""
Negative sampling pipeline
:param i: item index
:return: torch tensors:
        context, target (negative samples are drawn outside this method)
"""
i += self.left_step
target = self.corpus[i]
context = self.corpus[(i - self.left_step):i]
context += self.corpus[(i + 1):(i + self.right_step + 1)]
try:
assert len(context) == self.window_size
except AssertionError:
raise Exception(
'Context size is not valid: context - '
'{0} has size - {1}; window_size - {2}'
.format(context, len(context), self.window_size)
)
context = torch.tensor(context, dtype=torch.long, device=self.device)
target = torch.tensor(target, dtype=torch.long, device=self.device)
return context, target
| 36.71831 | 77 | 0.575374 | 5,145 | 0.98601 | 0 | 0 | 0 | 0 | 0 | 0 | 1,518 | 0.290916 |
c0bc31b78e193431b864fc09c2a40bbe17627b76 | 301 | py | Python | 1 ano/logica-de-programacao/condicionais/peso-animais-maior-menor.py | Biguelini/Atividades-Tecnico-em-Informatica | 468e9ac05a666143d8752d053854ecc22bcc8b79 | [
"MIT"
]
| 4 | 2021-04-27T01:00:32.000Z | 2021-09-24T16:25:48.000Z | 1 ano/logica-de-programacao/condicionais/peso-animais-maior-menor.py | Biguelini/Atividades-Tecnico-em-Informatica | 468e9ac05a666143d8752d053854ecc22bcc8b79 | [
"MIT"
]
| null | null | null | 1 ano/logica-de-programacao/condicionais/peso-animais-maior-menor.py | Biguelini/Atividades-Tecnico-em-Informatica | 468e9ac05a666143d8752d053854ecc22bcc8b79 | [
"MIT"
]
| 1 | 2021-05-12T18:28:06.000Z | 2021-05-12T18:28:06.000Z | peso1 = float(input('Digite o peso do primeiro animal... '))
peso2 = float(input('Digite o peso do segundo animal... '))
if peso1 > peso2:
print('O primeiro animal é mais pesado')
elif peso1 < peso2:
print('O segundo animal é mais pesado')
else:
print('Os dois animais têm o mesmo peso')
| 30.1 | 60 | 0.681063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.582237 |
c0bd2f1bb8af7bb26fd427d057716fce6e53b345 | 607 | py | Python | uniform_distribution.py | khinthandarkyaw98/Python_Practice | 9b431129c79315a57dae81048a22bf85c4b5132c | [
"MIT"
]
| null | null | null | uniform_distribution.py | khinthandarkyaw98/Python_Practice | 9b431129c79315a57dae81048a22bf85c4b5132c | [
"MIT"
]
| null | null | null | uniform_distribution.py | khinthandarkyaw98/Python_Practice | 9b431129c79315a57dae81048a22bf85c4b5132c | [
"MIT"
]
| null | null | null | # uniform_distribution
# used to describe a probability distribution in which every outcome has an
# equal chance of occurring
"""
E.g. Generation of random numbers.
It has three parameters.
a - lower bound - default 0.0
b - upper bound - default 1.0
size - the shape of the returned array
"""
# 2x3 uniform distribution sample
from numpy import random
x = random.uniform(size = (2, 3))
print(x)
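# A small sketch (not in the original script): the low/high bounds can be set
# too, e.g. five samples drawn uniformly from [10, 20).
y = random.uniform(10, 20, size=5)
print(y)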
# visualization of uniform distribution
# from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns
sns.distplot(random.uniform(size = 1000), hist = False)
plt.show()
| 20.233333 | 75 | 0.70346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.630972 |
c0c0cf39ce27029feb9aa7a105da2d19af17d25d | 1,563 | py | Python | delivery/services/external_program_service.py | mariya/arteria-delivery | ec2fd79cfc6047a44dd251183b971535e9afd0dc | [
"MIT"
]
| null | null | null | delivery/services/external_program_service.py | mariya/arteria-delivery | ec2fd79cfc6047a44dd251183b971535e9afd0dc | [
"MIT"
]
| 18 | 2016-11-10T14:32:54.000Z | 2019-10-14T07:07:54.000Z | delivery/services/external_program_service.py | mariya/arteria-delivery | ec2fd79cfc6047a44dd251183b971535e9afd0dc | [
"MIT"
]
| 6 | 2016-10-18T12:16:46.000Z | 2019-09-11T11:38:17.000Z |
from tornado.process import Subprocess
from tornado import gen
from subprocess import PIPE
from delivery.models.execution import ExecutionResult, Execution
class ExternalProgramService(object):
"""
A service for running external programs
"""
@staticmethod
def run(cmd):
"""
Run a process and do not wait for it to finish
:param cmd: the command to run as a list, i.e. ['ls','-l', '/']
:return: A instance of Execution
"""
p = Subprocess(cmd,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE)
return Execution(pid=p.pid, process_obj=p)
@staticmethod
@gen.coroutine
def wait_for_execution(execution):
"""
Wait for an execution to finish
:param execution: instance of Execution
:return: an ExecutionResult for the execution
"""
status_code = yield execution.process_obj.wait_for_exit(raise_error=False)
out = execution.process_obj.stdout.read().decode('UTF-8')
err = execution.process_obj.stderr.read().decode('UTF-8')
return ExecutionResult(out, err, status_code)
@staticmethod
def run_and_wait(cmd):
"""
Run an external command and wait for it to finish
:param cmd: the command to run as a list, i.e. ['ls','-l', '/']
:return: an ExecutionResult for the execution
"""
execution = ExternalProgramService.run(cmd)
return ExternalProgramService.wait_for_execution(execution)
| 29.490566 | 82 | 0.621881 | 1,399 | 0.895074 | 470 | 0.300704 | 1,284 | 0.821497 | 0 | 0 | 608 | 0.388996 |
c0c468474802f951d70f92ebb743554a56f1d46b | 1,577 | py | Python | Estructuras de control/ejercicio12.py | mariapdm/talleres_de_algoritmos | fc9ad8f8ad2c3cb4f4ae0a7fad2e36824bba2afb | [
"MIT"
]
| null | null | null | Estructuras de control/ejercicio12.py | mariapdm/talleres_de_algoritmos | fc9ad8f8ad2c3cb4f4ae0a7fad2e36824bba2afb | [
"MIT"
]
| null | null | null | Estructuras de control/ejercicio12.py | mariapdm/talleres_de_algoritmos | fc9ad8f8ad2c3cb4f4ae0a7fad2e36824bba2afb | [
"MIT"
]
| null | null | null | """
Input data
math_exam_grade-->nem-->float
math_assignment1_grade-->ntm-->float
math_assignment2_grade-->nttm-->float
math_assignment3_grade-->ntttm-->float
physics_exam_grade-->nef-->float
physics_assignment1_grade-->ntf-->float
physics_assignment2_grade-->nttf-->float
chemistry_exam_grade-->neq-->float
chemistry_assignment1_grade-->ntq-->float
chemistry_assignment2_grade-->nttq-->float
chemistry_assignment3_grade-->ntttq-->float
Output data
overall_average-->pt-->float
math_average-->pm-->float
physics_average-->pf-->float
chemistry_average-->pq-->float
"""
#Inputs
nem=float(input("Enter the math exam grade: "))
ntm=float(input("Enter the grade of the 1st math assignment: "))
nttm=float(input("Enter the grade of the 2nd math assignment: "))
ntttm=float(input("Enter the grade of the 3rd math assignment: "))
nef=float(input("Enter the physics exam grade: "))
ntf=float(input("Enter the grade of the 1st physics assignment: "))
nttf=float(input("Enter the grade of the 2nd physics assignment: "))
neq=float(input("Enter the chemistry exam grade: "))
ntq=float(input("Enter the grade of the 1st chemistry assignment: "))
nttq=float(input("Enter the grade of the 2nd chemistry assignment: "))
ntttq=float(input("Enter the grade of the 3rd chemistry assignment: "))
#Black box
pm=(nem*0.90)+(((ntm+nttm+ntttm)/3)*0.1)
pf=(nef*0.8)+(((ntf+nttf)/2)*0.2)
pq=(neq*0.85)+(((ntq+nttq+ntttq)/3)*0.15)
pt=(pm+pf+pq)/3
#Outputs
print("The average of the three subjects is: ", pt)
print("The math average is: ", pm)
print("The physics average is: ", pf)
print("The chemistry average is: ", pq)
c0c491c66e363814a85776c34ddeffc5e419a0b3 | 9,667 | py | Python | xraydb/materials.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| null | null | null | xraydb/materials.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| null | null | null | xraydb/materials.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| null | null | null | import os
import numpy as np
from collections import namedtuple
from .chemparser import chemparse
from .xray import mu_elam, atomic_mass
from .utils import get_homedir
_materials = None
Material = namedtuple('Material', ('formula', 'density', 'name', 'categories'))
def get_user_materialsfile():
"""return name for user-specific materials.dat file
With $HOME being the users home directory, this will be
$HOME/.config/xraydb/materials.dat
"""
return os.path.join(get_homedir(), '.config', 'xraydb', 'materials.dat')
def _read_materials_db():
"""return _materials dictionary, creating it if needed"""
global _materials
if _materials is None:
# initialize materials table
_materials = {}
def read_materialsfile(fname):
with open(fname, 'r') as fh:
lines = fh.readlines()
for line in lines:
line = line.strip()
if len(line) > 2 and not line.startswith('#'):
words = [i.strip() for i in line.split('|')]
name = words[0].lower()
formula = None
if len(words) == 3: # older style
# "name | formula | density" or "name | density | formula"
iformula = 1
try:
density = float(words[2])
except ValueError:
density = float(words[1])
iformula = 2
formula = words[iformula]
categories = []
elif len(words) == 4: # newer style, with categories
density = float(words[1])
categories = [w.strip() for w in words[2].split(',')]
formula = words[3]
if formula is not None:
formula = formula.replace(' ', '')
_materials[name] = Material(formula, density, name, categories)
# first, read from standard list
local_dir, _ = os.path.split(__file__)
fname = os.path.join(local_dir, 'materials.dat')
if os.path.exists(fname):
read_materialsfile(fname)
# next, read from users materials file
fname = get_user_materialsfile()
if os.path.exists(fname):
read_materialsfile(fname)
return _materials
def material_mu(name, energy, density=None, kind='total'):
"""X-ray attenuation length (in 1/cm) for a material by name or formula
Args:
name (str): chemical formul or name of material from materials list.
energy (float or ndarray): energy or array of energies in eV
density (None or float): material density (gr/cm^3).
kind (str): 'photo' or 'total' for whether to return the
photo-absorption or total cross-section ['total']
Returns:
absorption length in 1/cm
Notes:
1. material names are not case sensitive,
chemical compounds are case sensitive.
2. mu_elam() is used for mu calculation.
3. if density is None and material is known, that density will be used.
Examples:
>>> material_mu('H2O', 10000.0)
5.32986401658495
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
formula = None
_density = None
mater = _materials.get(name.lower(), None)
if mater is None:
for key, val in _materials.items():
if name.lower() == val[0].lower(): # match formula
mater = val
break
# default to using passed in name as a formula
if formula is None:
if mater is None:
formula = name
else:
formula = mater.formula
if density is None and mater is not None:
density = mater.density
if density is None:
raise Warning('material_mu(): must give density for unknown materials')
mass_tot, mu = 0.0, 0.0
for elem, frac in chemparse(formula).items():
mass = frac * atomic_mass(elem)
mu += mass * mu_elam(elem, energy, kind=kind)
mass_tot += mass
return density*mu/mass_tot
def material_mu_components(name, energy, density=None, kind='total'):
"""material_mu_components: absorption coefficient (in 1/cm) for a compound
Args:
name (str): chemical formul or name of material from materials list.
energy (float or ndarray): energy or array of energies in eV
density (None or float): material density (gr/cm^3).
        kind (str): 'photo' or 'total' for whether to
           return photo-absorption or total cross-section ['total']
Returns:
dict for constructing mu per element,
with elements 'mass' (total mass), 'density', and
'elements' (list of atomic symbols for elements in material).
For each element, there will be an item (atomic symbol as key)
with tuple of (stoichiometric fraction, atomic mass, mu)
Examples:
>>> xraydb.material_mu('quartz', 10000)
50.36774553547068
>>> xraydb.material_mu_components('quartz', 10000)
{'mass': 60.0843, 'density': 2.65, 'elements': ['Si', 'O'],
'Si': (1, 28.0855, 33.87943243018506), 'O': (2.0, 15.9994, 5.952824815297084)}
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
mater = _materials.get(name.lower(), None)
if mater is None:
formula = name
if density is None:
raise Warning('material_mu(): must give density for unknown materials')
else:
formula = mater.formula
density = mater.density
out = {'mass': 0.0, 'density': density, 'elements':[]}
for atom, frac in chemparse(formula).items():
mass = atomic_mass(atom)
mu = mu_elam(atom, energy, kind=kind)
out['mass'] += frac*mass
out[atom] = (frac, mass, mu)
out['elements'].append(atom)
return out
def get_material(name):
"""look up material name, return formula and density
Args:
name (str): name of material or chemical formula
Returns:
chemical formula, density of material
Examples:
>>> xraydb.get_material('kapton')
('C22H10N2O5', 1.43)
See Also:
find_material()
"""
material = find_material(name)
if material is None:
return None
return material.formula, material.density
def find_material(name):
"""look up material name, return material instance
Args:
name (str): name of material or chemical formula
Returns:
material instance
Examples:
>>> xraydb.find_material('kapton')
Material(formula='C22H10N2O5', density=1.42, name='kapton', categories=['polymer'])
See Also:
get_material()
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
mat = _materials.get(name.lower(), None)
if mat is not None:
return mat
for mat in _materials.values():
if mat.formula == name:
return mat
return None
def get_materials(force_read=False, categories=None):
"""get dictionary of all available materials
Args:
force_read (bool): whether to force a re-reading of the
materials database [False]
categories (list of strings or None): restrict results
to those that match category names
Returns:
dict with keys of material name and values of Materials instances
Examples:
>>> for name, m in xraydb.get_materials().items():
... print(name, m)
...
water H2O 1.0
lead Pb 11.34
aluminum Al 2.7
kapton C22H10N2O5 1.42
polyimide C22H10N2O5 1.42
nitrogen N 0.00125
argon Ar 0.001784
...
"""
global _materials
if force_read or _materials is None:
_materials = _read_materials_db()
return _materials
def add_material(name, formula, density, categories=None):
"""add a material to the users local material database
Args:
name (str): name of material
formula (str): chemical formula
density (float): density
categories (list of strings or None): list of category names
Returns:
None
Notes:
the data will be saved to $HOME/.config/xraydb/materials.dat
in the users home directory, and will be useful in subsequent sessions.
Examples:
>>> xraydb.add_material('becopper', 'Cu0.98e0.02', 8.3, categories=['metal'])
"""
global _materials
if _materials is None:
_materials = _read_materials_db()
formula = formula.replace(' ', '')
if categories is None:
categories = []
_materials[name.lower()] = Material(formula, float(density), name, categories)
fname = get_user_materialsfile()
if os.path.exists(fname):
fh = open(fname, 'r')
text = fh.readlines()
fh.close()
else:
parent, _ = os.path.split(fname)
if not os.path.exists(parent):
try:
os.makedirs(parent)
except FileExistsError:
pass
text = ['# user-specific database of materials\n',
                '# name | density | categories | formula\n']
catstring = ', '.join(categories)
text.append(" %s | %g | %s | %s\n" % (name, density, catstring, formula))
with open(fname, 'w') as fh:
fh.write(''.join(text))
| 31.90429 | 91 | 0.58529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,664 | 0.482466 |
c0c62d4eee91d75a65403ff152657c9c03089c57 | 1,069 | py | Python | client.py | simondlevy/sockets | f49dd677b6508859f01c9c54101b38e802d6370e | [
"MIT"
]
| null | null | null | client.py | simondlevy/sockets | f49dd677b6508859f01c9c54101b38e802d6370e | [
"MIT"
]
| null | null | null | client.py | simondlevy/sockets | f49dd677b6508859f01c9c54101b38e802d6370e | [
"MIT"
]
| 1 | 2018-06-12T03:32:26.000Z | 2018-06-12T03:32:26.000Z | #!/usr/bin/env python3
'''
Client script for simple client/server example
Copyright (C) Simon D. Levy 2021
MIT License
'''
from threading import Thread
from time import sleep
import socket
from struct import unpack
from header import ADDR, PORT
def comms(data):
'''
Communications thread
'''
    # Connect to the server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDR, PORT))
# Loop until main thread quits
while True:
# Receive and unpack three floating-point numbers
data[0], data[1], data[2] = unpack('=fff', sock.recv(12))
# Yield to the main thread
sleep(0.001)
def main():
# Create a list to receiver the data
data = [0, 0, 0]
# Start the client on its own thread
t = Thread(target=comms, args=(data,))
t.setDaemon(True)
t.start()
# Loop until user hits CTRL-C
while True:
try:
print('%3.3f %3.3f %3.3f ' % tuple(data))
sleep(.01)
except KeyboardInterrupt:
break
main()
| 18.431034 | 65 | 0.613658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.39102 |
c0c65039eac0d1c182008b9f53dbb8727df88022 | 151 | py | Python | home/views.py | kerol/kerolgaodotcom | 7993bb5f40dd1f6b3ebdef4d90728cd77651c026 | [
"BSD-3-Clause"
]
| 1 | 2016-03-02T02:49:00.000Z | 2016-03-02T02:49:00.000Z | home/views.py | kerol/kerolgaodotcom | 7993bb5f40dd1f6b3ebdef4d90728cd77651c026 | [
"BSD-3-Clause"
]
| null | null | null | home/views.py | kerol/kerolgaodotcom | 7993bb5f40dd1f6b3ebdef4d90728cd77651c026 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf8 -*-
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'about.html')
| 15.1 | 40 | 0.682119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.397351 |
c0c6c96cefa40fab2593e89ee811e26649ffff4f | 15,126 | py | Python | old/compute_T.py | azhan137/cylinder_t_matrix | 73a496c07dbbb02896b2baf727d452765da9aac3 | [
"MIT"
]
| 1 | 2022-03-18T11:52:36.000Z | 2022-03-18T11:52:36.000Z | old/compute_T.py | AmosEgel/cylinder_t_matrix | 78f6607993af5babdda384969c45cf3ac6461257 | [
"MIT"
]
| null | null | null | old/compute_T.py | AmosEgel/cylinder_t_matrix | 78f6607993af5babdda384969c45cf3ac6461257 | [
"MIT"
]
| 1 | 2020-12-07T13:11:00.000Z | 2020-12-07T13:11:00.000Z | import numpy as np
from numpy.polynomial import legendre
from smuthi import spherical_functions as sf
import bessel_functions as bf
##Codebase for computing the T-matrix and its derivative with respect to height and radius for a cylindrical scatterer
# with circular cross-section in spherical coordinates.
#
# inputs:
# lmax: maximum orbital angular momentum expansion order, an integer
# Ntheta: number of sections for discretization
# geometric_params: radius (0) and height (1) in an array
# n0: refractive index of medium
# ns: refractive index of scatterer
# wavelength: excitation wavelength
# particle_type: shape of particle (cylinder, ellipsoid, etc)
def compute_T(lmax, Ntheta, geometric_params, n0, ns, wavelength, particle_type):
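    # nu=3 -> outgoing (Hankel) radial functions for Q; nu=1 -> regular (Bessel) for rQ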
[Q, dQ] = compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, 3, particle_type)
[rQ, drQ] = compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, 1, particle_type)
    Qinv = np.linalg.inv(Q)
    T = np.matmul(rQ, Qinv)
    dT = np.zeros(np.shape(drQ), dtype=np.complex_)
num_geometric_params = np.size(geometric_params)
for geometric_idx in np.arange(0, num_geometric_params):
dT[:, :, geometric_idx] = np.matmul(drQ[:, :, geometric_idx] - np.matmul(T, dQ[:, :, geometric_idx]), Qinv)
return T, dT
def compute_Q(lmax, Ntheta, geometric_params, n0, ns, wavelength, nu, particle_type):
    if particle_type == 'cylinder':
a = geometric_params[0]
h = geometric_params[1]
[J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22] = compute_J_cyl(lmax, Ntheta, a, h, n0, ns, wavelength, nu)
    elif particle_type == 'ellipsoid':
print('ellipsoid not supported')
else:
print('particle type ' + particle_type + ' not supported.')
return 0
ki = 2*np.pi*n0/wavelength
ks = 2*np.pi*ns/wavelength
P = -1j * ki * (ks * J21 + ki * J12)
R = -1j * ki * (ks * J11 + ki * J22)
S = -1j * ki * (ks * J22 + ki * J11)
U = -1j * ki * (ks * J12 + ki * J21)
dP = -1j * ki * (ks * dJ21 + ki * dJ12)
dR = -1j * ki * (ks * dJ11 + ki * dJ22)
dS = -1j * ki * (ks * dJ22 + ki * dJ11)
dU = -1j * ki * (ks * dJ12 + ki * dJ21)
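    # assemble the full 2x2 block matrix Q = [[P, R], [S, U]]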
Q = np.block([
[P, R],
[S, U]
])
    nmax = Q.shape[0]
num_geometric_params = np.size(geometric_params)
    dQ = np.zeros((nmax, nmax, num_geometric_params), dtype=np.complex_)
for geometric_idx in np.arange(0, num_geometric_params):
dQ[:, :, geometric_idx] = np.block([
[dP[:, :, geometric_idx], dR[:, :, geometric_idx]],
[dS[:, :, geometric_idx], dU[:, :, geometric_idx]]
])
return Q, dQ
#function that computes the J surface integrals and their derivatives with respect to cylinder radius (a) and cylinder
# height (h). Expands up to a specified lmax, and approximates the integrals using gaussian quadrature with Ntheta
# points for the two integrals required.
# n0 is refractive index of medium
# ns is refractive index of scatterer
# wavelength is illumination wavelength
# nu = 1 or 3
# 1: b_li are the spherical Bessel functions of the first kind (j_n(x))
# involved in rQ and drQ computation
# 3: b_li are the spherical Hankel functions of the first kind (h_n(x))
# involved in Q and dQ computation
#care should be taken to expand lmax to sufficient order,
#where lmax should be greater than (ns-n_0)*max(2*a,h)/wavelength
def compute_J_cyl(lmax, Ntheta, a, h, n0, ns, wavelength, nu):
#dimension of final T-matrix is 2*nmax x 2*nmax for each individual matrix
nmax = int(lmax*(lmax+2))
#preallocate space for both J and dJ matrices of size nmax x nmax for J matrices
#and dJ matrices are nmax x nmax x 2
#dJ[:,:,0] is dJ/da
#dJ[:,:,1] is dJ/dh
J11 = np.zeros((nmax, nmax), dtype=np.complex_)
J12 = np.zeros((nmax, nmax), dtype=np.complex_)
J21 = np.zeros((nmax, nmax), dtype=np.complex_)
J22 = np.zeros((nmax, nmax), dtype=np.complex_)
dJ11 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ12 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ21 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
dJ22 = np.zeros((nmax, nmax, 2), dtype=np.complex_)
#find the angle theta at which the corner of the cylinder is at
theta_edge = np.arctan(2*a/h)
#prepare gauss-legendre quadrature for interval of [-1,1] to perform numerical integral
[x_norm, wt_norm] = legendre.leggauss(Ntheta)
#rescale integration points and weights to match actual bounds:
# circ covers the circular surface of the cylinder (end caps)
# body covers the rectangular surface of the cylinder (body area)
#circ integral goes from 0 to theta_edge, b = theta_edge, a = 0
theta_circ = theta_edge/2*x_norm+theta_edge/2
wt_circ = theta_edge/2*wt_norm
#body integral goes from theta_edge to pi/2, b = pi/2, a = theta_edge
theta_body = (np.pi/2-theta_edge)/2*x_norm+(np.pi/2+theta_edge)/2
wt_body = (np.pi/2-theta_edge)/2*wt_norm
#merge the circ and body lists into a single map
theta_map = np.concatenate((theta_circ, theta_body), axis=0)
weight_map = np.concatenate((wt_circ, wt_body), axis=0)
#identify indices corresponding to the circular end caps and rectangular body
circ_idx = np.arange(0, Ntheta)
body_idx = np.arange(Ntheta, 2*Ntheta)
#k vectors of the light in medium (ki) and in scatterer (ks)
ki = 2*np.pi*n0/wavelength
ks = 2*np.pi*ns/wavelength
#precompute trig functions
ct = np.cos(theta_map)
st = np.sin(theta_map)
#normal vector for circular surface (circ) requires tangent
tant = np.tan(theta_map[circ_idx])
#normal vector for rectangular surface (body) requires cotangent
cott = 1/np.tan(theta_map[body_idx])
#precompute spherical angular polynomials
[p_lm, pi_lm, tau_lm] = sf.legendre_normalized(ct, st, lmax)
#radial coordinate of the surface, and the derivatives with respect to a and h
#r_c: radial coordinate of circular end cap
#r_b: radial coordinate of rectangular body
r_c = h/2/ct[circ_idx]
dr_c = r_c/h
r_b = a/st[body_idx]
dr_b = r_b/a
    #merge radial coordinates into a single vector
r = np.concatenate((r_c, r_b), axis=0)
#derivatives of the integration limits for performing derivatives
da_edge = 2*h/(h**2+4*a**2)
dh_edge = -2*a/(h**2+4*a**2)
#loop through each individual element of the J11, J12, J21, J22 matrices
for li in np.arange(1, lmax+1):
        #precompute bessel functions and derivatives
b_li = bf.sph_bessel(nu, li, ki*r)
db_li = bf.d1Z_Z_sph_bessel(nu, li, ki*r)
db2_li = bf.d2Z_Z_sph_bessel(nu, li, ki*r)
d1b_li = bf.d1Z_sph_bessel(nu, li, ki*r)
for lp in np.arange(1, lmax+1):
#precompute bessel functions and derivatives
j_lp = bf.sph_bessel(1, lp, ks*r)
dj_lp = bf.d1Z_Z_sph_bessel(1, lp, ks*r)
dj2_lp = bf.d2Z_Z_sph_bessel(1, lp, ks*r)
d1j_lp = bf.d1Z_sph_bessel(1, lp, ks*r)
#compute normalization factor
lfactor = 1/np.sqrt(li*(li+1)*lp*(lp+1))
for mi in np.arange(-li, li+1):
#compute row index where element is placed
n_i = compute_n(lmax, 1, li, mi)-1
#precompute spherical harmonic functions
p_limi = p_lm[li][abs(mi)]
pi_limi = pi_lm[li][abs(mi)]
tau_limi = tau_lm[li][abs(mi)]
for mp in np.arange(-lp, lp+1):
#compute col index where element is placed
n_p = compute_n(lmax, 1, lp, mp)-1
#precompute spherical harmonic functions
p_lpmp = p_lm[lp][abs(mp)]
pi_lpmp = pi_lm[lp][abs(mp)]
tau_lpmp = tau_lm[lp][abs(mp)]
#compute selection rules that includes symmetries
sr_1122 = selection_rules(li, mi, lp, mp, 1)
sr_1221 = selection_rules(li, mi, lp, mp, 2)
#perform integral about phi analytically. This is roughly a sinc function
if mi == mp:
phi_exp = np.pi
else:
phi_exp = -1j*(np.exp(1j*(mp-mi)*np.pi)-1)/(mp-mi)
#for J11 and J22 integrals
if sr_1122 != 0:
prefactor = sr_1122*lfactor*phi_exp
ang = mp*pi_lpmp*tau_limi+mi*pi_limi*tau_lpmp
J11_r = -1j*weight_map*prefactor*r**2*st*j_lp*b_li*ang
J11[n_i, n_p] = np.sum(J11_r)
dJ11dr = 2*r*j_lp*b_li+r**2*(ks*d1j_lp*b_li+ki*d1b_li*j_lp)
dJ11[n_i, n_p, 0] = np.sum(-1j*prefactor*weight_map[body_idx]*st[body_idx]*dJ11dr[body_idx]*ang[body_idx]*dr_b)
dJ11[n_i, n_p, 1] = np.sum(-1j*prefactor*weight_map[circ_idx]*st[circ_idx]*dJ11dr[circ_idx]*ang[circ_idx]*dr_c)
J22_r = -1j*prefactor*weight_map*st/ki/ks*dj_lp*db_li*ang
J22_db = lp*(lp+1)*mi*pi_limi*p_lpmp
J22_dj = li*(li+1)*mp*pi_lpmp*p_limi
J22_t = -1j*prefactor*weight_map*st/ki/ks*(J22_db*j_lp*db_li+J22_dj*b_li*dj_lp)
                        J22[n_i, n_p] = np.sum(J22_r)+np.sum(J22_t[circ_idx]*tant)+np.sum(J22_t[body_idx]*-cott)
dJ22edge = st[Ntheta]*(J22_db[Ntheta]*j_lp[Ntheta]*db_li[Ntheta]+J22_dj[Ntheta]*dj_lp[Ntheta]*b_li[Ntheta])*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ22da1 = -1j/ki/ks*(ks*dj2_lp[body_idx]*db_li[body_idx]+ki*db2_li[body_idx]*dj_lp[body_idx])*dr_b*st[body_idx]*ang[body_idx]
dJ22da2 = 1j/ki/ks*cott*st[body_idx]*dr_b*(J22_db[body_idx]*(ks*d1j_lp[body_idx]*db_li[body_idx]+ki*j_lp[body_idx]*db2_li[body_idx])+J22_dj[body_idx]*(ki*d1b_li[body_idx]*dj_lp[body_idx]+ks*dj2_lp[body_idx]*b_li[body_idx]))
dJ22dh1 = -1j/ki/ks*(ks*dj2_lp[circ_idx]*db_li[circ_idx]+ki*db2_li[circ_idx]*dj_lp[circ_idx])*dr_c*st[circ_idx]*ang[circ_idx]
dJ22dh2 = -1j/ki/ks*tant*st[circ_idx]*dr_c*(J22_db[circ_idx]*(ks*d1j_lp[circ_idx]*db_li[circ_idx]+ki*j_lp[circ_idx]*db2_li[circ_idx])+J22_dj[circ_idx]*(ki*d1b_li[circ_idx]*dj_lp[circ_idx]+ks*dj2_lp[circ_idx]*b_li[circ_idx]))
dJ22[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ22da1)+np.sum(prefactor*weight_map[body_idx]*dJ22da2)+prefactor*dJ22edge*da_edge
dJ22[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ22dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ22dh2)+prefactor*dJ22edge*dh_edge
#for J12 and J21 integrals
if sr_1221 != 0:
prefactor = sr_1221*lfactor*phi_exp
ang = mi*mp*pi_limi*pi_lpmp+tau_limi*tau_lpmp
J12_r = prefactor*weight_map/ki*r*st*j_lp*db_li*ang
J12_t = prefactor*weight_map/ki*r*st*li*(li+1)*j_lp*b_li*p_limi*tau_lpmp
J12[n_i, n_p] = np.sum(J12_r)+np.sum(J12_t[circ_idx]*tant)+np.sum(J12_t[body_idx]*-cott)
dJ12edge = li*(li+1)/ki/r[Ntheta]*st[Ntheta]*j_lp[Ntheta]*b_li[Ntheta]*tau_lpmp[Ntheta]*p_limi[Ntheta]*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ12da1 = dr_b/ki*(j_lp[body_idx]*db_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*j_lp[body_idx]*d1b_li[body_idx]))*st[body_idx]*ang[body_idx]
dJ12da2 = -li*(li+1)/ki*dr_b*(j_lp[body_idx]*b_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*j_lp[body_idx]*d1b_li[body_idx]))*cott*st[body_idx]*tau_lpmp[body_idx]*p_limi[body_idx]
dJ12dh1 = dr_c/ki*(j_lp[circ_idx]*db_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*j_lp[circ_idx]*d1b_li[circ_idx]))*st[circ_idx]*ang[circ_idx]
dJ12dh2 = li*(li+1)/ki*dr_c*(j_lp[circ_idx]*b_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*j_lp[circ_idx]*d1b_li[circ_idx]))*tant*st[circ_idx]*tau_lpmp[circ_idx]*p_limi[circ_idx]
dJ12[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ12da1)+np.sum(prefactor*weight_map[body_idx]*dJ12da2)+prefactor*dJ12edge*da_edge
                        dJ12[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ12dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ12dh2)+prefactor*dJ12edge*dh_edge
J21_r = -prefactor*weight_map/ks*r*st*dj_lp*b_li*ang
J21_t = -prefactor*weight_map/ks*r*st*lp*(lp+1)*j_lp*b_li*p_lpmp*tau_limi
J21[n_i, n_p] = np.sum(J21_r)+np.sum(J21_t[circ_idx]*tant)+np.sum(J21_t[body_idx]*-cott)
dJ21edge = -lp*(lp+1)/ks/r[Ntheta]*st[Ntheta]*j_lp[Ntheta]*b_li[Ntheta]*tau_lpmp[Ntheta]*p_limi[Ntheta]*(st[Ntheta]/ct[Ntheta]+ct[Ntheta]/st[Ntheta])
dJ21da1 = -dr_b/ks*(b_li[body_idx]*dj_lp[body_idx]+r_b*(ki*d1b_li[body_idx]*dj_lp[body_idx]+ks*dj2_lp[body_idx]*b_li[body_idx]))*st[body_idx]*ang[body_idx]
dJ21da2 = lp*(lp+1)/ks*dr_b*(j_lp[body_idx]*b_li[body_idx]+r_b*(ks*d1j_lp[body_idx]*b_li[body_idx]+ki*d1b_li[body_idx]*j_lp[body_idx]))*cott*st[body_idx]*tau_limi[body_idx]*p_lpmp[body_idx]
dJ21dh1 = -dr_c/ks*(b_li[circ_idx]*dj_lp[circ_idx]+r_c*(ki*d1b_li[circ_idx]*dj_lp[circ_idx]+ks*dj2_lp[circ_idx]*b_li[circ_idx]))*st[circ_idx]*ang[circ_idx]
dJ21dh2 = -lp*(lp+1)/ks*dr_c*(j_lp[circ_idx]*b_li[circ_idx]+r_c*(ks*d1j_lp[circ_idx]*b_li[circ_idx]+ki*d1b_li[circ_idx]*j_lp[circ_idx]))*tant*st[circ_idx]*tau_limi[circ_idx]*p_lpmp[circ_idx]
dJ21[n_i, n_p, 0] = np.sum(prefactor*weight_map[body_idx]*dJ21da1)+np.sum(prefactor*weight_map[body_idx]*dJ21da2)+prefactor*dJ21edge*da_edge
dJ21[n_i, n_p, 1] = np.sum(prefactor*weight_map[circ_idx]*dJ21dh1)+np.sum(prefactor*weight_map[circ_idx]*dJ21dh2)+prefactor*dJ21edge*dh_edge
return J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22
#compute n index (single index) for matrix element given its p (polarization), l (orbital angular momentum index),
# and m (azimuthal angular momentum index)
def compute_n(lmax, p, l, m):
return (p-1)*lmax*(lmax+2)+(l-1)*(l+1)+m+l+1
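# worked example (illustrative): for lmax=3 the single index runs over 1..30;
# compute_n(3, 1, 1, -1) == 1 (first entry of polarization block 1) and
# compute_n(3, 2, 1, -1) == 16 (first entry of polarization block 2)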
#selection rules taking into account different symmetries for an axisymmetric particle:
# the factor (1+(-1)**(mp-mi)) vanishes unless mp-mi is even, and the last factor
# requires li+lp odd for the diagonal blocks (switch 1) and li+lp even for the
# off-diagonal blocks (switch 2)
def selection_rules(li, mi, lp, mp, diag_switch):
if diag_switch == 1:
return np.float_power(-1, mi)*(1+np.float_power(-1, mp-mi))*(1+(-1)**(lp+li+1))
elif diag_switch == 2:
return np.float_power(-1, mi)*(1+np.float_power(-1, mp-mi))*(1+(-1)**(lp+li))
else:
return 0
if __name__ == '__main__':
import matplotlib.pyplot as plt
cyl_params = np.array([500,860])
[J11, J12, J21, J22, dJ11, dJ12, dJ21, dJ22] = compute_J_cyl(3,30,200,460,1,1.52,1000,3)
[T, dT] = compute_T(6,30,cyl_params,1,4,1000,'cylinder')
img1 = plt.imshow(np.abs(T))
plt.colorbar()
plt.title('T')
plt.show()
| 52.703833 | 248 | 0.628719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,519 | 0.232646 |
c0c7d98ec94365b9cf9f0e166a19f7b2371bc3ed | 982 | py | Python | run_tests.py | aquarioos/dvik-print | b897936168dab51c9e0f9fd84993065428896be4 | [
"MIT"
]
| 1 | 2018-09-19T22:27:32.000Z | 2018-09-19T22:27:32.000Z | run_tests.py | aquarioos/dvik-print | b897936168dab51c9e0f9fd84993065428896be4 | [
"MIT"
]
| null | null | null | run_tests.py | aquarioos/dvik-print | b897936168dab51c9e0f9fd84993065428896be4 | [
"MIT"
]
| null | null | null | # -*- coding: utf8 -*-
from __future__ import division, absolute_import, print_function
import os
import sys
import datetime as dt
import dvik_print as dvp
if __name__ == '__main__':
print(sys.version)
O = {
'lista': ['el1', 'el2', 1, 2, 3, 4, None, False],
'zbiór': {1, 2, 1, 2, 'a', 'a', 'b', 'b'},
'krotka': ('oto', 'elementy', 'naszej', 'krotki'),
('krotka', 'klucz'): {
'klucz1': ['jakaś', 'lista', 123],
'klucz2': dt.datetime.now(),
'klucz3': dt
},
(123, 'asd'): {123, 234, 345},
(123, 'asd1'): (123, 234, 345)
}
    # declare a dvp.PrettyPrint object
pp = dvp.PrettyPrint(tab=2, head=3, tail=2, max_str_len=50, show_line=True, filename=__file__)
    # the object is callable; calling it this way
    # prints object O to standard output
pp(O, var='zmienna')
    # the default values can also be used
pp_domyslny = dvp.PrettyPrint()
pp_domyslny(O)
| 26.540541 | 98 | 0.566191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.340726 |
c0c85207554af0054a2d3560e6e8d9cb080608eb | 6,200 | py | Python | nwb_conversion_tools/datainterfaces/ecephys/basesortingextractorinterface.py | miketrumpis/nwb-conversion-tools | 4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1 | [
"BSD-3-Clause"
]
| null | null | null | nwb_conversion_tools/datainterfaces/ecephys/basesortingextractorinterface.py | miketrumpis/nwb-conversion-tools | 4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1 | [
"BSD-3-Clause"
]
| null | null | null | nwb_conversion_tools/datainterfaces/ecephys/basesortingextractorinterface.py | miketrumpis/nwb-conversion-tools | 4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1 | [
"BSD-3-Clause"
]
| null | null | null | """Authors: Cody Baker and Ben Dichter."""
from abc import ABC
from pathlib import Path
import spikeextractors as se
import numpy as np
from pynwb import NWBFile, NWBHDF5IO
from pynwb.ecephys import SpikeEventSeries
from jsonschema import validate
from ...basedatainterface import BaseDataInterface
from ...utils.json_schema import (
get_schema_from_hdmf_class,
get_base_schema,
get_schema_from_method_signature,
fill_defaults,
)
from ...utils.common_writer_tools import default_export_ops, default_export_ops_schema
from ...utils import export_ecephys_to_nwb
from .baserecordingextractorinterface import BaseRecordingExtractorInterface, map_si_object_to_writer, OptionalPathType
class BaseSortingExtractorInterface(BaseDataInterface, ABC):
"""Primary class for all SortingExtractor intefaces."""
SX = None
def __init__(self, **source_data):
super().__init__(**source_data)
self.sorting_extractor = self.SX(**source_data)
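        # map_si_object_to_writer picks the NWB writer class that matches this extractor type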
self.writer_class = map_si_object_to_writer(self.sorting_extractor)(self.sorting_extractor)
def get_metadata_schema(self):
"""Compile metadata schema for the RecordingExtractor."""
metadata_schema = super().get_metadata_schema()
# Initiate Ecephys metadata
metadata_schema["properties"]["Ecephys"] = get_base_schema(tag="Ecephys")
metadata_schema["properties"]["Ecephys"]["required"] = []
metadata_schema["properties"]["Ecephys"]["properties"] = dict(
UnitProperties=dict(
type="array",
minItems=0,
renderForm=False,
items={"$ref": "#/properties/Ecephys/properties/definitions/UnitProperties"},
),
)
# Schema definition for arrays
metadata_schema["properties"]["Ecephys"]["properties"]["definitions"] = dict(
UnitProperties=dict(
type="object",
additionalProperties=False,
required=["name"],
properties=dict(
name=dict(type="string", description="name of this units column"),
description=dict(type="string", description="description of this units column"),
),
),
)
return metadata_schema
def subset_sorting(self):
"""
        Replace the writer with a stubbed (truncated) version of the sorting extractor.
        Takes no parameters; the stub makes the conversion run faster and use less
        memory, and is typically triggered via ``stub_test=True`` in ``run_conversion``.
"""
self.writer_class = map_si_object_to_writer(self.sorting_extractor)(
self.sorting_extractor,
stub=True,
)
def run_conversion(
self,
nwbfile: NWBFile,
metadata: dict,
stub_test: bool = False,
write_ecephys_metadata: bool = False,
save_path: OptionalPathType = None,
overwrite: bool = False,
**kwargs,
):
"""
Primary function for converting the data in a SortingExtractor to the NWB standard.
Parameters
----------
nwbfile: NWBFile
nwb file to which the recording information is to be added
metadata: dict
metadata info for constructing the nwb file (optional).
Should be of the format
            metadata['Ecephys']['UnitProperties'] = [dict(name=my_name, description=my_description), ...]
stub_test: bool, optional (default False)
If True, will truncate the data to run the conversion faster and take up less memory.
write_ecephys_metadata: bool (optional, defaults to False)
Write electrode information contained in the metadata.
save_path: PathType
Required if an nwbfile is not passed. Must be the path to the nwbfile
being appended, otherwise one is created and written.
overwrite: bool
If using save_path, whether or not to overwrite the NWBFile if it already exists.
skip_unit_features: list
list of unit feature names to skip writing to units table.
skip_unit_properties: list
list of unit properties to skip writing to units table.
unit_property_descriptions: dict
custom descriptions for unit properties:
>>> dict(prop_name='description')
            The other way to add custom descriptions is to override the default metadata:
>>> metadata = self.get_metadata()
>>> metadata["Ecephys"] = dict()
>>> metadata["Ecephys"].update(UnitProperties=[dict(name='prop_name1', description='description1'),
            >>> dict(name='prop_name2', description='description2')])
"""
if stub_test:
self.subset_sorting()
if write_ecephys_metadata and "Ecephys" in metadata:
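            # build a throwaway recording interface so electrode metadata can be
            # written even though this interface wraps only a sorting extractor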
class TempEcephysInterface(BaseRecordingExtractorInterface):
RX = se.NumpyRecordingExtractor
n_channels = max([len(x["data"]) for x in metadata["Ecephys"]["Electrodes"]])
temp_ephys = TempEcephysInterface(timeseries=np.array(range(n_channels)), sampling_frequency=1)
temp_ephys.run_conversion(nwbfile=nwbfile, metadata=metadata, write_electrical_series=False)
conversion_opts = default_export_ops()
conversion_opts.update(**kwargs)
# construct unit property descriptions:
property_descriptions = dict()
for metadata_column in metadata.get("Ecephys", dict()).get("UnitProperties", []):
property_descriptions.update({metadata_column["name"]: metadata_column["description"]})
conversion_opts["unit_property_descriptions"].update(property_descriptions)
conversion_opt_schema = default_export_ops_schema()
validate(instance=conversion_opts, schema=conversion_opt_schema)
self.writer_class.add_to_nwb(nwbfile, metadata, **conversion_opts)
if save_path is not None:
if overwrite:
if Path(save_path).exists():
Path(save_path).unlink()
with NWBHDF5IO(str(save_path), mode="w") as io:
io.write(self.writer_class.nwbfile)
| 42.465753 | 119 | 0.647581 | 5,501 | 0.887258 | 0 | 0 | 0 | 0 | 0 | 0 | 2,634 | 0.424839 |
c0c8cb69c19ab4dd40d043117a7822abefc679ef | 1,711 | py | Python | buildscripts/resmokelib/testing/testcases/cpp_libfuzzer_test.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
]
| null | null | null | buildscripts/resmokelib/testing/testcases/cpp_libfuzzer_test.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
]
| null | null | null | buildscripts/resmokelib/testing/testcases/cpp_libfuzzer_test.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
]
| null | null | null | """The libfuzzertest.TestCase for C++ libfuzzer tests."""
import datetime
import os
from buildscripts.resmokelib import core
from buildscripts.resmokelib import utils
from buildscripts.resmokelib.testing.fixtures import interface as fixture_interface
from buildscripts.resmokelib.testing.testcases import interface
class CPPLibfuzzerTestCase(interface.ProcessTestCase):
"""A C++ libfuzzer test to execute."""
REGISTERED_NAME = "cpp_libfuzzer_test"
DEFAULT_TIMEOUT = datetime.timedelta(hours=1)
def __init__( # pylint: disable=too-many-arguments
self, logger, program_executable, program_options=None, runs=1000000,
corpus_directory_stem="corpora"):
"""Initialize the CPPLibfuzzerTestCase with the executable to run."""
interface.ProcessTestCase.__init__(self, logger, "C++ libfuzzer test", program_executable)
self.program_executable = program_executable
self.program_options = utils.default_if_none(program_options, {}).copy()
self.runs = runs
self.corpus_directory = f"{corpus_directory_stem}/corpus-{self.short_name()}"
self.merged_corpus_directory = f"{corpus_directory_stem}-merged/corpus-{self.short_name()}"
os.makedirs(self.corpus_directory, exist_ok=True)
def _make_process(self):
default_args = [
self.program_executable,
"-max_len=100000",
"-rss_limit_mb=5000",
"-max_total_time=3600", # 1 hour is the maximum amount of time to allow a fuzzer to run
f"-runs={self.runs}",
self.corpus_directory,
]
return core.programs.make_process(self.logger, default_args, **self.program_options)
| 38.022222 | 100 | 0.707189 | 1,391 | 0.812975 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.294565 |
c0c904cad48edbd6699de73edf6362e41250b47a | 509 | py | Python | app/exceptions.py | syedwaseemjan/RiskManager | c788daf533b125740ccd1fb09acebe023ca286b7 | [
"MIT"
]
| null | null | null | app/exceptions.py | syedwaseemjan/RiskManager | c788daf533b125740ccd1fb09acebe023ca286b7 | [
"MIT"
]
| null | null | null | app/exceptions.py | syedwaseemjan/RiskManager | c788daf533b125740ccd1fb09acebe023ca286b7 | [
"MIT"
]
| null | null | null | class RiskManagerError(Exception):
"""Base application error class."""
def __init__(self, msg):
self.msg = msg
class RiskDoesNotExist(RiskManagerError):
def __init__(self):
super(RiskDoesNotExist, self).__init__(
"No risk record found for the provided ID. Are you sure you have provided correct ID?")
class RiskManagerFormError(Exception):
"""Raise when an error processing a form occurs."""
def __init__(self, errors=None):
self.errors = errors
| 25.45 | 99 | 0.681729 | 502 | 0.986248 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.337917 |
c0c9967167f2ebbfb12ea4280bc6aa6f0ee2cebd | 1,278 | py | Python | data_curation/genome_annotations/preprocess_SEA.py | talkowski-lab/rCNV2 | fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d | [
"MIT"
]
| 7 | 2021-01-28T15:46:46.000Z | 2022-02-07T06:50:40.000Z | data_curation/genome_annotations/preprocess_SEA.py | talkowski-lab/rCNV2 | fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d | [
"MIT"
]
| 1 | 2021-03-02T01:33:53.000Z | 2021-03-02T01:33:53.000Z | data_curation/genome_annotations/preprocess_SEA.py | talkowski-lab/rCNV2 | fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d | [
"MIT"
]
| 3 | 2021-02-21T19:49:12.000Z | 2021-12-22T15:56:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Ryan L. Collins <[email protected]>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Parse simple SEA super-enhancer BED by cell types
"""
import argparse
import csv
import subprocess
def main():
"""
Main block
"""
# Parse command line arguments and options
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('bed', help='Path to BED4 of super enhancers')
parser.add_argument('outdir', help='Output directory')
args = parser.parse_args()
outfiles = {}
with open(args.bed) as fin:
for chrom, start, end, source in csv.reader(fin, delimiter='\t'):
source = source.replace(' ', '_').replace('(', '').replace(')', '')
if source not in outfiles.keys():
outfiles[source] = open('{}/SEA.{}.bed'.format(args.outdir, source), 'w')
outfiles[source].write('\t'.join([chrom, start, end]) + '\n')
for outfile in outfiles.values():
outpath = outfile.name
outfile.close()
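        # compress in place (assumes htslib's bgzip is available on PATH)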
subprocess.run(['bgzip', '-f', outpath])
if __name__ == '__main__':
main()
| 26.081633 | 89 | 0.622066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.343505 |
c0cba6784c6a4d07543a90ca7bc4b5a773c81fe7 | 2,461 | py | Python | src/transforms/imageCropDivide/dev/generate_nodenk.py | MrLixm/Foundry_Nuke | 078115043b6a4c09bdcf1b5031e995ef296bd604 | [
"Apache-2.0"
]
| null | null | null | src/transforms/imageCropDivide/dev/generate_nodenk.py | MrLixm/Foundry_Nuke | 078115043b6a4c09bdcf1b5031e995ef296bd604 | [
"Apache-2.0"
]
| null | null | null | src/transforms/imageCropDivide/dev/generate_nodenk.py | MrLixm/Foundry_Nuke | 078115043b6a4c09bdcf1b5031e995ef296bd604 | [
"Apache-2.0"
]
| null | null | null | """
python>3
"""
import os.path
import re
from pathlib import Path
VERSION = 7
BASE = r"""
set cut_paste_input [stack 0]
version 12.2 v5
push $cut_paste_input
Group {
name imageCropDivide
tile_color 0x5c3d84ff
note_font_size 25
note_font_color 0xffffffff
selected true
xpos 411
ypos -125
addUserKnob {20 User}
addUserKnob {3 width_max}
addUserKnob {3 height_max -STARTLINE}
addUserKnob {3 width_source}
addUserKnob {3 height_source -STARTLINE}
addUserKnob {26 "" +STARTLINE}
addUserKnob {22 icd_script l "Copy Setup to ClipBoard" T "$SCRIPT$" +STARTLINE}
addUserKnob {26 info l " " T "press ctrl+v in the nodegraph after clicking the above button"}
addUserKnob {20 Info}
addUserKnob {26 infotext l "" +STARTLINE T "2022 - Liam Collod<br> Visit <a style=\"color:#fefefe;\" href=\"https://github.com/MrLixm/Foundry_Nuke/tree/main/src/transforms/imageCropDivide\">the GitHub repo</a> "}
addUserKnob {26 "" +STARTLINE}
addUserKnob {26 versiontext l "" T "version $VERSION$"}
}
Input {
inputs 0
name Input1
xpos 0
}
Output {
name Output1
xpos 0
ypos 300
}
end_group
"""
MODULE_BUTTON_PATH = Path("..") / "button.py"
NODENK_PATH = Path("..") / "node.nk"
def increment_version():
this = Path(__file__)
this_code = this.read_text(encoding="utf-8")
version = re.search(r"VERSION\s*=\s*(\d+)", this_code)
assert version, f"Can't find <VERSION> in <{this}> !"
new_version = int(version.group(1)) + 1
new_code = f"VERSION = {new_version}"
new_code = this_code.replace(version.group(0), str(new_code))
this.write_text(new_code, encoding="utf-8")
print(f"[{__name__}][increment_version] Incremented {this} to {new_version}.")
return
def run():
increment_version()
btnscript = MODULE_BUTTON_PATH.read_text(encoding="utf-8")
# sanitize for nuke
btnscript = btnscript.replace("\\", r'\\')
btnscript = btnscript.split("\n")
btnscript = r"\n".join(btnscript)
btnscript = btnscript.replace("\"", r'\"')
btnscript = btnscript.replace("{", r'\{')
btnscript = btnscript.replace("}", r'\}')
node_content = BASE.replace("$SCRIPT$", btnscript)
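    # VERSION is stale here: increment_version() rewrote the file on disk but not
    # the already-imported module constant, hence the +1 below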
node_content = node_content.replace("$VERSION$", str(VERSION+1))
NODENK_PATH.write_text(node_content, encoding="utf-8")
print(f"[{__name__}][run] node.nk file written to {NODENK_PATH}")
print(f"[{__name__}][run] Finished.")
return
if __name__ == '__main__':
# print(__file__)
run()
| 25.905263 | 213 | 0.683056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,439 | 0.584722 |
c0cd5d7d340b27b3217620ef4b12a1391841820b | 2,294 | py | Python | workflow/tests/test_experiment_qc.py | JAMKuttan/chipseq_analysis | f8e4853bfdb4de8540026ae0b23235d72a1114ad | [
"MIT"
]
| null | null | null | workflow/tests/test_experiment_qc.py | JAMKuttan/chipseq_analysis | f8e4853bfdb4de8540026ae0b23235d72a1114ad | [
"MIT"
]
| null | null | null | workflow/tests/test_experiment_qc.py | JAMKuttan/chipseq_analysis | f8e4853bfdb4de8540026ae0b23235d72a1114ad | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import pytest
import os
import pandas as pd
from io import StringIO
import experiment_qc
test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
'/../output/experimentQC/'
DESIGN_STRING = """sample_id\texperiment_id\tbiosample\tfactor\ttreatment\treplicate\tcontrol_id\tbam_reads
A_1\tA\tLiver\tH3K27ac\tNone\t1\tB_1\tA_1.bam
A_2\tA\tLiver\tH3K27ac\tNone\t2\tB_2\tA_2.bam
B_1\tB\tLiver\tInput\tNone\t1\tB_1\tB_1.bam
B_2\tB\tLiver\tInput\tNone\t2\tB_2\tB_2.bam
"""
@pytest.fixture
def design_bam():
design_file = StringIO(DESIGN_STRING)
design_df = pd.read_csv(design_file, sep="\t")
return design_df
@pytest.mark.unit
def test_check_update_controls(design_bam):
new_design = experiment_qc.update_controls(design_bam)
assert new_design.loc[0, 'control_reads'] == "B_1.bam"
@pytest.mark.singleend
def test_coverage_singleend():
assert os.path.exists(os.path.join(test_output_path, 'sample_mbs.npz'))
assert os.path.exists(os.path.join(test_output_path, 'coverage.pdf'))
@pytest.mark.singleend
def test_spearman_singleend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_SpearmanCorr.pdf'))
@pytest.mark.singleend
def test_pearson_singleend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_PearsonCorr.pdf'))
@pytest.mark.singleend
def test_fingerprint_singleend():
assert os.path.exists(os.path.join(test_output_path, 'ENCLB144FDT_fingerprint.pdf'))
assert os.path.exists(os.path.join(test_output_path, 'ENCLB831RUI_fingerprint.pdf'))
@pytest.mark.pairdend
def test_coverage_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'sample_mbs.npz'))
assert os.path.exists(os.path.join(test_output_path, 'coverage.pdf'))
@pytest.mark.pairdend
def test_spearman_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_SpearmanCorr.pdf'))
@pytest.mark.pairdend
def test_pearson_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'heatmap_PearsonCorr.pdf'))
@pytest.mark.pairdend
def test_fingerprint_pairedend():
assert os.path.exists(os.path.join(test_output_path, 'ENCLB568IYX_fingerprint.pdf'))
assert os.path.exists(os.path.join(test_output_path, 'ENCLB637LZP_fingerprint.pdf'))
| 30.586667 | 107 | 0.773322 | 0 | 0 | 0 | 0 | 1,748 | 0.761988 | 0 | 0 | 629 | 0.274194 |
c0d13bb4fa90665ac270a6c1d4953230e7cffcc2 | 371 | py | Python | sensors/routing.py | edisondotme/motoPi | 18ddd46d97a4db0918fd2c3cc1ffc60600158124 | [
"MIT"
]
| 2 | 2021-05-10T22:04:36.000Z | 2022-01-10T03:23:04.000Z | sensors/routing.py | edisondotme/motoPi | 18ddd46d97a4db0918fd2c3cc1ffc60600158124 | [
"MIT"
]
| 1 | 2017-02-13T08:18:49.000Z | 2017-02-14T07:11:18.000Z | sensors/routing.py | edisondotme/motoPi | 18ddd46d97a4db0918fd2c3cc1ffc60600158124 | [
"MIT"
]
| 1 | 2021-05-10T22:04:57.000Z | 2021-05-10T22:04:57.000Z | from channels.routing import route
from .consumers import ws_message, ws_connect, ws_disconnect
# TODO: Edit this to make proper use of channels.routing.route() or not
channel_routing = {
# route("websocket.receive", ws_message, path=r"^/chat/"),
"websocket.connect": ws_connect,
"websocket.receive": ws_message,
"websocket.disconnect": ws_disconnect,
}
| 30.916667 | 72 | 0.74124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.514825 |
c0d1e420d8a5ef2c04e4e14f531037003c9ed4f0 | 3,626 | py | Python | native_client_sdk/src/build_tools/tests/test_generate_make.py | junmin-zhu/chromium-rivertrail | eb1a57aca71fe68d96e48af8998dcfbe45171ee1 | [
"BSD-3-Clause"
]
| 5 | 2018-03-10T13:08:42.000Z | 2021-07-26T15:02:11.000Z | native_client_sdk/src/build_tools/tests/test_generate_make.py | quisquous/chromium | b25660e05cddc9d0c3053b3514f07037acc69a10 | [
"BSD-3-Clause"
]
| 1 | 2015-07-21T08:02:01.000Z | 2015-07-21T08:02:01.000Z | native_client_sdk/src/build_tools/tests/test_generate_make.py | jianglong0156/chromium.src | d496dfeebb0f282468827654c2b3769b3378c087 | [
"BSD-3-Clause"
]
| 6 | 2016-11-14T10:13:35.000Z | 2021-01-23T15:29:53.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import datetime
import os
import posixpath
import subprocess
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import generate_make
BASIC_DESC = {
'TOOLS': ['newlib', 'glibc'],
'TARGETS': [
{
'NAME' : 'hello_world',
'TYPE' : 'main',
'SOURCES' : ['hello_world.c'],
},
],
'DEST' : 'examples'
}
class TestFunctions(unittest.TestCase):
def testPatsubst(self):
val = generate_make.GenPatsubst(32, 'FOO', 'cc', 'CXX')
gold = '$(patsubst %.cc,%_32.o,$(FOO_CXX))'
self.assertEqual(val, gold)
def testSetVar(self):
val = generate_make.SetVar('FOO',[])
self.assertEqual(val, 'FOO:=\n')
val = generate_make.SetVar('FOO',['BAR'])
self.assertEqual(val, 'FOO:=BAR\n')
items = ['FOO_' + 'x' * (i % 13) for i in range(50)]
for i in range(10):
wrapped = generate_make.SetVar('BAR_' + 'x' * i, items)
lines = wrapped.split('\n')
for line in lines:
        self.assertLessEqual(len(line), 79, 'Wrapped line over 79 chars at ' + str(i))
class TestValidateFormat(unittest.TestCase):
def _append_result(self, msg):
self.result += msg
return self.result
def _validate(self, src, msg):
format = generate_make.DSC_FORMAT
self.result = ''
result = generate_make.ValidateFormat(src, format,
lambda msg: self._append_result(msg))
if msg:
self.assertEqual(self.result, msg)
else:
self.assertEqual(result, True)
def testGoodDesc(self):
testdesc = copy.deepcopy(BASIC_DESC)
self._validate(testdesc, None)
def testMissingKey(self):
testdesc = copy.deepcopy(BASIC_DESC)
del testdesc['TOOLS']
self._validate(testdesc, 'Missing required key TOOLS.')
testdesc = copy.deepcopy(BASIC_DESC)
del testdesc['TARGETS'][0]['NAME']
self._validate(testdesc, 'Missing required key NAME.')
def testNonEmpty(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = []
self._validate(testdesc, 'Expected non-empty value for TOOLS.')
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TARGETS'] = []
self._validate(testdesc, 'Expected non-empty value for TARGETS.')
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TARGETS'][0]['NAME'] = ''
self._validate(testdesc, 'Expected non-empty value for NAME.')
def testBadValue(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = ['newlib', 'glibc', 'badtool']
self._validate(testdesc, 'Value badtool not expected in TOOLS.')
def testExpectStr(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = ['newlib', True, 'glibc']
self._validate(testdesc, 'Value True not expected in TOOLS.')
def testExpectList(self):
testdesc = copy.deepcopy(BASIC_DESC)
testdesc['TOOLS'] = 'newlib'
self._validate(testdesc, 'Key TOOLS expects LIST not STR.')
# TODO(noelallen): Add test which generates a real make and runs it.
def main():
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
result = unittest.TextTestRunner(verbosity=2).run(suite)
return int(not result.wasSuccessful())
if __name__ == '__main__':
sys.exit(main())
| 29.008 | 79 | 0.671539 | 2,667 | 0.735521 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.259515 |
c0d2686a32c098e3878691628a43110629043f78 | 1,089 | py | Python | main.py | YasunoriMATSUOKA/photo-hunt | e3ca9e8f42c8a6c6b02c9fdcee9ea44a30d18f66 | [
"MIT"
]
| null | null | null | main.py | YasunoriMATSUOKA/photo-hunt | e3ca9e8f42c8a6c6b02c9fdcee9ea44a30d18f66 | [
"MIT"
]
| null | null | null | main.py | YasunoriMATSUOKA/photo-hunt | e3ca9e8f42c8a6c6b02c9fdcee9ea44a30d18f66 | [
"MIT"
]
| null | null | null | from PhotoHunt import PhotoHunt
# Todo: 各URLあたり1~2個の未検出、誤検出等の課題はあるが、概ね意図通り動作する状態となった
url_list = [
"https://www.saizeriya.co.jp/entertainment/images/1710/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1801/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1804/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1806/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1810/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1812/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1904/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1907/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1910/body.png",
"https://www.saizeriya.co.jp/entertainment/images/1912/body.png",
"https://www.saizeriya.co.jp/entertainment/images/2003/body.png",
"https://www.saizeriya.co.jp/entertainment/images/2007/body.png",
"https://www.saizeriya.co.jp/entertainment/images/2009/body.png"
]
for url in url_list:
photo_hunt = PhotoHunt(url)
photo_hunt.execute()
| 47.347826 | 69 | 0.733701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.824336 |
c0d317f2e8f8665da9e599f1dc02201ed251fea1 | 568 | py | Python | Curso_em_Video_py3/ex069.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
]
| 1 | 2021-05-11T12:39:43.000Z | 2021-05-11T12:39:43.000Z | Curso_em_Video_py3/ex069.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
]
| null | null | null | Curso_em_Video_py3/ex069.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
]
| null | null | null | a = b = c = 0
while True:
flag = ''
i = -1
s = ''
while i < 0:
i = int(input('idade:\t'))
while s != 'M' and s != 'F':
s = str(input('Sexo [M] [F]:\t')).strip().upper()[0]
if i > 18:
a += 1
if s == 'M':
b += 1
elif i < 20:
c += 1
while flag != 'S' and flag != 'N':
flag = str(input('Você quer cadastrar mais pessoas? [S] [N]\t')).strip().upper()[0]
if flag == 'N':
break
print(f'Tem {a} pessoas maior de 18 anos!\nTem {b} homens!\nTem {c} mulheres com menos de 20 anos!')
| 27.047619 | 100 | 0.452465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.330404 |