code (string, length 20 to 1.05M) | apis (sequence) | extract_api (string, length 75 to 5.24M)
---|---|---
from rest_framework import generics, permissions, viewsets
from django.core.management import call_command
from rest_framework.response import Response
from rest_framework.views import APIView
from ..serializers import JobCategorySerializer, JobPostSerializer, RecruiterSerializer, JobPostAllSerializer, \
MatchedResumeSerializer
from ..models import JobPost, JobCategory, MatchedResumes
from accounts.models import Recruiter
class JobCategoryListView(generics.ListAPIView):
queryset = JobCategory.objects.all()
serializer_class = JobCategorySerializer
lookup_field = 'id'
class RecruiterView(generics.RetrieveAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = RecruiterSerializer
def get_object(self):
return Recruiter.objects.get(user=self.request.user.id)
class JobPostAllView(generics.ListAPIView):
serializer_class = JobPostAllSerializer
queryset = JobPost.objects.all()
lookup_field = 'id'
class JobPostAllDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = JobPostAllSerializer
queryset = JobPost.objects.all()
lookup_field = 'id'
class JobPostCreateView(APIView):
permission_classes = [
permissions.IsAuthenticated
]
model = Recruiter
def post(self, request):
recruiter = Recruiter.objects.get(user=self.request.user.id)
print(recruiter)
data = request.data
data["recruiter"] = recruiter.id
print(data["recruiter"])
serializer = JobPostSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
class JobPostListView(generics.ListAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = JobPostAllSerializer
lookup_field = 'id'
def get_queryset(self):
queryset = JobPost.objects.filter(recruiter=self.request.user.recruiter)
return queryset
class JobPostDetailView(APIView):
permission_classes = [
permissions.IsAuthenticated
]
def get_object(self, id):
try:
return JobPost.objects.get(id=id)
except JobPost.DoesNotExist as e:
return Response({"error": "Given question object not found."}, status=404)
def get(self, request, id=None):
instance = self.get_object(id)
serializer = JobPostAllSerializer(instance)
return Response(serializer.data)
def put(self, request, id=None):
data = request.data
instance = self.get_object(id)
serializer = JobPostSerializer(instance, data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
return Response(serializer.errors, status=400)
def delete(self, request, id=None):
instance = self.get_object(id)
instance.delete()
return Response(status=204)
class EvaluatePostListView(generics.ListAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = MatchedResumeSerializer
def get_queryset(self):
call_command('matching', self.request.user.recruiter.id)
queryset = MatchedResumes.objects.filter(recruiter=self.request.user.recruiter)
return queryset
class MatchedResumesListView(generics.ListAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = MatchedResumeSerializer
def get_queryset(self):
queryset = MatchedResumes.objects.filter(recruiter=self.request.user.recruiter)
return queryset
# class JobPostListView(generics.ListAPIView):
# queryset = JobPost.objects.all()
# # permission_classes = [permissions.IsAuthenticated, ]
# serializer_class = JobPostSerializer
# lookup_field = 'id'
#
# filter_backends = [SearchFilter, OrderingFilter, ]
# search_fields = [
# 'job_title',
# ]
| [
"accounts.models.Recruiter.objects.get",
"rest_framework.response.Response",
"django.core.management.call_command"
] | [((795, 843), 'accounts.models.Recruiter.objects.get', 'Recruiter.objects.get', ([], {'user': 'self.request.user.id'}), '(user=self.request.user.id)\n', (816, 843), False, 'from accounts.models import Recruiter\n'), ((1346, 1394), 'accounts.models.Recruiter.objects.get', 'Recruiter.objects.get', ([], {'user': 'self.request.user.id'}), '(user=self.request.user.id)\n', (1367, 1394), False, 'from accounts.models import Recruiter\n'), ((1708, 1747), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': '(400)'}), '(serializer.errors, status=400)\n', (1716, 1747), False, 'from rest_framework.response import Response\n'), ((2534, 2559), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2542, 2559), False, 'from rest_framework.response import Response\n'), ((2861, 2900), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': '(400)'}), '(serializer.errors, status=400)\n', (2869, 2900), False, 'from rest_framework.response import Response\n'), ((3022, 3042), 'rest_framework.response.Response', 'Response', ([], {'status': '(204)'}), '(status=204)\n', (3030, 3042), False, 'from rest_framework.response import Response\n'), ((3248, 3304), 'django.core.management.call_command', 'call_command', (['"""matching"""', 'self.request.user.recruiter.id'], {}), "('matching', self.request.user.recruiter.id)\n", (3260, 3304), False, 'from django.core.management import call_command\n'), ((1655, 1692), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': '(201)'}), '(serializer.data, status=201)\n', (1663, 1692), False, 'from rest_framework.response import Response\n'), ((2808, 2845), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': '(200)'}), '(serializer.data, status=200)\n', (2816, 2845), False, 'from rest_framework.response import Response\n'), ((2322, 2389), 'rest_framework.response.Response', 'Response', (["{'error': 'Given question object not found.'}"], {'status': '(404)'}), "({'error': 'Given question object not found.'}, status=404)\n", (2330, 2389), False, 'from rest_framework.response import Response\n')] |
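The commented-out, search-enabled list view at the end of the snippet above references SearchFilter and OrderingFilter without importing them. A minimal sketch of how that variant could be completed, assuming the standard rest_framework.filters backends; the view name is illustrative, while the model, serializer and field names are taken from the snippet itself:

from rest_framework import generics
from rest_framework.filters import SearchFilter, OrderingFilter
from ..models import JobPost
from ..serializers import JobPostSerializer

class SearchableJobPostListView(generics.ListAPIView):
    # Lists all job posts; ?search=<term> matches job_title, ?ordering=<field> sorts the results.
    queryset = JobPost.objects.all()
    serializer_class = JobPostSerializer
    lookup_field = 'id'
    filter_backends = [SearchFilter, OrderingFilter]
    search_fields = ['job_title']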
from tensorflow.keras.preprocessing.image import img_to_array
from PIL import Image
import numpy as np
import streamlit as st
from huggingface_hub import from_pretrained_keras
@st.cache(persist=True,allow_output_mutation=True,show_spinner=False,suppress_st_warning=True)
def instantiate_model():
model = from_pretrained_keras("keras-io/lowlight-enhance-mirnet", compile=False)
return model
@st.cache(persist=True,allow_output_mutation=True,show_spinner=False,suppress_st_warning=True)
def enhance_image(uploaded_image, downloaded_image):
model = instantiate_model()
low_light_img = Image.open(uploaded_image).convert('RGB')
#width, height = low_light_img.size
#low_light_img = low_light_img.resize((256,256),Image.NEAREST)
image = img_to_array(low_light_img)
image = image.astype('float32') / 255.0
image = np.expand_dims(image, axis = 0)
output = model.predict(image)
output_image = output[0] * 255.0
output_image = output_image.clip(0,255)
output_image = output_image.reshape((np.shape(output_image)[0],np.shape(output_image)[1],3))
output_image = np.uint32(output_image)
final_image = Image.fromarray(output_image.astype('uint8'),'RGB')
final_image.save(downloaded_image)
@st.cache(persist=True,allow_output_mutation=True,show_spinner=False,suppress_st_warning=True)
def download_success():
st.balloons()
st.success('✅ Download Successful !!')
| [
"numpy.shape",
"streamlit.cache",
"PIL.Image.open",
"streamlit.balloons",
"huggingface_hub.from_pretrained_keras",
"numpy.uint32",
"streamlit.success",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((186, 286), 'streamlit.cache', 'st.cache', ([], {'persist': '(True)', 'allow_output_mutation': '(True)', 'show_spinner': '(False)', 'suppress_st_warning': '(True)'}), '(persist=True, allow_output_mutation=True, show_spinner=False,\n suppress_st_warning=True)\n', (194, 286), True, 'import streamlit as st\n'), ((416, 516), 'streamlit.cache', 'st.cache', ([], {'persist': '(True)', 'allow_output_mutation': '(True)', 'show_spinner': '(False)', 'suppress_st_warning': '(True)'}), '(persist=True, allow_output_mutation=True, show_spinner=False,\n suppress_st_warning=True)\n', (424, 516), True, 'import streamlit as st\n'), ((1283, 1383), 'streamlit.cache', 'st.cache', ([], {'persist': '(True)', 'allow_output_mutation': '(True)', 'show_spinner': '(False)', 'suppress_st_warning': '(True)'}), '(persist=True, allow_output_mutation=True, show_spinner=False,\n suppress_st_warning=True)\n', (1291, 1383), True, 'import streamlit as st\n'), ((319, 391), 'huggingface_hub.from_pretrained_keras', 'from_pretrained_keras', (['"""keras-io/lowlight-enhance-mirnet"""'], {'compile': '(False)'}), "('keras-io/lowlight-enhance-mirnet', compile=False)\n", (340, 391), False, 'from huggingface_hub import from_pretrained_keras\n'), ((784, 811), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['low_light_img'], {}), '(low_light_img)\n', (796, 811), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((870, 899), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (884, 899), True, 'import numpy as np\n'), ((1142, 1165), 'numpy.uint32', 'np.uint32', (['output_image'], {}), '(output_image)\n', (1151, 1165), True, 'import numpy as np\n'), ((1407, 1420), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (1418, 1420), True, 'import streamlit as st\n'), ((1426, 1464), 'streamlit.success', 'st.success', (['"""✅ Download Successful !!"""'], {}), "('✅ Download Successful !!')\n", (1436, 1464), True, 'import streamlit as st\n'), ((618, 644), 'PIL.Image.open', 'Image.open', (['uploaded_image'], {}), '(uploaded_image)\n', (628, 644), False, 'from PIL import Image\n'), ((1066, 1088), 'numpy.shape', 'np.shape', (['output_image'], {}), '(output_image)\n', (1074, 1088), True, 'import numpy as np\n'), ((1092, 1114), 'numpy.shape', 'np.shape', (['output_image'], {}), '(output_image)\n', (1100, 1114), True, 'import numpy as np\n')] |
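A hedged sketch of how the two cached helpers above could be driven from a Streamlit page, continuing from the imports in the snippet. The widget layout and the output file name are assumptions and not part of the original code; only enhance_image() and download_success() come from it:

uploaded = st.file_uploader('Upload a low-light photo', type=['png', 'jpg', 'bmp'])
if uploaded is not None:
    result_path = 'enhanced.png'          # hypothetical output file name
    enhance_image(uploaded, result_path)  # writes the enhanced image to result_path
    st.image(result_path, caption='Enhanced image')
    with open(result_path, 'rb') as fh:
        if st.download_button('Download result', fh, file_name=result_path):
            download_success()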
import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def tempdir():
"""
This function creates, yields and removes a temporary directory.
:yields: str
"""
tempdir_ = tempfile.mkdtemp()
try:
yield tempdir_
finally:
shutil.rmtree(tempdir_)
| [
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((211, 229), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (227, 229), False, 'import tempfile\n'), ((284, 307), 'shutil.rmtree', 'shutil.rmtree', (['tempdir_'], {}), '(tempdir_)\n', (297, 307), False, 'import shutil\n')] |
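A short usage sketch for the tempdir() context manager above; the file name is hypothetical, and the cleanup on exit is exactly the shutil.rmtree call in the finally block:

import os

with tempdir() as workdir:
    # workdir exists only for the duration of this block
    scratch = os.path.join(workdir, 'scratch.txt')  # hypothetical file name
    with open(scratch, 'w') as fh:
        fh.write('temporary data')
# on exit, the directory and everything inside it has been removed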
#
# This file is part of stac2odc
# Copyright (C) 2020 INPE.
#
# stac2odc is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
import os
import click
import stac
from datacube.index import MissingRecordError
from datacube.index.hl import Doc2Dataset
from datacube.scripts.dataset import remap_uri_from_doc, dataset_stream
from datacube.ui.common import ui_path_doc_stream
from datacube.utils import InvalidDocException
from datacube.utils.documents import read_documents
from loguru import logger
import stac2odc.collection
import stac2odc.item
from stac2odc.logger import logger_message
from stac2odc.toolbox import write_odc_element_in_yaml_file, datacube_index, prepare_advanced_filter, \
create_feature_collection_from_stac_elements
@click.group()
def cli():
"""
:return:
"""
pass
@cli.command(name="collection2product", help="Function to convert a STAC Collection JSON to ODC Product YAML")
@click.option('-c', '--collection', required=True, help='Collection name (Ex. CB4MOSBR_64_3M_STK).')
@click.option('--url', default='https://brazildatacube.dpi.inpe.br/stac/', help='BDC STAC url.')
@click.option('-o', '--outdir', help='Output directory', required=True)
@click.option('-e', '--engine-file', required=True,
help='Mapper configurations to convert STAC Collection to ODC Product')
@click.option('--datacube-config', '-dconfig', default=None, required=False)
@click.option('--access-token', default=None, is_flag=False, help='Personal Access Token of the BDC Auth')
@click.option('--verbose', default=False, is_flag=True, help='Enable verbose mode')
def collection2product_cli(collection: str, url: str, outdir: str, engine_file: str, datacube_config: str,
access_token, verbose: bool):
collection_definition = stac.STAC(url, False, access_token=access_token).collection(collection)
odc_element = stac2odc.collection.collection2product(engine_file, collection_definition, verbose=verbose)
product_definition_file = write_odc_element_in_yaml_file(odc_element, os.path.join(outdir, f'{collection}.yaml'))
# code adapted from: https://github.com/opendatacube/datacube-core/blob/develop/datacube/scripts/product.py
for path_descriptor, parsed_doc in read_documents(*[product_definition_file]):
try:
dc_index = datacube_index(datacube_config)
_type = dc_index.products.from_doc(parsed_doc)
logger_message(f'Adding {_type.name}', logger.info, verbose)
dc_index.products.add(_type)
except InvalidDocException as e:
logger_message(f'Error to add product: {str(e)}', logger.warning, True)
@cli.command(name="item2dataset", help="Function to convert a STAC Collection JSON to ODC Dataset YAML")
@click.option('-sc', '--stac-collection', required=True, help='Collection name (e.g. CB4MOSBR_64_3M_STK).')
@click.option('-dp', '--dc-product', required=True, help='Product name in Open Data Cube (e.g. CB4MOSBR_64_3M_STK)')
@click.option('--url', default='https://brazildatacube.dpi.inpe.br/stac/', help='BDC STAC url.')
@click.option('-o', '--outdir', default='./', help='Output directory')
@click.option('-m', '--max-items', help='Max items', required=True)
@click.option('-e', '--engine-file', required=True,
help='Mapper configurations to convert STAC Collection to ODC Product')
@click.option('--datacube-config', '-dconfig', default=None, required=False)
@click.option('--verbose', default=False, is_flag=True, help='Enable verbose mode')
@click.option('--access-token', default=None, is_flag=False, help='Personal Access Token of the BDC Auth')
@click.option('--advanced-filter', default=None, help='Search STAC Items with specific parameters')
def item2dataset_cli(stac_collection, dc_product, url, outdir, max_items, engine_file, datacube_config, verbose,
access_token, advanced_filter):
_filter = {"collections": [stac_collection]}
if advanced_filter:
_filter = {
**_filter, **prepare_advanced_filter(advanced_filter)
}
stac_service = stac.STAC(url, False, access_token=access_token)
dc_index = datacube_index(datacube_config)
features = create_feature_collection_from_stac_elements(stac_service, int(max_items), _filter)
odc_datasets = stac2odc.item.item2dataset(engine_file, dc_product, features, dc_index, verbose=verbose)
odc_datasets_definition_files = write_odc_element_in_yaml_file(odc_datasets, outdir)
# add datasets definitions on datacube index
# code adapted from: https://github.com/opendatacube/datacube-core/blob/develop/datacube/scripts/dataset.py
ds_resolve = Doc2Dataset(dc_index, [dc_product])
doc_stream = remap_uri_from_doc(ui_path_doc_stream(odc_datasets_definition_files, uri=True))
datasets_on_stream = dataset_stream(doc_stream, ds_resolve)
logger_message(f"Adding datasets", logger.info, True)
for dataset in datasets_on_stream:
try:
dc_index.datasets.add(dataset, with_lineage=True)
except (ValueError, MissingRecordError):
logger_message(f"Error to add dataset ({dataset.local_uri})", logger.warning, True)
| [
"stac2odc.toolbox.datacube_index",
"click.group",
"click.option",
"datacube.ui.common.ui_path_doc_stream",
"os.path.join",
"stac2odc.toolbox.write_odc_element_in_yaml_file",
"stac2odc.logger.logger_message",
"datacube.index.hl.Doc2Dataset",
"stac.STAC",
"stac2odc.toolbox.prepare_advanced_filter",
"datacube.utils.documents.read_documents",
"datacube.scripts.dataset.dataset_stream"
] | [((828, 841), 'click.group', 'click.group', ([], {}), '()\n', (839, 841), False, 'import click\n'), ((1005, 1109), 'click.option', 'click.option', (['"""-c"""', '"""--collection"""'], {'required': '(True)', 'help': '"""Collection name (Ex. CB4MOSBR_64_3M_STK)."""'}), "('-c', '--collection', required=True, help=\n 'Collection name (Ex. CB4MOSBR_64_3M_STK).')\n", (1017, 1109), False, 'import click\n'), ((1106, 1205), 'click.option', 'click.option', (['"""--url"""'], {'default': '"""https://brazildatacube.dpi.inpe.br/stac/"""', 'help': '"""BDC STAC url."""'}), "('--url', default='https://brazildatacube.dpi.inpe.br/stac/',\n help='BDC STAC url.')\n", (1118, 1205), False, 'import click\n'), ((1203, 1273), 'click.option', 'click.option', (['"""-o"""', '"""--outdir"""'], {'help': '"""Output directory"""', 'required': '(True)'}), "('-o', '--outdir', help='Output directory', required=True)\n", (1215, 1273), False, 'import click\n'), ((1275, 1402), 'click.option', 'click.option', (['"""-e"""', '"""--engine-file"""'], {'required': '(True)', 'help': '"""Mapper configurations to convert STAC Collection to ODC Product"""'}), "('-e', '--engine-file', required=True, help=\n 'Mapper configurations to convert STAC Collection to ODC Product')\n", (1287, 1402), False, 'import click\n'), ((1413, 1488), 'click.option', 'click.option', (['"""--datacube-config"""', '"""-dconfig"""'], {'default': 'None', 'required': '(False)'}), "('--datacube-config', '-dconfig', default=None, required=False)\n", (1425, 1488), False, 'import click\n'), ((1490, 1600), 'click.option', 'click.option', (['"""--access-token"""'], {'default': 'None', 'is_flag': '(False)', 'help': '"""Personal Access Token of the BDC Auth"""'}), "('--access-token', default=None, is_flag=False, help=\n 'Personal Access Token of the BDC Auth')\n", (1502, 1600), False, 'import click\n'), ((1597, 1684), 'click.option', 'click.option', (['"""--verbose"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enable verbose mode"""'}), "('--verbose', default=False, is_flag=True, help=\n 'Enable verbose mode')\n", (1609, 1684), False, 'import click\n'), ((2843, 2954), 'click.option', 'click.option', (['"""-sc"""', '"""--stac-collection"""'], {'required': '(True)', 'help': '"""Collection name (e.g. CB4MOSBR_64_3M_STK)."""'}), "('-sc', '--stac-collection', required=True, help=\n 'Collection name (e.g. CB4MOSBR_64_3M_STK).')\n", (2855, 2954), False, 'import click\n'), ((2951, 3071), 'click.option', 'click.option', (['"""-dp"""', '"""--dc-product"""'], {'required': '(True)', 'help': '"""Product name in Open Data Cube (e.g. CB4MOSBR_64_3M_STK)"""'}), "('-dp', '--dc-product', required=True, help=\n 'Product name in Open Data Cube (e.g. 
CB4MOSBR_64_3M_STK)')\n", (2963, 3071), False, 'import click\n'), ((3068, 3167), 'click.option', 'click.option', (['"""--url"""'], {'default': '"""https://brazildatacube.dpi.inpe.br/stac/"""', 'help': '"""BDC STAC url."""'}), "('--url', default='https://brazildatacube.dpi.inpe.br/stac/',\n help='BDC STAC url.')\n", (3080, 3167), False, 'import click\n'), ((3165, 3234), 'click.option', 'click.option', (['"""-o"""', '"""--outdir"""'], {'default': '"""./"""', 'help': '"""Output directory"""'}), "('-o', '--outdir', default='./', help='Output directory')\n", (3177, 3234), False, 'import click\n'), ((3236, 3302), 'click.option', 'click.option', (['"""-m"""', '"""--max-items"""'], {'help': '"""Max items"""', 'required': '(True)'}), "('-m', '--max-items', help='Max items', required=True)\n", (3248, 3302), False, 'import click\n'), ((3304, 3431), 'click.option', 'click.option', (['"""-e"""', '"""--engine-file"""'], {'required': '(True)', 'help': '"""Mapper configurations to convert STAC Collection to ODC Product"""'}), "('-e', '--engine-file', required=True, help=\n 'Mapper configurations to convert STAC Collection to ODC Product')\n", (3316, 3431), False, 'import click\n'), ((3442, 3517), 'click.option', 'click.option', (['"""--datacube-config"""', '"""-dconfig"""'], {'default': 'None', 'required': '(False)'}), "('--datacube-config', '-dconfig', default=None, required=False)\n", (3454, 3517), False, 'import click\n'), ((3519, 3606), 'click.option', 'click.option', (['"""--verbose"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enable verbose mode"""'}), "('--verbose', default=False, is_flag=True, help=\n 'Enable verbose mode')\n", (3531, 3606), False, 'import click\n'), ((3603, 3713), 'click.option', 'click.option', (['"""--access-token"""'], {'default': 'None', 'is_flag': '(False)', 'help': '"""Personal Access Token of the BDC Auth"""'}), "('--access-token', default=None, is_flag=False, help=\n 'Personal Access Token of the BDC Auth')\n", (3615, 3713), False, 'import click\n'), ((3710, 3813), 'click.option', 'click.option', (['"""--advanced-filter"""'], {'default': 'None', 'help': '"""Search STAC Items with specific parameters"""'}), "('--advanced-filter', default=None, help=\n 'Search STAC Items with specific parameters')\n", (3722, 3813), False, 'import click\n'), ((2324, 2366), 'datacube.utils.documents.read_documents', 'read_documents', (['*[product_definition_file]'], {}), '(*[product_definition_file])\n', (2338, 2366), False, 'from datacube.utils.documents import read_documents\n'), ((4164, 4212), 'stac.STAC', 'stac.STAC', (['url', '(False)'], {'access_token': 'access_token'}), '(url, False, access_token=access_token)\n', (4173, 4212), False, 'import stac\n'), ((4228, 4259), 'stac2odc.toolbox.datacube_index', 'datacube_index', (['datacube_config'], {}), '(datacube_config)\n', (4242, 4259), False, 'from stac2odc.toolbox import write_odc_element_in_yaml_file, datacube_index, prepare_advanced_filter, create_feature_collection_from_stac_elements\n'), ((4504, 4556), 'stac2odc.toolbox.write_odc_element_in_yaml_file', 'write_odc_element_in_yaml_file', (['odc_datasets', 'outdir'], {}), '(odc_datasets, outdir)\n', (4534, 4556), False, 'from stac2odc.toolbox import write_odc_element_in_yaml_file, datacube_index, prepare_advanced_filter, create_feature_collection_from_stac_elements\n'), ((4736, 4771), 'datacube.index.hl.Doc2Dataset', 'Doc2Dataset', (['dc_index', '[dc_product]'], {}), '(dc_index, [dc_product])\n', (4747, 4771), False, 'from datacube.index.hl import Doc2Dataset\n'), 
((4894, 4932), 'datacube.scripts.dataset.dataset_stream', 'dataset_stream', (['doc_stream', 'ds_resolve'], {}), '(doc_stream, ds_resolve)\n', (4908, 4932), False, 'from datacube.scripts.dataset import remap_uri_from_doc, dataset_stream\n'), ((4938, 4991), 'stac2odc.logger.logger_message', 'logger_message', (['f"""Adding datasets"""', 'logger.info', '(True)'], {}), "(f'Adding datasets', logger.info, True)\n", (4952, 4991), False, 'from stac2odc.logger import logger_message\n'), ((2128, 2170), 'os.path.join', 'os.path.join', (['outdir', 'f"""{collection}.yaml"""'], {}), "(outdir, f'{collection}.yaml')\n", (2140, 2170), False, 'import os\n'), ((4808, 4867), 'datacube.ui.common.ui_path_doc_stream', 'ui_path_doc_stream', (['odc_datasets_definition_files'], {'uri': '(True)'}), '(odc_datasets_definition_files, uri=True)\n', (4826, 4867), False, 'from datacube.ui.common import ui_path_doc_stream\n'), ((1872, 1920), 'stac.STAC', 'stac.STAC', (['url', '(False)'], {'access_token': 'access_token'}), '(url, False, access_token=access_token)\n', (1881, 1920), False, 'import stac\n'), ((2404, 2435), 'stac2odc.toolbox.datacube_index', 'datacube_index', (['datacube_config'], {}), '(datacube_config)\n', (2418, 2435), False, 'from stac2odc.toolbox import write_odc_element_in_yaml_file, datacube_index, prepare_advanced_filter, create_feature_collection_from_stac_elements\n'), ((2508, 2568), 'stac2odc.logger.logger_message', 'logger_message', (['f"""Adding {_type.name}"""', 'logger.info', 'verbose'], {}), "(f'Adding {_type.name}', logger.info, verbose)\n", (2522, 2568), False, 'from stac2odc.logger import logger_message\n'), ((4093, 4133), 'stac2odc.toolbox.prepare_advanced_filter', 'prepare_advanced_filter', (['advanced_filter'], {}), '(advanced_filter)\n', (4116, 4133), False, 'from stac2odc.toolbox import write_odc_element_in_yaml_file, datacube_index, prepare_advanced_filter, create_feature_collection_from_stac_elements\n'), ((5167, 5255), 'stac2odc.logger.logger_message', 'logger_message', (['f"""Error to add dataset ({dataset.local_uri})"""', 'logger.warning', '(True)'], {}), "(f'Error to add dataset ({dataset.local_uri})', logger.\n warning, True)\n", (5181, 5255), False, 'from stac2odc.logger import logger_message\n')] |
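Because collection2product and item2dataset above are plain click commands, they can also be exercised from Python with click's test runner. A hedged sketch: the output directory and mapper file are placeholders, the collection name is the example from the option help text, and the command will still try to reach the STAC service and the datacube index when it actually runs:

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(collection2product_cli, [
    '-c', 'CB4MOSBR_64_3M_STK',   # example collection name from the --collection help
    '-o', './products',           # hypothetical output directory
    '-e', 'mapping.yaml',         # hypothetical mapper configuration file
])
print(result.exit_code, result.output)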
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Comprehensive exercise 3: tic-tac-toe game
"""
import os
def print_board(board):
print(board['TL'] + '|' + board['TM'] + '|' + board['TR'])
print('-+-+-')
print(board['ML'] + '|' + board['MM'] + '|' + board['MR'])
print('-+-+-')
print(board['BL'] + '|' + board['BM'] + '|' + board['BR'])
def judge(board):
"""
    Determine which player has won
:param board:
:return:
"""
values = list(board.values())
win = ''
    # All winning combinations
groups = ['123', '456', '789', '147', '258', '369', '159', '357']
for ele in groups:
index = list(ele)
if values[int(index[0])-1] == values[int(index[1])-1] == values[int(index[2])-1] == 'x':
win = 'x'
break
elif values[int(index[0])-1] == values[int(index[1])-1] == values[int(index[2])-1] == 'o':
win = 'o'
break
return win
def main():
init_board = {
'TL': ' ', 'TM': ' ', 'TR': ' ',
'ML': ' ', 'MM': ' ', 'MR': ' ',
'BL': ' ', 'BM': ' ', 'BR': ' '
}
begin = True
while begin:
curr_board = init_board.copy()
begin = False
turn = 'x'
counter = 0
os.system('cls')
print_board(curr_board)
result = ''
while counter < 9:
            move = input("It's %s's turn. Enter a position: " % turn)
if curr_board[move] == ' ':
counter += 1
curr_board[move] = turn
if turn == 'x':
turn = 'o'
else:
turn = 'x'
os.system('cls')
print_board(curr_board)
result = judge(curr_board)
if result:
break
if result:
            print(f'Player {result} wins.')
        else:
            print('Draw.')
        choice = input('Play again? (yes|no)')
begin = choice == 'yes'
if __name__ == '__main__':
main()
| [
"os.system"
] | [((1183, 1199), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1192, 1199), False, 'import os\n'), ((1567, 1583), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1576, 1583), False, 'import os\n')] |
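To make the judge() helper above concrete: it relies on dict insertion order (Python 3.7+), so list(board.values()) maps positions 1 to 9 onto TL, TM, TR, ML, MM, MR, BL, BM, BR. A small illustrative call:

board = {
    'TL': 'x', 'TM': 'x', 'TR': 'x',   # top row, group '123', completed by x
    'ML': 'o', 'MM': 'o', 'MR': ' ',
    'BL': ' ', 'BM': ' ', 'BR': ' ',
}
print(judge(board))  # -> 'x'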
# Create jobs
from apscheduler.schedulers.background import BackgroundScheduler
scheduler = BackgroundScheduler() # APScheduler()
def job_0():
for e in range(3):
print("task: ", e)
print("-----end-----")
def job_1():
print("updated")
def start_job():
# run job once a day at 3:20 AM
    scheduler.add_job(id="Task 1", func=job_0, trigger='cron', hour=3, minute=20, second=0)
    # run job every 30 seconds
    scheduler.add_job(id="Task 2", func=job_1, trigger='interval', seconds=30)
scheduler.start() | [
"apscheduler.schedulers.background.BackgroundScheduler"
] | [((94, 115), 'apscheduler.schedulers.background.BackgroundScheduler', 'BackgroundScheduler', ([], {}), '()\n', (113, 115), False, 'from apscheduler.schedulers.background import BackgroundScheduler\n')] |
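A hedged sketch of wiring start_job() above into an application lifecycle; registering the shutdown handler with atexit is an assumption, not part of the original snippet:

import atexit

start_job()  # registers both jobs and starts the background scheduler
atexit.register(lambda: scheduler.shutdown(wait=False))  # stop the scheduler on interpreter exit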
import asyncio
import json
import logging
from aiohttp import WSMsgType
from .yandex_session import YandexSession
_LOGGER = logging.getLogger(__name__)
IOT_TYPES = {
'on': 'devices.capabilities.on_off',
'temperature': 'devices.capabilities.range',
'fan_speed': 'devices.capabilities.mode',
'thermostat': 'devices.capabilities.mode',
'volume': 'devices.capabilities.range',
'pause': 'devices.capabilities.toggle',
'mute': 'devices.capabilities.toggle',
'channel': 'devices.capabilities.range',
'input_source': 'devices.capabilities.mode',
'brightness': 'devices.capabilities.range',
'color': 'devices.capabilities.color_setting',
'work_speed': 'devices.capabilities.mode',
'humidity': 'devices.capabilities.range',
'ionization': 'devices.capabilities.toggle',
'backlight': 'devices.capabilities.toggle',
# don't work
'hsv': 'devices.capabilities.color_setting',
'rgb': 'devices.capabilities.color_setting',
'temperature_k': 'devices.capabilities.color_setting',
}
MASK_EN = '0123456789abcdef-'
MASK_RU = 'оеаинтсрвлкмдпуяы'
URL_USER = 'https://iot.quasar.yandex.ru/m/user'
def encode(uid: str) -> str:
    """Encode a UID into Russian letters. Yandex is picky about names."""
return 'ХА ' + ''.join([MASK_RU[MASK_EN.index(s)] for s in uid])
def decode(uid: str) -> str:
    """Decode a UID back from Russian letters."""
return ''.join([MASK_EN[MASK_RU.index(s)] for s in uid[3:]])
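# Illustrative round trip for the two helpers above (not part of the original
# module): encode() maps every hex character of a device UID onto MASK_RU and
# prefixes the result with 'ХА ', decode() strips that prefix and maps the
# letters back:
#   >>> encoded = encode('1234abcd-ff')
#   >>> encoded.startswith('ХА ')
#   True
#   >>> decode(encoded)
#   '1234abcd-ff'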
class YandexQuasar:
# all devices
devices = None
online_updated: asyncio.Event = None
updates_task: asyncio.Task = None
def __init__(self, session: YandexSession):
self.session = session
self.online_updated = asyncio.Event()
self.online_updated.set()
@property
def hass_id(self):
for device in self.devices:
if device['name'] == "Yandex Intents":
return device['id']
return None
    async def init(self):
        """Main function: loads the full device list."""
        _LOGGER.debug("Fetching the device list.")
r = await self.session.get(f"{URL_USER}/devices")
resp = await r.json()
assert resp['status'] == 'ok', resp
self.devices = [device for room in resp['rooms']
for device in room['devices']]
self.devices += resp['speakers'] + resp['unconfigured_devices']
@property
def speakers(self):
return [
d for d in self.devices
if d['type'].startswith("devices.types.smart_speaker")
]
@property
def modules(self):
# modules don't have cloud scenarios
return [d for d in self.devices if ".yandex.module" in d["type"]]
async def load_speakers(self) -> list:
speakers = self.speakers
        # Yandex started including device_id and platform in the full device
        # list, so the per-speaker config load below is no longer needed
# for speaker in speakers:
# await self.load_speaker_config(speaker)
scenarios = await self.load_scenarios()
for speaker in speakers:
device_id = speaker['id']
if device_id not in scenarios:
await self.add_scenario(device_id)
scenarios = await self.load_scenarios()
speaker['scenario_id'] = scenarios[device_id]['id']
return speakers
    async def load_speaker_config(self, device: dict):
        """Load device_id and platform for a speaker. They are not included
        in the full device list.
"""
r = await self.session.get(
f"{URL_USER}/devices/{device['id']}/configuration")
resp = await r.json()
assert resp['status'] == 'ok', resp
# device_id and platform
device.update(resp['quasar_info'])
    async def load_scenarios(self) -> dict:
        """Fetch the scenarios that we created earlier."""
r = await self.session.get(f"{URL_USER}/scenarios")
resp = await r.json()
assert resp['status'] == 'ok', resp
return {
decode(d['name']): d
for d in resp['scenarios']
if d['name'].startswith('ХА ')
}
    async def add_scenario(self, device_id: str):
        """Add a placeholder scenario."""
name = encode(device_id)
payload = {
'name': name,
'icon': 'home',
'triggers': [{
'type': 'scenario.trigger.voice',
'value': name[3:]
}],
'requested_speaker_capabilities': [],
'devices': [{
'id': device_id,
'capabilities': [{
'type': 'devices.capabilities.quasar.server_action',
'state': {
'instance': 'phrase_action',
'value': 'пустышка'
}
}]
}]
}
r = await self.session.post(f"{URL_USER}/scenarios", json=payload)
resp = await r.json()
assert resp['status'] == 'ok', resp
async def add_intent(self, name: str, text: str, num: int):
speaker = [{
'type': 'devices.capabilities.quasar.server_action',
'state': {
'instance': 'phrase_action',
'value': text
}
}] if text else [{
'type': 'devices.capabilities.quasar.server_action',
'state': {
'instance': 'text_action',
'value': "Yandex Intents громкость 100"
}
}]
payload = {
'name': name,
'icon': 'home',
'triggers': [{
'type': 'scenario.trigger.voice',
'value': name
}],
'requested_speaker_capabilities': speaker,
'devices': [{
'id': self.hass_id,
'capabilities': [{
'type': 'devices.capabilities.range',
'state': {
'instance': 'volume',
'relative': False,
'value': num
}
}]
}]
}
r = await self.session.post(f"{URL_USER}/scenarios", json=payload)
resp = await r.json()
assert resp['status'] == 'ok', resp
    async def send(self, device: dict, text: str, is_tts: bool = False):
        """Run the scenario to execute a text command or a TTS phrase."""
# skip send for yandex modules
if "scenario_id" not in device:
return
_LOGGER.debug(f"{device['name']} => cloud | {text}")
action = 'phrase_action' if is_tts else 'text_action'
name = encode(device['id'])
payload = {
'name': name,
'icon': 'home',
'triggers': [{
'type': 'scenario.trigger.voice',
'value': name[3:]
}],
'requested_speaker_capabilities': [],
'devices': [{
'id': device['id'],
'capabilities': [{
'type': 'devices.capabilities.quasar.server_action',
'state': {
'instance': action,
'value': text
}
}]
}]
}
sid = device['scenario_id']
r = await self.session.put(f"{URL_USER}/scenarios/{sid}", json=payload)
resp = await r.json()
assert resp['status'] == 'ok', resp
r = await self.session.post(f"{URL_USER}/scenarios/{sid}/actions")
resp = await r.json()
assert resp['status'] == 'ok', resp
    async def load_local_speakers(self):
        """Load the list of local speakers. Not used."""
try:
r = await self.session.get(
'https://quasar.yandex.net/glagol/device_list')
resp = await r.json()
return [{
'device_id': d['id'],
'name': d['name'],
'platform': d['platform']
} for d in resp['devices']]
except:
_LOGGER.exception("Load local speakers")
return None
async def get_device_config(self, device: dict) -> dict:
payload = {'device_id': device['quasar_info']['device_id'],
'platform': device['quasar_info']['platform']}
r = await self.session.get(
'https://quasar.yandex.ru/get_device_config', params=payload
)
resp = await r.json()
assert resp['status'] == 'ok', resp
return resp['config']
async def set_device_config(self, device: dict, device_config: dict):
        _LOGGER.debug(f"Updating the station config: {device_config}")
payload = {'device_id': device['quasar_info']['device_id'],
'platform': device['quasar_info']['platform']}
r = await self.session.post(
'https://quasar.yandex.ru/set_device_config', params=payload,
json=device_config
)
resp = await r.json()
assert resp['status'] == 'ok', resp
async def get_device(self, deviceid: str):
r = await self.session.get(f"{URL_USER}/devices/{deviceid}")
resp = await r.json()
assert resp['status'] == 'ok', resp
return resp
async def device_action(self, deviceid: str, **kwargs):
_LOGGER.debug(f"Device action: {kwargs}")
actions = []
for k, v in kwargs.items():
type_ = (
'devices.capabilities.custom.button'
if k.isdecimal() else IOT_TYPES[k]
)
state = (
{'instance': k, 'value': v, 'relative': True}
if k in ('volume', 'channel')
else {'instance': k, 'value': v}
)
actions.append({'type': type_, 'state': state})
r = await self.session.post(
f"{URL_USER}/devices/{deviceid}/actions", json={'actions': actions}
)
resp = await r.json()
assert resp['status'] == 'ok', resp
async def update_online_stats(self):
if not self.online_updated.is_set():
await self.online_updated.wait()
return
self.online_updated.clear()
# _LOGGER.debug(f"Update speakers online status")
try:
r = await self.session.get(
'https://quasar.yandex.ru/devices_online_stats')
resp = await r.json()
assert resp['status'] == 'ok', resp
except:
return
finally:
self.online_updated.set()
for speaker in resp['items']:
for device in self.devices:
if 'quasar_info' not in device or \
device['quasar_info']['device_id'] != speaker['id']:
continue
device["online"] = speaker["online"]
break
async def _updates_connection(self, handler):
r = await self.session.get(
'https://iot.quasar.yandex.ru/m/v3/user/devices'
)
resp = await r.json()
assert resp['status'] == 'ok', resp
ws = await self.session.ws_connect(resp['updates_url'], heartbeat=60)
_LOGGER.debug("Start quasar updates connection")
async for msg in ws:
if msg.type != WSMsgType.TEXT:
break
resp = msg.json()
# "ping", "update_scenario_list"
if resp.get("operation") != "update_states":
continue
try:
resp = json.loads(resp['message'])
for upd in resp['updated_devices']:
if not upd.get('capabilities'):
continue
for cap in upd['capabilities']:
state = cap.get('state')
if not state:
continue
if cap['type'] == \
'devices.capabilities.quasar.server_action':
for speaker in self.speakers:
if speaker['id'] == upd['id']:
entity = speaker.get('entity')
if not entity:
break
state['entity_id'] = entity.entity_id
state['name'] = entity.name
await handler(state)
break
except:
_LOGGER.debug(f"Parse quasar update error: {msg.data}")
async def _updates_loop(self, handler):
while True:
try:
await self._updates_connection(handler)
except Exception as e:
_LOGGER.debug(f"Quasar update error: {e}")
await asyncio.sleep(30)
def handle_updates(self, handler):
self.updates_task = asyncio.create_task(self._updates_loop(handler))
def stop(self):
if self.updates_task:
self.updates_task.cancel()
async def set_account_config(self, key: str, value):
kv = ACCOUNT_CONFIG.get(key)
assert kv and value in kv['values'], f"{key}={value}"
if kv.get("api") == "user/settings":
# https://iot.quasar.yandex.ru/m/user/settings
r = await self.session.post(URL_USER + "/settings", json={
kv["key"]: kv["values"][value]
})
else:
r = await self.session.get(
'https://quasar.yandex.ru/get_account_config'
)
resp = await r.json()
assert resp['status'] == 'ok', resp
payload: dict = resp['config']
payload[kv['key']] = kv['values'][value]
r = await self.session.post(
'https://quasar.yandex.ru/set_account_config', json=payload
)
resp = await r.json()
assert resp['status'] == 'ok', resp
BOOL_CONFIG = {'да': True, 'нет': False}
ACCOUNT_CONFIG = {
'без лишних слов': {
'api': 'user/settings',
'key': 'iot',
'values': {
'да': {'response_reaction_type': 'sound'},
'нет': {'response_reaction_type': 'nlg'},
}
},
'ответить шепотом': {
'api': 'user/settings',
'key': 'tts_whisper',
'values': BOOL_CONFIG
},
'звук активации': {
'key': 'jingle', # /get_account_config
'values': BOOL_CONFIG
},
'одним устройством': {
'key': 'smartActivation', # /get_account_config
'values': BOOL_CONFIG
},
'понимать детей': {
'key': 'useBiometryChildScoring', # /get_account_config
'values': BOOL_CONFIG
},
'рассказывать о навыках': {
'key': 'aliceProactivity', # /get_account_config
'values': BOOL_CONFIG
},
'взрослый голос': {
'key': 'contentAccess', # /get_account_config
'values': {
'умеренный': 'medium',
'семейный': 'children',
'безопасный': 'safe',
'без ограничений': 'without',
}
},
'детский голос': {
'key': 'childContentAccess', # /get_account_config
'values': {
'безопасный': 'safe',
'семейный': 'children',
}
},
'имя': {
'key': 'spotter', # /get_account_config
'values': {
'алиса': 'alisa',
'яндекс': 'yandex',
}
},
}
| [
"logging.getLogger",
"json.loads",
"asyncio.Event",
"asyncio.sleep"
] | [((127, 154), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (144, 154), False, 'import logging\n'), ((1696, 1711), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (1709, 1711), False, 'import asyncio\n'), ((11587, 11614), 'json.loads', 'json.loads', (["resp['message']"], {}), "(resp['message'])\n", (11597, 11614), False, 'import json\n'), ((12913, 12930), 'asyncio.sleep', 'asyncio.sleep', (['(30)'], {}), '(30)\n', (12926, 12930), False, 'import asyncio\n')] |
from django.test import TestCase
from base.repository import BaseRepository
from film import models, repositories
class RepositoryTest(TestCase):
def test_movie_repository_class(self):
repo = repositories.MovieRepository()
self.assertEqual(type(models.Movie), type(repo.get_model()))
def test_series_repository_class(self):
repo = repositories.SeriesRepository()
self.assertEqual(type(models.Series), type(repo.get_model()))
| [
"film.repositories.MovieRepository",
"film.repositories.SeriesRepository"
] | [((206, 236), 'film.repositories.MovieRepository', 'repositories.MovieRepository', ([], {}), '()\n', (234, 236), False, 'from film import models, repositories\n'), ((366, 397), 'film.repositories.SeriesRepository', 'repositories.SeriesRepository', ([], {}), '()\n', (395, 397), False, 'from film import models, repositories\n')] |
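The test above only implies that each repository exposes a get_model() method; the real film/repositories.py is not shown in this row. A minimal sketch of what it could look like under that assumption:

from base.repository import BaseRepository
from film import models

class MovieRepository(BaseRepository):
    def get_model(self):
        return models.Movie

class SeriesRepository(BaseRepository):
    def get_model(self):
        return models.Series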
import logging
import random
import time
from django.conf import settings
from django_aws_xray import records, xray
class XRayMiddleware:
def __init__(self, get_response):
self.get_response = get_response
self.sampling_rate = getattr(settings, 'AWS_XRAY_SAMPLING_RATE', 100)
self.exclude_paths = getattr(settings, 'AWS_XRAY_EXCLUDE_PATHS', [])
self.logger = logging.getLogger(__name__)
def __call__(self, request):
trace = self._create_trace(request)
# Set the thread local trace object
xray.set_current_trace(trace)
with trace.track('django.request') as record:
response = self.get_response(request)
record.http = self._create_http_record(request, response)
# Send out the traces
trace.send()
# Set the HTTP header
response['X-Amzn-Trace-Id'] = trace.http_header
# Cleanup the thread local trace object
xray.set_current_trace(None)
return response
def _create_trace(self, request):
# Decide if we need to sample this request
sampled = random.randint(0, 100) <= self.sampling_rate
for path in self.exclude_paths:
if request.path.startswith(path):
sampled = False
trace_header = request.META.get('HTTP_X_AMZN_TRACE_ID')
if trace_header:
trace = xray.Trace.from_http_header(trace_header, sampled)
else:
trace = xray.Trace.generate_new(sampled)
return trace
def _create_http_record(self, request, response):
return records.HttpRecord(
request_method=request.method,
request_url=request.get_full_path(),
            request_user_agent=request.META.get('HTTP_USER_AGENT'),
response_status_code=response.status_code)
| [
"logging.getLogger",
"django_aws_xray.xray.Trace.generate_new",
"django_aws_xray.xray.Trace.from_http_header",
"django_aws_xray.xray.set_current_trace",
"random.randint"
] | [((398, 425), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (415, 425), False, 'import logging\n'), ((557, 586), 'django_aws_xray.xray.set_current_trace', 'xray.set_current_trace', (['trace'], {}), '(trace)\n', (579, 586), False, 'from django_aws_xray import records, xray\n'), ((958, 986), 'django_aws_xray.xray.set_current_trace', 'xray.set_current_trace', (['None'], {}), '(None)\n', (980, 986), False, 'from django_aws_xray import records, xray\n'), ((1120, 1142), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1134, 1142), False, 'import random\n'), ((1393, 1443), 'django_aws_xray.xray.Trace.from_http_header', 'xray.Trace.from_http_header', (['trace_header', 'sampled'], {}), '(trace_header, sampled)\n', (1420, 1443), False, 'from django_aws_xray import records, xray\n'), ((1478, 1510), 'django_aws_xray.xray.Trace.generate_new', 'xray.Trace.generate_new', (['sampled'], {}), '(sampled)\n', (1501, 1510), False, 'from django_aws_xray import records, xray\n')] |
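The middleware above reads its configuration from Django settings with getattr defaults (sampling rate 100, no excluded paths). A minimal hedged sketch of the relevant settings; the values and the middleware's dotted path are illustrative, only the setting names come from the code:

# settings.py (illustrative values)
AWS_XRAY_SAMPLING_RATE = 10            # trace roughly 10% of requests
AWS_XRAY_EXCLUDE_PATHS = ['/health/']  # skip tracing for matching path prefixes

MIDDLEWARE = [
    # ...
    'django_aws_xray.middleware.XRayMiddleware',  # exact module path is an assumption
]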
import os
import uuid
from flask import Flask, render_template, request, url_for, send_from_directory, flash
from flask_caching import Cache
from flask_service.service_predictor import SatellitePredictor
app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'uploads')
app.config['RESULTS_FOLDER'] = os.path.join(os.getcwd(), 'results')
service = SatellitePredictor()
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
@app.route('/results/<filename>')
def results_file(filename):
return send_from_directory(app.config['RESULTS_FOLDER'],
filename)
# Allowed upload file formats
ALLOWED_EXTENSIONS = ['png', 'jpg', 'JPG', 'PNG', 'bmp']
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
file = request.files['file']
if not file.filename:
            flash('No image was submitted! Please choose an image in one of the following formats: {}'.format(ALLOWED_EXTENSIONS))
return render_template("index.html")
elif not allowed_file(file.filename):
            flash('The submitted image is not allowed! Supported image formats: {}'.format(ALLOWED_EXTENSIONS))
return render_template("index.html")
threshold = request.form.get("threshold", "0.8")
threshold = float(threshold) if threshold else 0.8
top_k = request.form.get("top_k", "1")
top_k = int(top_k) if top_k else 1
        # Look up the cached result
cached_res = cache.get("{}:{}:{}".format(top_k, threshold, file.filename))
if cached_res:
source_img_url, result_img_url = cached_res.split(':')
cache.set(file.filename, "{}:{}".format(source_img_url, result_img_url))
return render_template("index.html", source_img=source_img_url, res_img=result_img_url)
_image_name = "{}.png".format(uuid.uuid4())
upload_path = os.path.join(app.config['UPLOAD_FOLDER'], _image_name)
file.save(upload_path)
source_img_url = url_for('uploaded_file', filename=_image_name)
result_image = service.predict(upload_path, threshold)
result_img_url = url_for('results_file', filename=result_image)
        # Store the result in the cache
cache.set("{}:{}:{}".format(top_k, threshold, file.filename), "{}:{}".format(source_img_url, result_img_url))
return render_template("index.html", source_img=source_img_url, res_img=result_img_url)
else:
return render_template("index.html",
source_img="/uploads/4c510a77-1777-48c7-878a-0ef93c2438d8.png",
res_img="/results/4c510a77-1777-48c7-878a-0ef93c2438d8.png")
if __name__ == "__main__":
app.run(port=8009, host='0.0.0.0')
| [
"flask.render_template",
"flask_service.service_predictor.SatellitePredictor",
"flask.send_from_directory",
"flask.Flask",
"os.path.join",
"os.getcwd",
"flask.request.form.get",
"flask.url_for",
"flask_caching.Cache",
"uuid.uuid4"
] | [((213, 228), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((237, 280), 'flask_caching.Cache', 'Cache', (['app'], {'config': "{'CACHE_TYPE': 'simple'}"}), "(app, config={'CACHE_TYPE': 'simple'})\n", (242, 280), False, 'from flask_caching import Cache\n'), ((468, 488), 'flask_service.service_predictor.SatellitePredictor', 'SatellitePredictor', ([], {}), '()\n', (486, 488), False, 'from flask_service.service_predictor import SatellitePredictor\n'), ((365, 376), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (374, 376), False, 'import os\n'), ((433, 444), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (442, 444), False, 'import os\n'), ((565, 623), 'flask.send_from_directory', 'send_from_directory', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (584, 623), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((730, 789), 'flask.send_from_directory', 'send_from_directory', (["app.config['RESULTS_FOLDER']", 'filename'], {}), "(app.config['RESULTS_FOLDER'], filename)\n", (749, 789), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((1464, 1500), 'flask.request.form.get', 'request.form.get', (['"""threshold"""', '"""0.8"""'], {}), "('threshold', '0.8')\n", (1480, 1500), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((1576, 1606), 'flask.request.form.get', 'request.form.get', (['"""top_k"""', '"""1"""'], {}), "('top_k', '1')\n", (1592, 1606), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((2099, 2153), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", '_image_name'], {}), "(app.config['UPLOAD_FOLDER'], _image_name)\n", (2111, 2153), False, 'import os\n'), ((2210, 2256), 'flask.url_for', 'url_for', (['"""uploaded_file"""'], {'filename': '_image_name'}), "('uploaded_file', filename=_image_name)\n", (2217, 2256), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((2345, 2391), 'flask.url_for', 'url_for', (['"""results_file"""'], {'filename': 'result_image'}), "('results_file', filename=result_image)\n", (2352, 2391), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((2542, 2627), 'flask.render_template', 'render_template', (['"""index.html"""'], {'source_img': 'source_img_url', 'res_img': 'result_img_url'}), "('index.html', source_img=source_img_url, res_img=result_img_url\n )\n", (2557, 2627), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((2648, 2812), 'flask.render_template', 'render_template', (['"""index.html"""'], {'source_img': '"""/uploads/4c510a77-1777-48c7-878a-0ef93c2438d8.png"""', 'res_img': '"""/results/4c510a77-1777-48c7-878a-0ef93c2438d8.png"""'}), "('index.html', source_img=\n '/uploads/4c510a77-1777-48c7-878a-0ef93c2438d8.png', res_img=\n '/results/4c510a77-1777-48c7-878a-0ef93c2438d8.png')\n", (2663, 2812), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((1250, 1279), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1265, 1279), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((1943, 2028), 
'flask.render_template', 'render_template', (['"""index.html"""'], {'source_img': 'source_img_url', 'res_img': 'result_img_url'}), "('index.html', source_img=source_img_url, res_img=result_img_url\n )\n", (1958, 2028), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n'), ((2063, 2075), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2073, 2075), False, 'import uuid\n'), ((1414, 1443), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1429, 1443), False, 'from flask import Flask, render_template, request, url_for, send_from_directory, flash\n')] |
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pytest
import torch
import poptorch
import helpers
def slice_test_harness(tensor_x, tensor_y, start_fn, end_fn, step):
op = lambda x, y: x[start_fn(x):end_fn(x):step] + y
model = helpers.ModelWithWeights(op, tensor_x.shape)
# Run on CPU.
native_out, _ = model((tensor_x, tensor_y))
# Run on IPU.
poptorch_model = poptorch.trainingModel(model)
poptorch_out, _ = poptorch_model((tensor_x, tensor_y))
# Inference test - check outputs
helpers.assert_allclose(expected=native_out, actual=poptorch_out)
# Training test - check weights changed
poptorch_model.assert_weights_changed()
@pytest.mark.parametrize("step", [1, 2, 3])
def test_slice_idx_size_of(step):
def start_fn(tensor_in):
return tensor_in.shape[0] // 2
def end_fn(tensor_in):
return tensor_in.shape[0] - 1
slice_test_harness(torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([3.0]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_slice_with_sum(step):
def start_fn(tensor_in):
del tensor_in
return torch.sum(torch.tensor([1, 2, 3])) // 3 - 2
def end_fn(tensor_in):
del tensor_in
return torch.sum(torch.tensor([1, 2, 3])) // 3 + 1
slice_test_harness(torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([-3.0]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_slice_with_branch(step):
def start_fn(tensor_in):
del tensor_in
a = torch.sum(torch.tensor([1, 2, 3])) // 3 - 2
b = torch.sum(torch.tensor([3, 4, 5])) // 3 - 4
return a + b + 1
def end_fn(tensor_in):
del tensor_in
a = torch.sum(torch.tensor([3, 2, 1])) // 3 + 2
b = torch.sum(torch.tensor([3, 4, 5])) // 3 + 1
return a - 1 + b
slice_test_harness(torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([-3.0]), start_fn, end_fn, step)
def dynamic_slice_harness(tensor_in,
extra_in,
start_fn,
end_fn,
step,
test_training=True):
if test_training:
op = lambda t: t[start_fn(extra_in):end_fn(extra_in):step]
model = helpers.ModelWithWeights(op, tensor_in.shape)
# Run on CPU.
native_out, _ = model((tensor_in, ))
# Run on IPU.
poptorch_model = poptorch.trainingModel(model)
poptorch_out, _ = poptorch_model((tensor_in, ))
# Training test - check weights changed
poptorch_model.assert_weights_changed()
else:
model = torch.nn.Module()
model.forward = lambda t, e: t[start_fn(e):end_fn(e):step]
# Run on CPU.
native_out = model(tensor_in, extra_in)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(tensor_in, extra_in)
helpers.assert_allclose(expected=native_out, actual=poptorch_out)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_dynamic_slice_one_dim_add(step):
def start_fn(extra_in):
return extra_in
def end_fn(extra_in):
return extra_in + 4
dynamic_slice_harness(
torch.tensor([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([1]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_dynamic_slice_one_dim_subtract(step):
def start_fn(extra_in):
return extra_in - 4
def end_fn(extra_in):
return extra_in
dynamic_slice_harness(
torch.tensor([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_dynamic_slice_one_dim_mix_up(step):
def start_fn(extra_in):
tmp = extra_in + 3
tmp = tmp - 10
tmp = tmp + 3
return tmp
def end_fn(extra_in):
tmp = extra_in - 6
tmp = tmp + 4
return tmp
dynamic_slice_harness(
torch.tensor([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_dynamic_slice_two_dims(step):
def start_fn(extra_in):
return extra_in.to(torch.int32)
def end_fn(extra_in):
return extra_in.to(torch.int32) + 1
dynamic_slice_harness(
torch.tensor([[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
[8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]]),
torch.tensor([0]), start_fn, end_fn, step)
@pytest.mark.parametrize("step", [1, 2, 3])
def test_dynamic_slice_two_dims_twice_sliced(step):
start_dim_one = torch.tensor([1])
start_dim_two = torch.tensor([0])
op = lambda t: t[start_dim_one:start_dim_one + 2:step, start_dim_two:
start_dim_two + 4:step]
tensor_in = torch.tensor([[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
[8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0],
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
[8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]])
model = helpers.ModelWithWeights(op, tensor_in.shape)
# Run on CPU.
native_out, _ = model((tensor_in, ))
# Run on IPU.
poptorch_model = poptorch.trainingModel(model)
poptorch_out, _ = poptorch_model((tensor_in, ))
# Inference test - check outputs
helpers.assert_allclose(expected=native_out, actual=poptorch_out)
# Training test - check weights changed
poptorch_model.assert_weights_changed()
def test_dynamic_slice_one_dim_equal():
def start_fn(extra_in):
return extra_in
def end_fn(extra_in):
return extra_in
error_msg = r"The start and end of a slice must be different."
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
dynamic_slice_harness(torch.tensor(
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]),
start_fn,
end_fn,
1,
test_training=False)
def test_dynamic_slice_one_dim_less_than():
def start_fn(extra_in):
return extra_in
def end_fn(extra_in):
return extra_in - 2
error_msg = (r"Taking a slice of a tensor with the end less than the " +
r"start is not supported.")
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
dynamic_slice_harness(torch.tensor(
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]),
start_fn,
end_fn,
2,
test_training=False)
def test_dynamic_slice_one_dim_multiply():
def start_fn(extra_in):
return extra_in
def end_fn(extra_in):
return extra_in * 2
error_msg = (
r"The size of the sliced tensor must be a constant for each " +
r"execution of the model when running on the IPU\.")
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
dynamic_slice_harness(torch.tensor(
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]),
start_fn,
end_fn,
3,
test_training=False)
def test_dynamic_slice_one_dim_add_non_factor():
def start_fn(extra_in):
return extra_in
def end_fn(extra_in):
return extra_in + 7
error_msg = (r"The size of the slice \(7\) must be a factor of the " +
r"slicing dimension \(8\)\.")
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
dynamic_slice_harness(torch.tensor(
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([1]),
start_fn,
end_fn,
1,
test_training=False)
def test_dynamic_slice_one_dim_mix_up_float():
def start_fn(extra_in):
tmp = extra_in + 3
tmp = tmp - 10.5
tmp = tmp + 3.5
return tmp.to(torch.int32)
def end_fn(extra_in):
tmp = extra_in - 6.5
tmp = tmp + 4.5
return tmp.to(torch.int32)
error_msg = (
r"The size of the sliced tensor must be a constant for each " +
r"execution of the model when running on the IPU\. In this case, " +
r"there is a float added to the slice indices meaning it may change " +
r"between runs\.")
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
dynamic_slice_harness(torch.tensor(
[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
torch.tensor([5]),
start_fn,
end_fn,
2,
test_training=False)
@pytest.mark.parametrize("dim", [0, 1, 2])
def test_unbind(dim):
op = lambda x: torch.unbind(x, dim)
x = torch.randn(2, 3, 4)
model = helpers.ModelWithWeights(op, x.shape, out_fn=lambda x: x[0])
poptorch_model = poptorch.trainingModel(model)
native_out, _ = model((x, ))
poptorch_out, _ = poptorch_model((x, ))
# Check the unbound dim length is the same
assert len(native_out) == len(poptorch_out)
# Inference test - check outputs
for tensor_native, tensor_pop in zip(native_out, poptorch_out):
helpers.assert_allclose(expected=tensor_native, actual=tensor_pop)
# Training test - check weights changed
poptorch_model.assert_weights_changed()
def test_scalarslice():
class Model(torch.nn.Module):
def forward(self, x):
return (x / 2)[:]
model = Model()
poptorch_model = poptorch.inferenceModel(model)
input_tensor = torch.tensor([2])
assert poptorch_model(input_tensor) == model(input_tensor)
def test_dynamic_length_slice():
class Model(torch.nn.Module):
def forward(self, x, l):
return x[l:]
model = Model()
poptorch_model = poptorch.inferenceModel(model)
inp, l = torch.rand(10, 10), torch.LongTensor([2])
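    # Slicing from a runtime-dependent start gives a non-constant output size, which the IPU rejects.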
error_msg = (
r"The size of the sliced tensor must be a constant for each " +
r"execution of the model when running on the IPU\.")
with pytest.raises(poptorch.Error, match=error_msg):
# Set test_training=False because we expect inference to fail
poptorch_model(inp, l)
| [
"helpers.ModelWithWeights",
"torch.LongTensor",
"torch.unbind",
"torch.tensor",
"pytest.mark.parametrize",
"helpers.assert_allclose",
"poptorch.inferenceModel",
"poptorch.trainingModel",
"torch.nn.Module",
"pytest.raises",
"torch.randn",
"torch.rand"
] | [((720, 762), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (743, 762), False, 'import pytest\n'), ((1082, 1124), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (1105, 1124), False, 'import pytest\n'), ((1527, 1569), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (1550, 1569), False, 'import pytest\n'), ((3197, 3239), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (3220, 3239), False, 'import pytest\n'), ((3535, 3577), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (3558, 3577), False, 'import pytest\n'), ((3878, 3920), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (3901, 3920), False, 'import pytest\n'), ((4327, 4369), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (4350, 4369), False, 'import pytest\n'), ((4760, 4802), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step"""', '[1, 2, 3]'], {}), "('step', [1, 2, 3])\n", (4783, 4802), False, 'import pytest\n'), ((9625, 9666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[0, 1, 2]'], {}), "('dim', [0, 1, 2])\n", (9648, 9666), False, 'import pytest\n'), ((279, 323), 'helpers.ModelWithWeights', 'helpers.ModelWithWeights', (['op', 'tensor_x.shape'], {}), '(op, tensor_x.shape)\n', (303, 323), False, 'import helpers\n'), ((431, 460), 'poptorch.trainingModel', 'poptorch.trainingModel', (['model'], {}), '(model)\n', (453, 460), False, 'import poptorch\n'), ((562, 627), 'helpers.assert_allclose', 'helpers.assert_allclose', ([], {'expected': 'native_out', 'actual': 'poptorch_out'}), '(expected=native_out, actual=poptorch_out)\n', (585, 627), False, 'import helpers\n'), ((3128, 3193), 'helpers.assert_allclose', 'helpers.assert_allclose', ([], {'expected': 'native_out', 'actual': 'poptorch_out'}), '(expected=native_out, actual=poptorch_out)\n', (3151, 3193), False, 'import helpers\n'), ((4875, 4892), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (4887, 4892), False, 'import torch\n'), ((4913, 4930), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (4925, 4930), False, 'import torch\n'), ((5068, 5259), 'torch.tensor', 'torch.tensor', (['[[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, 7.0, 6.0, 5.0, 4.0, 3.0, \n 2.0, 1.0], [2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, 7.0, 6.0, \n 5.0, 4.0, 3.0, 2.0, 1.0]]'], {}), '([[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, 7.0, 6.0, 5.0,\n 4.0, 3.0, 2.0, 1.0], [2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, \n 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]])\n', (5080, 5259), False, 'import torch\n'), ((5354, 5399), 'helpers.ModelWithWeights', 'helpers.ModelWithWeights', (['op', 'tensor_in.shape'], {}), '(op, tensor_in.shape)\n', (5378, 5399), False, 'import helpers\n'), ((5500, 5529), 'poptorch.trainingModel', 'poptorch.trainingModel', (['model'], {}), '(model)\n', (5522, 5529), False, 'import poptorch\n'), ((5624, 5689), 'helpers.assert_allclose', 'helpers.assert_allclose', ([], {'expected': 'native_out', 'actual': 'poptorch_out'}), '(expected=native_out, actual=poptorch_out)\n', (5647, 5689), False, 'import helpers\n'), ((9737, 9757), 'torch.randn', 'torch.randn', (['(2)', '(3)', '(4)'], {}), 
'(2, 3, 4)\n', (9748, 9757), False, 'import torch\n'), ((9770, 9830), 'helpers.ModelWithWeights', 'helpers.ModelWithWeights', (['op', 'x.shape'], {'out_fn': '(lambda x: x[0])'}), '(op, x.shape, out_fn=lambda x: x[0])\n', (9794, 9830), False, 'import helpers\n'), ((9852, 9881), 'poptorch.trainingModel', 'poptorch.trainingModel', (['model'], {}), '(model)\n', (9874, 9881), False, 'import poptorch\n'), ((10488, 10518), 'poptorch.inferenceModel', 'poptorch.inferenceModel', (['model'], {}), '(model)\n', (10511, 10518), False, 'import poptorch\n'), ((10539, 10556), 'torch.tensor', 'torch.tensor', (['[2]'], {}), '([2])\n', (10551, 10556), False, 'import torch\n'), ((10789, 10819), 'poptorch.inferenceModel', 'poptorch.inferenceModel', (['model'], {}), '(model)\n', (10812, 10819), False, 'import poptorch\n'), ((955, 1009), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (967, 1009), False, 'import torch\n'), ((1034, 1053), 'torch.tensor', 'torch.tensor', (['[3.0]'], {}), '([3.0])\n', (1046, 1053), False, 'import torch\n'), ((1399, 1453), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (1411, 1453), False, 'import torch\n'), ((1478, 1498), 'torch.tensor', 'torch.tensor', (['[-3.0]'], {}), '([-3.0])\n', (1490, 1498), False, 'import torch\n'), ((2003, 2057), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (2015, 2057), False, 'import torch\n'), ((2082, 2102), 'torch.tensor', 'torch.tensor', (['[-3.0]'], {}), '([-3.0])\n', (2094, 2102), False, 'import torch\n'), ((2458, 2503), 'helpers.ModelWithWeights', 'helpers.ModelWithWeights', (['op', 'tensor_in.shape'], {}), '(op, tensor_in.shape)\n', (2482, 2503), False, 'import helpers\n'), ((2620, 2649), 'poptorch.trainingModel', 'poptorch.trainingModel', (['model'], {}), '(model)\n', (2642, 2649), False, 'import poptorch\n'), ((2829, 2846), 'torch.nn.Module', 'torch.nn.Module', ([], {}), '()\n', (2844, 2846), False, 'import torch\n'), ((3033, 3063), 'poptorch.inferenceModel', 'poptorch.inferenceModel', (['model'], {}), '(model)\n', (3056, 3063), False, 'import poptorch\n'), ((3425, 3479), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (3437, 3479), False, 'import torch\n'), ((3489, 3506), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (3501, 3506), False, 'import torch\n'), ((3768, 3822), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (3780, 3822), False, 'import torch\n'), ((3832, 3849), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (3844, 3849), False, 'import torch\n'), ((4217, 4271), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (4229, 4271), False, 'import torch\n'), ((4281, 4298), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (4293, 4298), False, 'import torch\n'), ((4584, 4686), 'torch.tensor', 'torch.tensor', (['[[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, 7.0, 6.0, 5.0, 4.0, 3.0, \n 2.0, 1.0]]'], {}), '([[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [8.0, 7.0, 6.0, 5.0,\n 4.0, 3.0, 2.0, 1.0]])\n', (4596, 4686), False, 'import torch\n'), ((4714, 4731), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (4726, 
4731), False, 'import torch\n'), ((6002, 6048), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (6015, 6048), False, 'import pytest\n'), ((6716, 6762), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (6729, 6762), False, 'import pytest\n'), ((7458, 7504), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (7471, 7504), False, 'import pytest\n'), ((8177, 8223), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (8190, 8223), False, 'import pytest\n'), ((9194, 9240), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (9207, 9240), False, 'import pytest\n'), ((9708, 9728), 'torch.unbind', 'torch.unbind', (['x', 'dim'], {}), '(x, dim)\n', (9720, 9728), False, 'import torch\n'), ((10170, 10236), 'helpers.assert_allclose', 'helpers.assert_allclose', ([], {'expected': 'tensor_native', 'actual': 'tensor_pop'}), '(expected=tensor_native, actual=tensor_pop)\n', (10193, 10236), False, 'import helpers\n'), ((10833, 10851), 'torch.rand', 'torch.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (10843, 10851), False, 'import torch\n'), ((10853, 10874), 'torch.LongTensor', 'torch.LongTensor', (['[2]'], {}), '([2])\n', (10869, 10874), False, 'import torch\n'), ((11037, 11083), 'pytest.raises', 'pytest.raises', (['poptorch.Error'], {'match': 'error_msg'}), '(poptorch.Error, match=error_msg)\n', (11050, 11083), False, 'import pytest\n'), ((6150, 6204), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (6162, 6204), False, 'import torch\n'), ((6249, 6266), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (6261, 6266), False, 'import torch\n'), ((6864, 6918), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (6876, 6918), False, 'import torch\n'), ((6963, 6980), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (6975, 6980), False, 'import torch\n'), ((7606, 7660), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (7618, 7660), False, 'import torch\n'), ((7705, 7722), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (7717, 7722), False, 'import torch\n'), ((8325, 8379), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (8337, 8379), False, 'import torch\n'), ((8424, 8441), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (8436, 8441), False, 'import torch\n'), ((9342, 9396), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([2.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (9354, 9396), False, 'import torch\n'), ((9441, 9458), 'torch.tensor', 'torch.tensor', (['[5]'], {}), '([5])\n', (9453, 9458), False, 'import torch\n'), ((1232, 1255), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1244, 1255), False, 'import torch\n'), ((1341, 1364), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1353, 1364), False, 'import torch\n'), ((1677, 1700), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1689, 1700), False, 'import 
torch\n'), ((1733, 1756), 'torch.tensor', 'torch.tensor', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (1745, 1756), False, 'import torch\n'), ((1864, 1887), 'torch.tensor', 'torch.tensor', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (1876, 1887), False, 'import torch\n'), ((1920, 1943), 'torch.tensor', 'torch.tensor', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (1932, 1943), False, 'import torch\n')] |
import json
import click
from tqdm import tqdm
from discopy.parsers.pipeline import ParserPipeline
from discopy.utils import init_logger
from discopy_data.data.doc import Document
from discopy_data.nn.bert import get_sentence_embedder
@click.command()
@click.argument('bert-model', type=str)
@click.argument('model-path', type=str)
@click.option('-i', '--src', default='-', type=click.File('r'))
@click.option('-o', '--tgt', default='-', type=click.File('w'))
@click.option('-l', '--limit', default=0, type=int)
def main(bert_model, model_path, src, tgt, limit):
logger = init_logger()
logger.info('Init Parser...')
get_sentence_embeddings = get_sentence_embedder(bert_model)
parser = ParserPipeline.from_config(model_path)
parser.load(model_path)
logger.info('Load pre-trained Parser...')
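    # Stream the input one line at a time; each line is a JSON-encoded document.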
for line_i, line in tqdm(enumerate(src)):
if limit and line_i >= limit:
break
doc = Document.from_json(json.loads(line))
if len(doc.sentences) == 0:
continue
for sent_i, sent in enumerate(doc.sentences):
sent_words = sent.tokens
embeddings = get_sentence_embeddings(sent_words)
doc.sentences[sent_i].embeddings = embeddings
doc = parser(doc)
tgt.write(json.dumps(doc.to_json()) + '\n')
if __name__ == '__main__':
main()
| [
"click.argument",
"json.loads",
"click.option",
"click.File",
"discopy.parsers.pipeline.ParserPipeline.from_config",
"click.command",
"discopy_data.nn.bert.get_sentence_embedder",
"discopy.utils.init_logger"
] | [((240, 255), 'click.command', 'click.command', ([], {}), '()\n', (253, 255), False, 'import click\n'), ((257, 295), 'click.argument', 'click.argument', (['"""bert-model"""'], {'type': 'str'}), "('bert-model', type=str)\n", (271, 295), False, 'import click\n'), ((297, 335), 'click.argument', 'click.argument', (['"""model-path"""'], {'type': 'str'}), "('model-path', type=str)\n", (311, 335), False, 'import click\n'), ((465, 515), 'click.option', 'click.option', (['"""-l"""', '"""--limit"""'], {'default': '(0)', 'type': 'int'}), "('-l', '--limit', default=0, type=int)\n", (477, 515), False, 'import click\n'), ((580, 593), 'discopy.utils.init_logger', 'init_logger', ([], {}), '()\n', (591, 593), False, 'from discopy.utils import init_logger\n'), ((658, 691), 'discopy_data.nn.bert.get_sentence_embedder', 'get_sentence_embedder', (['bert_model'], {}), '(bert_model)\n', (679, 691), False, 'from discopy_data.nn.bert import get_sentence_embedder\n'), ((705, 743), 'discopy.parsers.pipeline.ParserPipeline.from_config', 'ParserPipeline.from_config', (['model_path'], {}), '(model_path)\n', (731, 743), False, 'from discopy.parsers.pipeline import ParserPipeline\n'), ((383, 398), 'click.File', 'click.File', (['"""r"""'], {}), "('r')\n", (393, 398), False, 'import click\n'), ((447, 462), 'click.File', 'click.File', (['"""w"""'], {}), "('w')\n", (457, 462), False, 'import click\n'), ((953, 969), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (963, 969), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
class WebServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
print("metho do_GET")
form_html = self.get_form_body()
if self.path.endswith("/hello"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
message = ""
message += "<html><body>"
message += "<h1>Hello!</h1>"
message += form_html
message += "</body></html>"
print("MESSAGE={}".format(message))
self.wfile.write(bytes(message, "utf-8"))
return
if self.path.endswith("/hola"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
message = ""
message += "<html><body>"
message += "<h1>Hola!</h1>"
message += form_html
message += "</body></html>"
print("MESSAGE={}".format(message))
self.wfile.write(bytes(message, "utf-8"))
return
else:
self.send_error(404, 'File Not Found: {}'.format(self.path))
def do_POST(self):
print("method do_POST")
form_html = self.get_form_body()
try:
self.send_response(200)
self.end_headers()
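            # Parse the POSTed multipart/form-data body so the submitted fields can be read by name.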
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
user_text = form.getvalue('message')
output = ""
output += "<html><body>"
output += "<h1>Okay, how about this:</h1>"
output += "<h2>{}</h2>".format(user_text)
output += form_html
output += "<html><body>"
print("OUTPUT={}".format(output))
self.wfile.write(bytes(output, "utf-8"))
except Exception as e:
print(e)
@staticmethod
def get_form_body():
form_body = (
"<form method='POST' enctype='multipart/form-data' "
"action='/hello'>"
"<h2>What would you like to say?</h2> "
"<input name='message' type='text'>"
"<input type='submit' value='Submit'></form >"
)
return form_body
def main():
try:
port = 9080
server = HTTPServer(('', port), WebServerHandler)
print("Web Server running on port {}".format(port))
server.serve_forever()
except KeyboardInterrupt:
print(" ^C entered, stopping web server....")
# sudo kill $(sudo lsof -t -i:9080)
server.socket.close()
# added server.server_close() because port was still open
server.server_close()
if __name__ == '__main__':
main() | [
"http.server.HTTPServer",
"cgi.FieldStorage"
] | [((2565, 2605), 'http.server.HTTPServer', 'HTTPServer', (["('', port)", 'WebServerHandler'], {}), "(('', port), WebServerHandler)\n", (2575, 2605), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n'), ((1471, 1611), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {'fp': 'self.rfile', 'headers': 'self.headers', 'environ': "{'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}"}), "(fp=self.rfile, headers=self.headers, environ={\n 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']})\n", (1487, 1611), False, 'import cgi\n')] |
import cv2 as cv
import numpy as np
img =cv.imread("Photos/Cat.jpeg")
cv.imshow("cat", img)
blank=np.zeros(img.shape[:2], dtype="uint8")
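# single-channel black image used as the empty channels when re-merging each colour on its own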
b,g,r=cv.split(img)
cv.imshow("b",b) # mavi yoğunluklu bölgeler daha beyaz içinde içermeyen yeerler siyaha yakın
cv.imshow("g",g) # yeşil yoğunluklu bölgeler daha beyaz içinde içermeyen yeerler siyaha yakın
cv.imshow("r",r) # kırmızı yoğunluklu bölgeler daha beyaz içinde içermeyen yeerler siyaha yakın
print(img.shape) #(168, 300, 3)
print(b.shape) # (168, 300)
print(g.shape) # (168, 300)
print(r.shape) # (168, 300)
merged =cv.merge([b,g,r])
cv.imshow("merged", merged)
blue = cv.merge([b,blank,blank])
green= cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
cv.imshow("blue",blue)
cv.imshow("green",green)
cv.imshow("red",red)
cv.waitKey(0) | [
"cv2.merge",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.split",
"cv2.imread"
] | [((42, 70), 'cv2.imread', 'cv.imread', (['"""Photos/Cat.jpeg"""'], {}), "('Photos/Cat.jpeg')\n", (51, 70), True, 'import cv2 as cv\n'), ((71, 92), 'cv2.imshow', 'cv.imshow', (['"""cat"""', 'img'], {}), "('cat', img)\n", (80, 92), True, 'import cv2 as cv\n'), ((100, 138), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (108, 138), True, 'import numpy as np\n'), ((146, 159), 'cv2.split', 'cv.split', (['img'], {}), '(img)\n', (154, 159), True, 'import cv2 as cv\n'), ((160, 177), 'cv2.imshow', 'cv.imshow', (['"""b"""', 'b'], {}), "('b', b)\n", (169, 177), True, 'import cv2 as cv\n'), ((253, 270), 'cv2.imshow', 'cv.imshow', (['"""g"""', 'g'], {}), "('g', g)\n", (262, 270), True, 'import cv2 as cv\n'), ((347, 364), 'cv2.imshow', 'cv.imshow', (['"""r"""', 'r'], {}), "('r', r)\n", (356, 364), True, 'import cv2 as cv\n'), ((569, 588), 'cv2.merge', 'cv.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (577, 588), True, 'import cv2 as cv\n'), ((587, 614), 'cv2.imshow', 'cv.imshow', (['"""merged"""', 'merged'], {}), "('merged', merged)\n", (596, 614), True, 'import cv2 as cv\n'), ((623, 650), 'cv2.merge', 'cv.merge', (['[b, blank, blank]'], {}), '([b, blank, blank])\n', (631, 650), True, 'import cv2 as cv\n'), ((656, 683), 'cv2.merge', 'cv.merge', (['[blank, g, blank]'], {}), '([blank, g, blank])\n', (664, 683), True, 'import cv2 as cv\n'), ((688, 715), 'cv2.merge', 'cv.merge', (['[blank, blank, r]'], {}), '([blank, blank, r])\n', (696, 715), True, 'import cv2 as cv\n'), ((715, 738), 'cv2.imshow', 'cv.imshow', (['"""blue"""', 'blue'], {}), "('blue', blue)\n", (724, 738), True, 'import cv2 as cv\n'), ((738, 763), 'cv2.imshow', 'cv.imshow', (['"""green"""', 'green'], {}), "('green', green)\n", (747, 763), True, 'import cv2 as cv\n'), ((763, 784), 'cv2.imshow', 'cv.imshow', (['"""red"""', 'red'], {}), "('red', red)\n", (772, 784), True, 'import cv2 as cv\n'), ((785, 798), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (795, 798), True, 'import cv2 as cv\n')] |
from coap import coap
MOTE_IP = 'fc00:db20:35b:7399::5:92cc:0:2'
UDP_PORT = 61618 # can't be the port used in OV
c = coap.coap(udpPort=UDP_PORT)
# read the information about the board status
p = c.GET('coap://[{0}]/i'.format(MOTE_IP))
print(''.join([chr(b) for b in p]))
while True:
recv_in = input("Done. Press q to close. ")
if recv_in == 'q':
print('bye bye.')
c.close()
break
| [
"coap.coap.coap"
] | [((125, 152), 'coap.coap.coap', 'coap.coap', ([], {'udpPort': 'UDP_PORT'}), '(udpPort=UDP_PORT)\n', (134, 152), False, 'from coap import coap\n')] |
import maya.cmds as mc
from zen.iterable import iterable
from zen.isIterable import isIterable
from zen.listNodeConnections import listNodeConnections
def firstOpenPlug(*args):
sel=[]
for a in args:
sel.extend(iterable(a))
attributes=[]
for s in sel:
if mc.objExists(s):
attributes.append(mc.ls(s)[0])
objects=[]
indices=[]
for attr in attributes:
objects.append(mc.ls(attr,o=True)[0])
objConn=[]
for oc in listNodeConnections(objects[-1]):
objConn.extend(oc)
connections=[]
for conn in objConn:
if attr in conn:
connections.append(conn)
endPlug=-1
i=0
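  # scan plug indices upward until an index with no existing connection is found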
while endPlug==-1:
ic=False
for c in connections:
if (attr+'['+(str(i))+']') in c:
ic=True
break
if ic==False:
endPlug=i
i+=1
indices.append(endPlug)
if len(indices)>1:
return indices
elif len(indices)==1:
return indices[0]
else:
return
| [
"zen.iterable.iterable",
"zen.listNodeConnections.listNodeConnections",
"maya.cmds.ls",
"maya.cmds.objExists"
] | [((268, 283), 'maya.cmds.objExists', 'mc.objExists', (['s'], {}), '(s)\n', (280, 283), True, 'import maya.cmds as mc\n'), ((444, 476), 'zen.listNodeConnections.listNodeConnections', 'listNodeConnections', (['objects[-1]'], {}), '(objects[-1])\n', (463, 476), False, 'from zen.listNodeConnections import listNodeConnections\n'), ((217, 228), 'zen.iterable.iterable', 'iterable', (['a'], {}), '(a)\n', (225, 228), False, 'from zen.iterable import iterable\n'), ((393, 412), 'maya.cmds.ls', 'mc.ls', (['attr'], {'o': '(True)'}), '(attr, o=True)\n', (398, 412), True, 'import maya.cmds as mc\n'), ((307, 315), 'maya.cmds.ls', 'mc.ls', (['s'], {}), '(s)\n', (312, 315), True, 'import maya.cmds as mc\n')] |
import numpy as np
import time
start = time.time()
matrix = np.loadtxt('inst1_4.csv', dtype=int)
min_number = 0
assignment_function_cost = ''
assignment_cost = 0
high_number = 100000
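# Greedy heuristic: repeatedly take the smallest remaining cost, then block out its row and column.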
while True:
# Finds the minimum number and the index in the matrix.
min_number = matrix.min()
min_index = np.unravel_index(matrix.argmin(), matrix.shape)
# If there's no minimum number left in the matrix, the answer has been found and the loop ends.
if min_number == high_number:
break
# Replaces the rows and columns when a minimum number has been found.
matrix[:, min_index[1]] = high_number
matrix[min_index[0], :] = high_number
assignment_cost += min_number
assignment_function_cost += '[{}, {}] + '.format(min_index[0], min_index[1])
print(assignment_function_cost[:-2])
print('Total cost:', assignment_cost)
end = time.time()
total_time = end - start
print('\nScript duration:', end - start, 'seconds')
| [
"numpy.loadtxt",
"time.time"
] | [((43, 54), 'time.time', 'time.time', ([], {}), '()\n', (52, 54), False, 'import time\n'), ((67, 103), 'numpy.loadtxt', 'np.loadtxt', (['"""inst1_4.csv"""'], {'dtype': 'int'}), "('inst1_4.csv', dtype=int)\n", (77, 103), True, 'import numpy as np\n'), ((895, 906), 'time.time', 'time.time', ([], {}), '()\n', (904, 906), False, 'import time\n')] |
# boot.py -- run on boot-up
# can run arbitrary Python, but best to keep it minimal
import pyb
from util import *
# press the user switch to select what to do
sel = selector()
if sel == 0:
pyb.main('main.py') # main script to run after this one
elif sel == 1:
pyb.usb_mode('CDC+HID') # act as a serial device and a mouse
pyb.main('mouse.py')
#elif sel == 1:
#pyb.usb_mode('CDC+MSC') # act as a serial and a storage device
| [
"pyb.usb_mode",
"pyb.main"
] | [((204, 223), 'pyb.main', 'pyb.main', (['"""main.py"""'], {}), "('main.py')\n", (212, 223), False, 'import pyb\n'), ((281, 304), 'pyb.usb_mode', 'pyb.usb_mode', (['"""CDC+HID"""'], {}), "('CDC+HID')\n", (293, 304), False, 'import pyb\n'), ((347, 367), 'pyb.main', 'pyb.main', (['"""mouse.py"""'], {}), "('mouse.py')\n", (355, 367), False, 'import pyb\n')] |
import pytest
@pytest.fixture(scope='function')
def bootstrap_test_manager(session_manager):
"""Prepares a bootstrappable manager."""
session_manager.wait_for_ssh()
# We don't bootstrap here because bootstrapping in the test means that
# --pdb will actually be useful as it'll allow investigation before
# teardown on bootstrap failure
yield session_manager
session_manager.teardown()
| [
"pytest.fixture"
] | [((17, 49), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (31, 49), False, 'import pytest\n')] |
import psycopg2
connection = psycopg2.connect('dbname=example')
cursor = connection.cursor()
cur = connection.cursor()
cur.execute("DROP TABLE IF EXISTS table2;")
cur.execute('''
CREATE TABLE table2(
id INTEGER PRIMARY KEY,
completed BOOLEAN NOT NULL DEFAULT False
);
''')
#method 1 - passing in as a string composition as a tuple
cursor.execute('INSERT INTO table2 (id, completed) VALUES (%s, %s);', (1,True))
#method 2 - SQL using a template and data variable as a dictionary
SQL = 'INSERT INTO table2 (id, completed) VALUES (%(id)s, %(completed)s);'
data = {
    # a dict supplies values for a single row; use cursor.executemany() to insert several rows
    'id': 3,
    'completed': False,
}
cursor.execute(SQL, data)
cursor.execute('SELECT * from table2;')
result = cursor.fetchone()
print('fetchone', result)
result2 = cursor.fetchmany(2)
print('fetchmany(2)', result2)
result3 = cursor.fetchall()
print('fetchall', result3)
connection.commit()
cursor.close()
connection.close()
| [
"psycopg2.connect"
] | [((30, 64), 'psycopg2.connect', 'psycopg2.connect', (['"""dbname=example"""'], {}), "('dbname=example')\n", (46, 64), False, 'import psycopg2\n')] |
# from https://towardsdatascience.com/the-poisson-process-everything-you-need-to-know-322aa0ab9e9a
# generates inter-arrival times for a queueing process
# TODO - clean up as an event generator
import random
import math
_lambda = 5
_num_arrivals = 100
_arrival_time = 0
print('RAND,INTER_ARRV_T,ARRV_T')
for i in range(_num_arrivals):
#Get the next probability value from Uniform(0,1)
p = random.random()
#Plug it into the inverse of the CDF of Exponential(_lamnbda)
_inter_arrival_time = -math.log(1.0 - p)/_lambda
#Add the inter-arrival time to the running sum
_arrival_time = _arrival_time + _inter_arrival_time
#print it all out
print(str(p)+','+str(_inter_arrival_time)+','+str(_arrival_time)) | [
"random.random",
"math.log"
] | [((377, 392), 'random.random', 'random.random', ([], {}), '()\n', (390, 392), False, 'import random\n'), ((481, 498), 'math.log', 'math.log', (['(1.0 - p)'], {}), '(1.0 - p)\n', (489, 498), False, 'import math\n')] |
import requests
import json
from tkinter import *
from tkinter.messagebox import showinfo,showerror
def send_sms(number,message):
url='https://www.fast2sms.com/dev/bulk'
params={
'authorization':'<KEY>',
'sender_id':'FSTSMS',
'message':message,
'language':'english',
'route':'p',
'numbers':number
}
response=requests.get(url,params=params)
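    # The API responds with JSON; its 'return' field reports whether the message was accepted.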
dic=response.json()
print(dic)
return dic.get('return')
def btn_click():
num=textNumber.get()
msg=textMsg.get("1.0",END)
r=send_sms(num,msg)
if r==True:
showinfo("send success","successfully sent")
else:
showerror("error","something went wrong..")
#creating GUI
root=Tk()
root.title('message sender')
root.geometry('400x550')
font=("Helvetica",22,"bold")
textNumber = Entry(root,font=font)
textNumber.pack(fill=X,pady=20)
textMsg=Text(root)
textMsg.pack(fill=X)
sendBtn=Button(root,text="send sms",command=btn_click)
sendBtn.pack()
root.mainloop() | [
"tkinter.messagebox.showerror",
"tkinter.messagebox.showinfo",
"requests.get"
] | [((391, 423), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (403, 423), False, 'import requests\n'), ((645, 690), 'tkinter.messagebox.showinfo', 'showinfo', (['"""send success"""', '"""successfully sent"""'], {}), "('send success', 'successfully sent')\n", (653, 690), False, 'from tkinter.messagebox import showinfo, showerror\n'), ((718, 762), 'tkinter.messagebox.showerror', 'showerror', (['"""error"""', '"""something went wrong.."""'], {}), "('error', 'something went wrong..')\n", (727, 762), False, 'from tkinter.messagebox import showinfo, showerror\n')] |
__author__ = 'croman'
# -*- coding: utf-8 -*-
from polyglot.detect import Detector
from polyglot.text import Text, Word
import rdflib
from lxml import etree
import subprocess
import tweetstotxt
import codecs
import re
def ner(datasetfile, format, language):
tweetids = []
tweets = ''
tweetdict = {}
if format == 'xml-collection':
dataset = etree.parse(datasetfile)
for tweet in dataset.xpath('//Tweet'):
tweetText = tweet.xpath('./TweetText/text()')[0]
tweets += ' '.join(re.findall(r"[\w:/!#$%&*+,\-:;?@^_`{|}~.]+|[\"'()[\]<=>]", tweetText))+"\n"
tweetids.append(tweet.xpath('./TweetId/text()')[0])
tweets = tweets.encode('utf-8')
with codecs.open(datasetfile.split('.xml')[0]+'.txt', 'wb', encoding='utf-8') as txt:
tweets = tweets.decode('utf-8')
txt.write(tweets)
elif format == 'xml-socialtv':
dataset = etree.parse(datasetfile)
for tweet in dataset.xpath('//tweet'):
tweetText = tweet.xpath('./text()')[0]
tweets += ' '.join(re.findall(r"[\w:/!#$%&*+,\-:;?@^_`{|}~.]+|[\"'()[\]<=>]", tweetText))+"\n"
tweetids.append(tweet.get('id'))
tweets = tweets.encode('utf-8')
with codecs.open(datasetfile.split('.xml')[0]+'.txt', 'wb', encoding='utf-8') as txt:
tweets = tweets.decode('utf-8')
txt.write(tweets)
elif format == "nif":
a = rdflib.Graph()
a.parse(datasetfile, format='n3')
for s, p, o in a:
if s.endswith(',') and p.endswith('isString'):
tweetid = s.split('#')[0].split('.xml/')[1]
tweetdict[tweetid] = ' '.join(re.findall(r"[\w:/!#$%&*+,\-:;?@^_`{|}~.]+|[\"'()[\]<=>]", o))
for key in sorted(tweetdict):
tweetids.append(key)
tweets += tweetdict[key]+'\n'
tweets = tweets.encode('utf-8')
with codecs.open(datasetfile.split('.ttl')[0]+'.txt', 'wb', encoding='utf-8') as txt:
tweets = tweets.decode('utf-8')
txt.write(tweets)
elif format == "text":
filename = 'nerdy-input.txt'
with codecs.open(filename, 'wb', encoding='utf-8') as txt:
txt.write(datasetfile)
datasetfile = 'nerdy-input.ttl'
"""for t in tweets.split('\n'):
text = Text(t)
detector = Detector(t.decode('utf-8'))
print text.string.encode('utf-8')
print (detector.language)"""
p = subprocess.Popen(['polyglot', '--lang', language, 'ner', '--input', datasetfile.split('.ttl')[0]+'.txt'], stdout=subprocess.PIPE)
output,err = p.communicate()
results = ''
tweetoutput = output.split('\n\n')
tweetoutput.pop()
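    # Convert the tagger output to BIO tags: the first token of an entity gets B-, later tokens keep I-.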
for x in range(0, len(tweetoutput)):
inEntity = False
for line in tweetoutput[x].splitlines():
if len(line.split()) < 2:
word = line.split('O')[0].decode('utf-8')
entity = u'O'
else:
word = line.split()[0].decode('utf-8')
entity = line.split()[1].decode('utf-8')
if entity != 'O' and inEntity:
entity = 'I-'+entity.split('I-')[1]
elif entity != 'O' and inEntity == False:
entity = 'B-'+entity.split('I-')[1]
inEntity = True
else:
inEntity = False
results += word + u'/' + entity + u' '
if tweetids:
results += u"||"+tweetids[x]
results += u"\n"
return results
#print ner("Xavi marco un gol a Cristiano y Casillas es de Apple Inc", "text", "es")
#print ner("<NAME> is the president of the United States of America and the leader of The Beatles", "text", "en")
#print ner('El gobierno de Brasil condecoro a Ronaldo en Rio de Janeiro', 'text', 'es')
#print ner('Messi scored three goals against Chelsea. Mourinho must be angry.', 'text', 'en') | [
"rdflib.Graph",
"re.findall",
"lxml.etree.parse",
"codecs.open"
] | [((370, 394), 'lxml.etree.parse', 'etree.parse', (['datasetfile'], {}), '(datasetfile)\n', (381, 394), False, 'from lxml import etree\n'), ((937, 961), 'lxml.etree.parse', 'etree.parse', (['datasetfile'], {}), '(datasetfile)\n', (948, 961), False, 'from lxml import etree\n'), ((1461, 1475), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (1473, 1475), False, 'import rdflib\n'), ((534, 607), 're.findall', 're.findall', (['"""[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\'()[\\\\]<=>]"""', 'tweetText'], {}), '(\'[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\\\'()[\\\\]<=>]\', tweetText)\n', (544, 607), False, 'import re\n'), ((1091, 1164), 're.findall', 're.findall', (['"""[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\'()[\\\\]<=>]"""', 'tweetText'], {}), '(\'[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\\\'()[\\\\]<=>]\', tweetText)\n', (1101, 1164), False, 'import re\n'), ((2173, 2218), 'codecs.open', 'codecs.open', (['filename', '"""wb"""'], {'encoding': '"""utf-8"""'}), "(filename, 'wb', encoding='utf-8')\n", (2184, 2218), False, 'import codecs\n'), ((1710, 1775), 're.findall', 're.findall', (['"""[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\'()[\\\\]<=>]"""', 'o'], {}), '(\'[\\\\w:/!#$%&*+,\\\\-:;?@^_`{|}~.]+|[\\\\"\\\'()[\\\\]<=>]\', o)\n', (1720, 1775), False, 'import re\n')] |
import redis
import random
from . import _connector
class Connector(
_connector.Connector,
):
name = 'redis_cluster'
def __init__(
self,
nodes,
):
super().__init__()
self.nodes = nodes
self.connections = [
redis.StrictRedis(
host=node['host'],
port=node['port'],
password=node['password'],
db=node['database'],
retry_on_timeout=True,
socket_keepalive=True,
socket_connect_timeout=10,
socket_timeout=60,
)
for node in nodes
]
self.master_connection = self.connections[0]
random.shuffle(self.connections)
def rotate_connections(
self,
):
self.connections = self.connections[1:] + self.connections[:1]
def key_set(
self,
key,
value,
):
is_new = self.master_connection.set(
name=key,
value=value,
nx=True,
)
return is_new is True
def key_get(
self,
key,
):
return self.master_connection.get(
name=key,
)
def key_delete(
self,
key,
):
return self.master_connection.delete(key)
def queue_pop(
self,
queue_name,
):
connections = self.connections
for connection in connections:
value = connection.lpop(
name=queue_name,
)
self.rotate_connections()
if value:
return value
return None
def queue_pop_bulk(
self,
queue_name,
number_of_items,
):
values = []
connections = self.connections
current_count = number_of_items
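        # Drain items from each node in turn; LRANGE and LTRIM are sent in a single pipeline round trip per node.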
for connection in connections:
pipeline = connection.pipeline()
pipeline.lrange(queue_name, 0, current_count - 1)
pipeline.ltrim(queue_name, current_count, -1)
value = pipeline.execute()
self.rotate_connections()
values += value[0]
if len(values) == number_of_items:
return values
current_count = number_of_items - len(values)
return values
def queue_push(
self,
queue_name,
item,
priority='NORMAL',
):
if priority == 'HIGH':
push_returned_value = self.connections[0].lpush(queue_name, item)
else:
push_returned_value = self.connections[0].rpush(queue_name, item)
self.rotate_connections()
return push_returned_value
def queue_push_bulk(
self,
queue_name,
items,
priority='NORMAL',
):
if priority == 'HIGH':
push_returned_value = self.connections[0].lpush(queue_name, *items)
else:
push_returned_value = self.connections[0].rpush(queue_name, *items)
self.rotate_connections()
return push_returned_value
def queue_length(
self,
queue_name,
):
total_len = 0
for connection in self.connections:
total_len += connection.llen(
name=queue_name,
)
return total_len
def queue_delete(
self,
queue_name,
):
for connection in self.connections:
connection.delete(queue_name)
def __getstate__(
self,
):
state = {
'nodes': self.nodes,
}
return state
def __setstate__(
self,
value,
):
self.__init__(
nodes=value['nodes'],
)
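# Hypothetical usage sketch (node fields assumed from __init__ above):
# connector = Connector(nodes=[{'host': 'localhost', 'port': 6379, 'password': None, 'database': 0}])
# connector.queue_push('tasks', b'payload')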
| [
"random.shuffle",
"redis.StrictRedis"
] | [((722, 754), 'random.shuffle', 'random.shuffle', (['self.connections'], {}), '(self.connections)\n', (736, 754), False, 'import random\n'), ((280, 484), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': "node['host']", 'port': "node['port']", 'password': "node['password']", 'db': "node['database']", 'retry_on_timeout': '(True)', 'socket_keepalive': '(True)', 'socket_connect_timeout': '(10)', 'socket_timeout': '(60)'}), "(host=node['host'], port=node['port'], password=node[\n 'password'], db=node['database'], retry_on_timeout=True,\n socket_keepalive=True, socket_connect_timeout=10, socket_timeout=60)\n", (297, 484), False, 'import redis\n')] |
from datetime import timedelta
import pytest
from pyramid.config import Configurator
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.renderers import JSON
from pyramid.response import Response
from pyramid.security import Allow, Authenticated, remember, forget
from webtest import TestApp
def login_view(request):
return {"token": request.create_jwt_token(1)}
def login_cookie_view(request):
headers = remember(request, 1)
return Response(status=200, headers=headers, body="OK")
def logout_cookie_view(request):
headers = forget(request)
return Response(status=200, headers=headers, body="OK")
def suspicious_behaviour_view(request):
request._jwt_cookie_reissue_revoked = True
return Response(
status=200, body="Suspicious behaviour detected! Revoking cookie reissue"
)
def secure_view(request):
return "OK"
def dump_claims(request):
return request.jwt_claims
class Root:
__acl__ = [
(Allow, Authenticated, ("read",)),
]
def __init__(self, request):
pass
class NonSerializable(object):
pass
class Serializable(object):
def __json__(self):
return "This is JSON Serializable"
def extra_claims(request):
return {
"token": request.create_jwt_token(principal=1, extra_claim=NonSerializable())
}
@pytest.fixture(scope="function")
def base_config() -> Configurator:
config = Configurator()
config.set_authorization_policy(ACLAuthorizationPolicy())
config.include("pyramid_jwt")
config.set_root_factory(Root)
config.add_route("secure", "/secure")
config.add_view(
secure_view, route_name="secure", renderer="string", permission="read"
)
config.add_route("extra_claims", "/extra_claims")
config.add_view(extra_claims, route_name="extra_claims", renderer="json")
config.add_route("dump_claims", "/dump_claims")
config.add_view(
dump_claims, route_name="dump_claims", renderer="json", permission="read"
)
return config
@pytest.fixture(scope="function")
def app_config(base_config) -> Configurator:
base_config.add_route("login", "/login")
base_config.add_view(login_view, route_name="login", renderer="json")
# Enable JWT authentication.
base_config.set_jwt_authentication_policy("secret", http_header="X-Token")
return base_config
@pytest.fixture(scope="function")
def cookie_config(base_config):
base_config.add_route("login", "/login")
base_config.add_view(login_cookie_view, route_name="login", renderer="json")
base_config.add_route("logout", "/logout")
base_config.add_view(
logout_cookie_view, route_name="logout", renderer="string", permission="read"
)
base_config.add_route("suspicious", "/suspicious")
base_config.add_view(
suspicious_behaviour_view,
route_name="suspicious",
renderer="string",
permission="read",
)
# Enable JWT authentication on Cookies.
reissue_time = timedelta(seconds=1)
base_config.set_jwt_cookie_authentication_policy(
"secret",
cookie_name="Token",
expiration=5,
reissue_time=reissue_time,
https_only=False,
)
return base_config
@pytest.fixture(scope="function")
def app(app_config):
app = app_config.make_wsgi_app()
return TestApp(app)
@pytest.fixture(scope="function")
def cookie_app(cookie_config):
app = cookie_config.make_wsgi_app()
return TestApp(app)
def test_secure_view_requires_auth(app):
app.get("/secure", status=403)
def test_login(app):
r = app.get("/login")
token = str(r.json_body["token"]) # Must be str on all Python versions
r = app.get("/secure", headers={"X-Token": token})
assert r.unicode_body == "OK"
def test_pyramid_json_encoder_fail(app):
with pytest.raises(TypeError) as e:
app.get("/extra_claims")
assert "NonSerializable" in str(e.value)
assert "is not JSON serializable" in str(e.value)
def test_pyramid_json_encoder_with_adapter(app):
"""Test we can define a custom adapter using global json_renderer_factory"""
from pyramid.renderers import json_renderer_factory
def serialize_anyclass(obj, request):
return obj.__class__.__name__
json_renderer_factory.add_adapter(NonSerializable, serialize_anyclass)
response = app.get("/extra_claims")
token = str(response.json_body["token"])
response = app.get("/dump_claims", headers={"X-Token": token})
assert response.json_body["extra_claim"] == "NonSerializable"
def test_pyramid_custom_json_encoder(app_config: Configurator):
"""Test we can still use user-defined custom adapter"""
from pyramid.renderers import json_renderer_factory
def serialize_anyclass(obj, request):
assert False # This asserts this method will not be called
json_renderer_factory.add_adapter(NonSerializable, serialize_anyclass)
def other_serializer(obj, request):
return "other_serializer"
my_renderer = JSON()
my_renderer.add_adapter(NonSerializable, other_serializer)
app_config.add_renderer("json", my_renderer)
app = TestApp(app_config.make_wsgi_app())
response = app.get("/extra_claims")
token = str(response.json_body["token"])
response = app.get("/dump_claims", headers={"X-Token": token})
assert response.json_body["extra_claim"] == "other_serializer"
def test_cookie_secured(cookie_app):
response = cookie_app.get("/secure", expect_errors=True)
assert response.status_int == 403
def test_cookie_login(cookie_app):
response = cookie_app.get("/login")
assert "Token" in cookie_app.cookies
assert response.body == b"OK"
response = cookie_app.get("/secure")
assert response.body == b"OK"
def test_cookie_logout(cookie_app):
response = cookie_app.get("/login")
assert "Token" in cookie_app.cookies
assert response.body == b"OK"
response = cookie_app.get("/secure")
assert response.body == b"OK"
response = cookie_app.get("/logout")
assert response.body == b"OK"
assert "Token" not in cookie_app.cookies
response = cookie_app.get("/secure", expect_errors=True)
assert response.status_int == 403
@pytest.mark.freeze_time
def test_cookie_reissue(cookie_app, freezer):
cookie_app.get("/login")
token = cookie_app.cookies.get("Token")
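    # Tick past the 1-second reissue window while staying inside the 5-second expiration, so the cookie is refreshed.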
freezer.tick(delta=4)
cookie_app.get("/secure")
other_token = cookie_app.cookies.get("Token")
assert token != other_token
@pytest.mark.freeze_time
def test_cookie_reissue_revoke(cookie_app, freezer):
cookie_app.get("/login")
token = cookie_app.cookies.get("Token")
freezer.tick(delta=4)
cookie_app.get("/suspicious")
other_token = cookie_app.cookies.get("Token")
assert token == other_token
| [
"pyramid.renderers.JSON",
"webtest.TestApp",
"pyramid.security.forget",
"pyramid.config.Configurator",
"pyramid.security.remember",
"pyramid.renderers.json_renderer_factory.add_adapter",
"pytest.raises",
"pyramid.authorization.ACLAuthorizationPolicy",
"pytest.fixture",
"datetime.timedelta",
"pyramid.response.Response"
] | [((1345, 1377), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1359, 1377), False, 'import pytest\n'), ((2034, 2066), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2048, 2066), False, 'import pytest\n'), ((2370, 2402), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2384, 2402), False, 'import pytest\n'), ((3237, 3269), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3251, 3269), False, 'import pytest\n'), ((3355, 3387), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3369, 3387), False, 'import pytest\n'), ((437, 457), 'pyramid.security.remember', 'remember', (['request', '(1)'], {}), '(request, 1)\n', (445, 457), False, 'from pyramid.security import Allow, Authenticated, remember, forget\n'), ((469, 517), 'pyramid.response.Response', 'Response', ([], {'status': '(200)', 'headers': 'headers', 'body': '"""OK"""'}), "(status=200, headers=headers, body='OK')\n", (477, 517), False, 'from pyramid.response import Response\n'), ((567, 582), 'pyramid.security.forget', 'forget', (['request'], {}), '(request)\n', (573, 582), False, 'from pyramid.security import Allow, Authenticated, remember, forget\n'), ((594, 642), 'pyramid.response.Response', 'Response', ([], {'status': '(200)', 'headers': 'headers', 'body': '"""OK"""'}), "(status=200, headers=headers, body='OK')\n", (602, 642), False, 'from pyramid.response import Response\n'), ((743, 831), 'pyramid.response.Response', 'Response', ([], {'status': '(200)', 'body': '"""Suspicious behaviour detected! Revoking cookie reissue"""'}), "(status=200, body=\n 'Suspicious behaviour detected! Revoking cookie reissue')\n", (751, 831), False, 'from pyramid.response import Response\n'), ((1426, 1440), 'pyramid.config.Configurator', 'Configurator', ([], {}), '()\n', (1438, 1440), False, 'from pyramid.config import Configurator\n'), ((3000, 3020), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3009, 3020), False, 'from datetime import timedelta\n'), ((3339, 3351), 'webtest.TestApp', 'TestApp', (['app'], {}), '(app)\n', (3346, 3351), False, 'from webtest import TestApp\n'), ((3470, 3482), 'webtest.TestApp', 'TestApp', (['app'], {}), '(app)\n', (3477, 3482), False, 'from webtest import TestApp\n'), ((4265, 4335), 'pyramid.renderers.json_renderer_factory.add_adapter', 'json_renderer_factory.add_adapter', (['NonSerializable', 'serialize_anyclass'], {}), '(NonSerializable, serialize_anyclass)\n', (4298, 4335), False, 'from pyramid.renderers import json_renderer_factory\n'), ((4854, 4924), 'pyramid.renderers.json_renderer_factory.add_adapter', 'json_renderer_factory.add_adapter', (['NonSerializable', 'serialize_anyclass'], {}), '(NonSerializable, serialize_anyclass)\n', (4887, 4924), False, 'from pyramid.renderers import json_renderer_factory\n'), ((5019, 5025), 'pyramid.renderers.JSON', 'JSON', ([], {}), '()\n', (5023, 5025), False, 'from pyramid.renderers import JSON\n'), ((1477, 1501), 'pyramid.authorization.ACLAuthorizationPolicy', 'ACLAuthorizationPolicy', ([], {}), '()\n', (1499, 1501), False, 'from pyramid.authorization import ACLAuthorizationPolicy\n'), ((3827, 3851), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3840, 3851), False, 'import pytest\n')] |
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ctypes import *
MAX_CAMERAS = 64
class CameraNotFoundError(Exception):
pass
class TcamCamera(Structure):
_fields_ = [("model_name", c_char * 64),
("serial_number", c_char * 64),
("current_ip", c_char * 16),
("current_gateway", c_char * 16),
("current_netmask", c_char * 16),
("persistent_ip", c_char * 16),
("persistent_gateway", c_char * 16),
("persistent_netmask", c_char * 16),
("user_defined_name", c_char * 64),
("firmware_version", c_char * 64),
("mac_address", c_char * 64),
("interface_name", c_char * 64),
("is_static_ip", c_int),
("is_dhcp_enabled", c_int),
("is_reachable", c_int),
("is_busy", c_int)
]
DISCOVER_CALLBACK_FUNC = CFUNCTYPE(None, TcamCamera)
UPLOAD_CALLBACK_FUNC = CFUNCTYPE(None, c_char_p, c_int)
def _tobytes(value):
if bytes == str:
return bytes(value)
else:
return bytes(value, "utf-8")
class CameraController:
def __init__(self):
try:
self.dll = cdll.LoadLibrary("libtcam_gigewrapper.so")
except OSError:
_path = os.path.dirname(__file__)
if not _path:
_path = "."
self.dll = cdll.LoadLibrary(os.path.join(_path, "libtcam_gigewrapper.so"))
self.dll.init()
self.dll.set_persistent_parameter_s.argtypes = [c_char_p, c_char_p, c_char_p]
self.dll.set_persistent_parameter_i.argtypes = [c_char_p, c_char_p, c_int]
self.dll.rescue.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p]
self.dll.upload_firmware.argtypes = [c_char_p, c_char_p, UPLOAD_CALLBACK_FUNC]
self.cameras = []
SUCCESS = 0x0
FAILURE = 0x8000
NO_DEVICE = 0x8001
INVALID_PARAMETER = 0x8002
@staticmethod
def __getdict(struct):
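        # Convert the ctypes Structure into a plain dict, decoding byte fields to str.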
d = dict((field, getattr(struct, field)) for field, _ in struct._fields_)
for f in d:
if type(d[f]) == bytes:
d[f] = d[f].decode("utf-8")
return d
def __discover_callback(self, camera):
self.cameras.append(self.__getdict(camera))
def discover(self, get_persistent_values=False):
self.cameras = []
self.dll.get_camera_list(DISCOVER_CALLBACK_FUNC(self.__discover_callback), get_persistent_values)
return self.cameras
def set_persistent_parameter(self, identifier, key, value):
if type(value) == str:
return self.dll.set_persistent_parameter_s(_tobytes(identifier),
_tobytes(key),
_tobytes(value))
else:
return self.dll.set_persistent_parameter_i(_tobytes(identifier),
_tobytes(key), value)
def upload_firmware(self, identifier, _path, callback):
ret = self.dll.upload_firmware(_tobytes(identifier), _tobytes(_path), UPLOAD_CALLBACK_FUNC(callback))
if ret == -1:
raise RuntimeError("DeviceNotRecognized")
elif ret == -2:
raise RuntimeError("DeviceSupportsFwOnly")
elif ret == -3:
raise IOError("File not found, corrupt or not matching the camera model")
elif ret == -4:
raise RuntimeError("NoMatchFoundInPackage")
elif ret == -5:
raise RuntimeError("WriteError")
elif ret == -6:
raise RuntimeError("WriteVerificationError")
elif ret == -7:
raise RuntimeError("DeviceAccessFailed")
elif ret == -8:
raise RuntimeError("MotorFirmwareUpdateFailed")
elif ret == -9:
raise RuntimeError("FocusTableUpdateFailed")
elif ret == -10:
raise RuntimeError("MachXO2UpdateFailed")
return ret
def get_camera_details(self, identifier):
tcam = TcamCamera()
self.dll.get_camera_details(_tobytes(identifier), byref(tcam))
cam = self.__getdict(tcam)
if not cam["serial_number"]:
raise CameraNotFoundError("No such camera: '%s'" % (identifier))
return cam
def rescue(self, identifier, ip, netmask, gateway):
mac = None
for cam in self.cameras:
if identifier in [cam["serial_number"], cam["user_defined_name"], cam["mac_address"]]:
if mac is not None:
print("Camera identifier is ambiguous")
return -2
mac = cam["mac_address"]
if mac is None:
raise CameraNotFoundError("No such camera: '%s'" % (identifier))
self.dll.rescue(_tobytes(mac), _tobytes(ip), _tobytes(netmask), _tobytes(gateway))
| [
"os.path.dirname",
"os.path.join"
] | [((1892, 1917), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1907, 1917), False, 'import os\n'), ((2012, 2057), 'os.path.join', 'os.path.join', (['_path', '"""libtcam_gigewrapper.so"""'], {}), "(_path, 'libtcam_gigewrapper.so')\n", (2024, 2057), False, 'import os\n')] |
from parsimonious import NodeVisitor, Grammar, VisitationError
from . import grammars
from . import nodes
from .exceptions import OutOfContextNodeError
class TextOnlySymlParser(NodeVisitor):
grammar = Grammar(grammars.text_only_syml_grammar)
def reduce_children(self, children):
children = [c for c in children if c is not None]
if children:
return children if len(children) > 1 else children[0]
else:
return None
def visit_blank(self, node, children):
return None
def visit_line(self, node, children):
indent, value, _ = children
if value is not None:
value.level = indent
return value
def generic_visit(self, node, children):
return self.reduce_children(children)
def get_text(self, node, children):
return nodes.TextLeafNode(node, node.text)
def visit_comment(self, node, children):
_, text = children
return nodes.Comment(text)
visit_text = get_text
visit_key = get_text
def visit_indent(self, node, children):
return len(node.text.replace('\t', ' ' * 4).strip('\n'))
def visit_key_value(self, node, children):
section, _, value = children
section.incorporate_node(value)
return section
def visit_section(self, node, children):
key, _ = children
return nodes.KeyValue(node, key)
def visit_list_item(self, node, children):
_, _, value = children
li = nodes.ListItem(node)
li.incorporate_node(value)
return li
def visit_lines(self, node, children):
root = nodes.Root(node)
current = root
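        # Fold each parsed line into the tree; incorporate_node returns the node that becomes the new attachment point.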
children = self.reduce_children(children)
if isinstance(children, nodes.LeafNode):
children = [children]
for child in children:
if isinstance(child, nodes.Comment):
current.comments.append(child)
else:
current = current.incorporate_node(child)
return root
def parse(self, *args, **kwargs):
try:
return super().parse(*args, **kwargs)
except VisitationError as e:
# Parsimonious swallows errors inside of `visit_` handlers and
# wraps them in VisitationError cruft.
if e.args[0].startswith('OutOfContextNodeError'):
# Extract the original error message, ignoring the cruft.
msg = e.args[0].split('\n\n\n')[0].split(':', 1)[1]
raise OutOfContextNodeError(msg)
else:
raise # pragma: no cover
class BooleanSymlParser(TextOnlySymlParser):
"""Syml with support for YAML-like boolean values.
"""
grammar = Grammar(grammars.boolean_syml_grammar)
def visit_truthy(self, node, children):
return nodes.RawValueLeafNode(node, node.text, value=True)
def visit_falsey(self, node, children):
return nodes.RawValueLeafNode(node, node.text, value=False)
def parse(source_syml, filename='', raw=True, booleans=False):
parser = BooleanSymlParser if booleans else TextOnlySymlParser
return parser().parse(source_syml).as_data(filename, raw=raw)
| [
"parsimonious.Grammar"
] | [((208, 248), 'parsimonious.Grammar', 'Grammar', (['grammars.text_only_syml_grammar'], {}), '(grammars.text_only_syml_grammar)\n', (215, 248), False, 'from parsimonious import NodeVisitor, Grammar, VisitationError\n'), ((2745, 2783), 'parsimonious.Grammar', 'Grammar', (['grammars.boolean_syml_grammar'], {}), '(grammars.boolean_syml_grammar)\n', (2752, 2783), False, 'from parsimonious import NodeVisitor, Grammar, VisitationError\n')] |
import logging
from abc import ABC
from .parameters import ATR_COLUMNS
class AverageTrueRangeMixin(ABC):
@classmethod
def _sanitize_atr_columns_mapping(cls, columns: list = None):
if columns is None:
return ATR_COLUMNS
logging.warning(f"ATR cannot verify column accuracy, only their types.")
for c in columns:
assert isinstance(c, str), f"Columns must be provided as a list of strings."
return columns
| [
"logging.warning"
] | [((260, 332), 'logging.warning', 'logging.warning', (['f"""ATR cannot verify column accuracy, only their types."""'], {}), "(f'ATR cannot verify column accuracy, only their types.')\n", (275, 332), False, 'import logging\n')] |
import unittest
import requests
from datetime import datetime, timedelta
from project import app
import pytz
class TestWeather(unittest.TestCase):
def setUp(self):
pass
def test_weather_by_city(self):
city = 'kowloon'
r = requests.get('http://127.0.0.1/api/v1/weather?city='+city)
self.assertEqual(r.status_code,200)
response = r.json()
self.assertIsNone(response['error'])
self.assertIn(city,response['data']['city'])
def test_weather_by_range(self):
hk = pytz.timezone('Asia/Hong_Kong')
city = 'kowloon'
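        # Query a one-hour window ending now, in Hong Kong time.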
last_hour_date_time = datetime.now(hk) - timedelta(hours = 1)
start = last_hour_date_time.strftime(app.dateformat)
end = datetime.now(hk).strftime(app.dateformat)
r = requests.get('http://127.0.0.1/api/v1/weather?city='+city+'&start='+start+'&end='+end)
self.assertEqual(r.status_code,200)
response = r.json()
self.assertIsNone(response['error'])
if __name__ == '__main__':
unittest.main()
| [
"pytz.timezone",
"requests.get",
"datetime.datetime.now",
"unittest.main",
"datetime.timedelta"
] | [((1028, 1043), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1041, 1043), False, 'import unittest\n'), ((256, 316), 'requests.get', 'requests.get', (["('http://127.0.0.1/api/v1/weather?city=' + city)"], {}), "('http://127.0.0.1/api/v1/weather?city=' + city)\n", (268, 316), False, 'import requests\n'), ((536, 567), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Hong_Kong"""'], {}), "('Asia/Hong_Kong')\n", (549, 567), False, 'import pytz\n'), ((792, 892), 'requests.get', 'requests.get', (["('http://127.0.0.1/api/v1/weather?city=' + city + '&start=' + start +\n '&end=' + end)"], {}), "('http://127.0.0.1/api/v1/weather?city=' + city + '&start=' +\n start + '&end=' + end)\n", (804, 892), False, 'import requests\n'), ((623, 639), 'datetime.datetime.now', 'datetime.now', (['hk'], {}), '(hk)\n', (635, 639), False, 'from datetime import datetime, timedelta\n'), ((642, 660), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (651, 660), False, 'from datetime import datetime, timedelta\n'), ((738, 754), 'datetime.datetime.now', 'datetime.now', (['hk'], {}), '(hk)\n', (750, 754), False, 'from datetime import datetime, timedelta\n')] |
"""
Looter, Web-Scraping for Humans.
Usage:
looter genspider <name> [--async]
looter shell [<url>]
looter (-h | --help | --version)
Options:
-h --help Show this screen.
--version Show version.
--async Use async instead of concurrent.
"""
import os
import json
import code
import webbrowser
from operator import itemgetter
from itertools import groupby
from concurrent import futures
from pathlib import Path
from typing import Callable
import tempfile
import requests
import aiohttp
from parsel import Selector
from docopt import docopt
from tqdm import tqdm
VERSION = '2.21'
DEFAULT_HEADERS = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
DEFAULT_ENCODING = 'utf-8'
BANNER = """
Available objects:
url The url of the site.
res The response of the site.
tree The DOM selector tree.
Available functions:
fetch Send HTTP request and parse it as a DOM selector. [has async version]
view View the page in your browser. (test rendering)
save Save what you crawled as a file. (json or csv)
Examples:
Get all the <li> elements of a <ul> table:
>>> items = tree.css('ul li').extract()
Get all the links of a page:
>>> items = tree.css('a::attr(href)').extract()
For more info, plz refer to documentation:
[looter]: https://looter.readthedocs.io/en/latest/
"""
def fetch(url: str, **kwargs) -> Selector:
"""
Send HTTP request and parse it as a DOM selector.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions.
"""
kwargs.setdefault('headers', DEFAULT_HEADERS)
try:
res = requests.get(url, **kwargs)
res.encoding = kwargs.get('encoding', DEFAULT_ENCODING)
res.raise_for_status()
except requests.RequestException as e:
print(e)
else:
html = res.text
tree = Selector(text=html)
return tree
async def async_fetch(url: str, **kwargs) -> Selector:
"""
Do the fetch in an async style.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions.
"""
kwargs.setdefault('headers', DEFAULT_HEADERS)
async with aiohttp.ClientSession(**kwargs) as ses:
async with ses.get(url, **kwargs) as res:
html = await res.text()
tree = Selector(text=html)
return tree
def view(url: str, **kwargs) -> bool:
"""
    View the page to check whether it renders properly. (A <base> tag is inserted if missing so that external links work.)
Args:
url (str): The url of the site.
"""
kwargs.setdefault('headers', DEFAULT_HEADERS)
if b'<base' not in (html := requests.get(url, **kwargs).content):
html = html.replace(b'<head>', f'<head><base href={url}>'.encode('utf-8'))
fd, fname = tempfile.mkstemp('.html')
os.write(fd, html)
os.close(fd)
return webbrowser.open(f'file://{fname}')
def save(total: list, *, name='data.json', sort_by: str = None, no_duplicate=False, order='asc'):
"""
    Save what you crawled to a file; the default format is json.
Args:
total (list): Total of data you crawled.
name (str, optional): Defaults to 'data.json'. The name of the file.
sort_by (str, optional): Defaults to None. Sort items by a specific key.
no_duplicate (bool, optional): Defaults to False. If True, it will remove duplicated data.
order (str, optional): Defaults to 'asc'. The opposite option is 'desc'.
"""
if sort_by:
total = sorted(total, key=itemgetter(sort_by), reverse=order == 'desc')
if no_duplicate:
total = [key for key, _ in groupby(total)]
_, ext = name.split('.')
if ext == 'json':
data = json.dumps(total, ensure_ascii=False)
Path(name).write_text(data, encoding='utf-8')
elif ext == 'csv':
try:
import pandas as pd
pd.DataFrame(total).to_csv(name, encoding='utf-8')
except ImportError:
exit('pandas not installed! Plz run `pip install pandas`.')
else:
exit('Sorry, other formats are not supported yet.')
def crawl_all(crawl: Callable, tasklist: list, max_workers=50) -> list:
"""
Crawl all the tasks in a tasklist.
Args:
crawl (Callable): The "crawl" function.
tasklist (list): A list of url.
max_workers (int, optional): Max thread count. Defaults to 50.
Returns:
list: Total of data you crawled.
"""
with futures.ThreadPoolExecutor(max_workers) as executor:
fs = {executor.submit(crawl, task): task for task in tasklist}
completed = futures.as_completed(fs)
completed = tqdm(completed, total=len(tasklist))
total = []
for future in completed:
task = fs[future]
try:
result = future.result()
except Exception as e:
print(f'[{e}] {task}.')
else:
if result:
total.extend(list(result))
return total
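# A minimal usage sketch of the fetch / crawl_all / save workflow above.
# The URL pattern, CSS selectors and field names are hypothetical placeholders,
# not part of looter itself; adapt them to the site you actually scrape.
def example_crawl(url: str) -> list:
    """Crawl one page and return a list of dicts, one per scraped item."""
    tree = fetch(url)
    items = []
    for item in tree.css('div.item'):  # hypothetical selector
        items.append({
            'title': item.css('a::text').extract_first(),
            'link': item.css('a::attr(href)').extract_first(),
        })
    return items
# Example (not executed on import):
#   tasklist = [f'https://example.com/page/{i}' for i in range(1, 6)]
#   total = crawl_all(example_crawl, tasklist, max_workers=10)
#   save(total, name='items.json', sort_by='title', no_duplicate=True)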
def cli():
"""
Commandline for looter :d
"""
argv = docopt(__doc__, version=VERSION)
if argv['genspider']:
template = 'data_async.tmpl' if argv['--async'] else 'data.tmpl'
template_path = Path(__file__).parent / 'templates' / template
Path(f"{argv['<name>']}.py").write_text(template_path.read_text())
if argv['shell']:
url = argv['<url>'] if argv['<url>'] else input('Specify the url: ')
res = requests.get(url, headers=DEFAULT_HEADERS)
res.encoding = DEFAULT_ENCODING
if not res:
exit('Failed to fetch the page.')
tree = Selector(text=res.text)
allvars = {**locals(), **globals()}
try:
from ptpython.repl import embed
print(BANNER)
embed(allvars)
except ImportError:
code.interact(local=allvars, banner=BANNER)
if __name__ == '__main__':
cli()
| [
"aiohttp.ClientSession",
"itertools.groupby",
"parsel.Selector",
"os.close",
"os.write",
"concurrent.futures.ThreadPoolExecutor",
"json.dumps",
"pathlib.Path",
"webbrowser.open",
"requests.get",
"concurrent.futures.as_completed",
"ptpython.repl.embed",
"code.interact",
"pandas.DataFrame",
"operator.itemgetter",
"tempfile.mkstemp",
"docopt.docopt"
] | [((3055, 3080), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".html"""'], {}), "('.html')\n", (3071, 3080), False, 'import tempfile\n'), ((3085, 3103), 'os.write', 'os.write', (['fd', 'html'], {}), '(fd, html)\n', (3093, 3103), False, 'import os\n'), ((3108, 3120), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (3116, 3120), False, 'import os\n'), ((3132, 3166), 'webbrowser.open', 'webbrowser.open', (['f"""file://{fname}"""'], {}), "(f'file://{fname}')\n", (3147, 3166), False, 'import webbrowser\n'), ((5356, 5388), 'docopt.docopt', 'docopt', (['__doc__'], {'version': 'VERSION'}), '(__doc__, version=VERSION)\n', (5362, 5388), False, 'from docopt import docopt\n'), ((1842, 1869), 'requests.get', 'requests.get', (['url'], {}), '(url, **kwargs)\n', (1854, 1869), False, 'import requests\n'), ((2074, 2093), 'parsel.Selector', 'Selector', ([], {'text': 'html'}), '(text=html)\n', (2082, 2093), False, 'from parsel import Selector\n'), ((2443, 2474), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '(**kwargs)\n', (2464, 2474), False, 'import aiohttp\n'), ((3976, 4013), 'json.dumps', 'json.dumps', (['total'], {'ensure_ascii': '(False)'}), '(total, ensure_ascii=False)\n', (3986, 4013), False, 'import json\n'), ((4732, 4771), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['max_workers'], {}), '(max_workers)\n', (4758, 4771), False, 'from concurrent import futures\n'), ((4876, 4900), 'concurrent.futures.as_completed', 'futures.as_completed', (['fs'], {}), '(fs)\n', (4896, 4900), False, 'from concurrent import futures\n'), ((5747, 5789), 'requests.get', 'requests.get', (['url'], {'headers': 'DEFAULT_HEADERS'}), '(url, headers=DEFAULT_HEADERS)\n', (5759, 5789), False, 'import requests\n'), ((5911, 5934), 'parsel.Selector', 'Selector', ([], {'text': 'res.text'}), '(text=res.text)\n', (5919, 5934), False, 'from parsel import Selector\n'), ((2588, 2607), 'parsel.Selector', 'Selector', ([], {'text': 'html'}), '(text=html)\n', (2596, 2607), False, 'from parsel import Selector\n'), ((6074, 6088), 'ptpython.repl.embed', 'embed', (['allvars'], {}), '(allvars)\n', (6079, 6088), False, 'from ptpython.repl import embed\n'), ((2918, 2945), 'requests.get', 'requests.get', (['url'], {}), '(url, **kwargs)\n', (2930, 2945), False, 'import requests\n'), ((3792, 3811), 'operator.itemgetter', 'itemgetter', (['sort_by'], {}), '(sort_by)\n', (3802, 3811), False, 'from operator import itemgetter\n'), ((3894, 3908), 'itertools.groupby', 'groupby', (['total'], {}), '(total)\n', (3901, 3908), False, 'from itertools import groupby\n'), ((4022, 4032), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (4026, 4032), False, 'from pathlib import Path\n'), ((5567, 5595), 'pathlib.Path', 'Path', (['f"""{argv[\'<name>\']}.py"""'], {}), '(f"{argv[\'<name>\']}.py")\n', (5571, 5595), False, 'from pathlib import Path\n'), ((6129, 6172), 'code.interact', 'code.interact', ([], {'local': 'allvars', 'banner': 'BANNER'}), '(local=allvars, banner=BANNER)\n', (6142, 6172), False, 'import code\n'), ((5512, 5526), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5516, 5526), False, 'from pathlib import Path\n'), ((4148, 4167), 'pandas.DataFrame', 'pd.DataFrame', (['total'], {}), '(total)\n', (4160, 4167), True, 'import pandas as pd\n')] |
# M_ADL96_YZ483_Lab2
# <NAME> (ADL96)
# <NAME> (YZ483)
# Sep 27, 2021
import RPi.GPIO as GPIO
import sys, os
import pygame
from pygame.locals import*
import time
keep_run = True # Global variable used to track running status
# Button 27: quit
def GPIO27_callback(channel):
print ("Button #27 pressed, quit.")
global keep_run
keep_run = False # Set the keep_run flag to false in order to quit
def run():
pygame.init()
pygame.mouse.set_visible(True)
WHITE = 255,255,255
BLACK = 0,0,0
size = width, height = 320, 240 #resolution of window
screen = pygame.display.set_mode(size)
my_font = pygame.font.Font(None, 20)
start_btn = {'Start': (80,220)}
start_rect = None
quit_btn = {'Quit': (240,220)}
quit_rect = None
faster_btn = {'Faster': (40,220)}
faster_rect = None
slower_btn = {'Slower': (120,220)}
slower_rect = None
pause_btn = {'Pause': (200,220)}
pause_rect = None
back_btn = {'Back': (280,220)}
back_rect = None
playing = False # Menu level
freeze = False # Pause flag
# Two collide init
    speed1 = [1, 1]  # speed of ball 1 (pixels per frame in x and y directions)
    speed2 = [1, 1]  # speed of ball 2 (pixels per frame in x and y directions)
ball1 = pygame.image.load("soccer-ball.png")
ball2 = pygame.image.load("magic_ball.png")
ball1 = pygame.transform.scale(ball1, (50, 50))
ball2 = pygame.transform.scale(ball2, (50, 50))
ballrect1 = ball1.get_rect()
ballrect2 = ball2.get_rect()
ballrect1.size=(40,40)
ballrect2.size=(40,40)
ballrect2.center = (280, 150)
speed = 0.002 # Speed control
screen.fill(BLACK)
global keep_run
while keep_run:
# Exit event detect
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
keep_run = False
# Blit button text
if(playing): # Menu level 2 (four buttons)
for my_text, text_pos in faster_btn.items(): # Faster
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
faster_rect = rect
screen.blit(text_surface, rect)
for my_text, text_pos in slower_btn.items(): # Slower
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
slower_rect = rect
screen.blit(text_surface, rect)
for my_text, text_pos in pause_btn.items(): # Pause
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
pause_rect = rect
screen.blit(text_surface, rect)
for my_text, text_pos in back_btn.items(): # Back
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
back_rect = rect
screen.blit(text_surface, rect)
else: # Menu level 1 (two buttons)
for my_text, text_pos in start_btn.items(): # Start
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
start_rect = rect
screen.blit(text_surface, rect)
for my_text, text_pos in quit_btn.items(): # Quit
text_surface = my_font.render(my_text, True, WHITE)
rect = text_surface.get_rect(center=text_pos)
quit_rect = rect
screen.blit(text_surface, rect)
# Display Flip
pygame.display.flip()
# Touch detections
for event in pygame.event.get():
if(event.type is MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
if(playing): # Menu level 2
if faster_rect.collidepoint(pos): # Faster
print ("Faster button pressed")
if(speed > 0.0005):
speed -= 0.0002
elif slower_rect.collidepoint(pos): # Slower
print ("Slower button pressed")
if(speed < 0.05):
speed += 0.0002
elif pause_rect.collidepoint(pos): # Pause
print ("Pause button pressed")
freeze = not freeze
elif back_rect.collidepoint(pos): # Back
print ("Back button pressed")
playing = False
freeze = False
else: # Menu level 1
if quit_rect.collidepoint(pos): # Quit
print ("Quit button pressed")
keep_run = False
elif start_rect.collidepoint(pos): # Start
print ("Start button pressed")
playing = True
freeze = False
else: # Print screen coordinates
print(pos)
screen.fill(BLACK)
text_surface = my_font.render(("touch at "+str(pos)), True, WHITE)
rect = text_surface.get_rect(center=(160,120))
screen.blit(text_surface, rect)
elif(event.type is MOUSEBUTTONUP):
pos = pygame.mouse.get_pos()
x,y = pos
# Refresh next screen
screen.fill(BLACK)
# Two collide animation
if(playing):
if(not freeze):
ballrect1 = ballrect1.move(speed1)
ballrect2 = ballrect2.move(speed2)
# Collide detect
collide = ballrect1.colliderect(ballrect2)
if collide:
speed1[0] = -speed1[0]
speed1[1] = -speed1[1]
speed2[0] = -speed2[0]
speed2[1] = -speed2[1]
# Edge detect
if ballrect1.left < 0 or ballrect1.right > width:
speed1[0] = -speed1[0]
if ballrect1.top < 0 or ballrect1.bottom > height-50:
speed1[1] = -speed1[1]
if ballrect2.left < 0 or ballrect2.right > width:
speed2[0] = -speed2[0]
if ballrect2.top < 0 or ballrect2.bottom > height-50:
speed2[1] = -speed2[1]
screen.blit(ball1, ballrect1)
screen.blit(ball2, ballrect2)
time.sleep(speed)
pygame.display.quit() # Uninitialize the display module
pygame.quit() # Shutdown pygame
if __name__ == "__main__":
os.putenv('SDL_VIDEODRIVER', 'fbcon') # Display on piTFT
os.putenv('SDL_FBDEV', '/dev/fb0') #
os.putenv('SDL_MOUSEDRV', 'TSLIB') # Track mouse clicks on piTFT
os.putenv('SDL_MOUSEDEV', '/dev/input/touchscreen')
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(27, GPIO.FALLING, callback=GPIO27_callback, bouncetime=300)
run()
GPIO.cleanup() # clean up GPIO on normal exit
| [
"RPi.GPIO.cleanup",
"RPi.GPIO.add_event_detect",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"os.putenv",
"RPi.GPIO.setup",
"pygame.display.flip",
"pygame.mouse.get_pos",
"time.sleep",
"pygame.mouse.set_visible",
"pygame.display.quit",
"pygame.image.load",
"pygame.font.Font",
"RPi.GPIO.setmode",
"pygame.transform.scale"
] | [((440, 453), 'pygame.init', 'pygame.init', ([], {}), '()\n', (451, 453), False, 'import pygame\n'), ((458, 488), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (482, 488), False, 'import pygame\n'), ((602, 631), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (625, 631), False, 'import pygame\n'), ((646, 672), 'pygame.font.Font', 'pygame.font.Font', (['None', '(20)'], {}), '(None, 20)\n', (662, 672), False, 'import pygame\n'), ((1289, 1325), 'pygame.image.load', 'pygame.image.load', (['"""soccer-ball.png"""'], {}), "('soccer-ball.png')\n", (1306, 1325), False, 'import pygame\n'), ((1338, 1373), 'pygame.image.load', 'pygame.image.load', (['"""magic_ball.png"""'], {}), "('magic_ball.png')\n", (1355, 1373), False, 'import pygame\n'), ((1386, 1425), 'pygame.transform.scale', 'pygame.transform.scale', (['ball1', '(50, 50)'], {}), '(ball1, (50, 50))\n', (1408, 1425), False, 'import pygame\n'), ((1438, 1477), 'pygame.transform.scale', 'pygame.transform.scale', (['ball2', '(50, 50)'], {}), '(ball2, (50, 50))\n', (1460, 1477), False, 'import pygame\n'), ((6839, 6860), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (6858, 6860), False, 'import pygame\n'), ((6905, 6918), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6916, 6918), False, 'import pygame\n'), ((6983, 7020), 'os.putenv', 'os.putenv', (['"""SDL_VIDEODRIVER"""', '"""fbcon"""'], {}), "('SDL_VIDEODRIVER', 'fbcon')\n", (6992, 7020), False, 'import sys, os\n'), ((7044, 7078), 'os.putenv', 'os.putenv', (['"""SDL_FBDEV"""', '"""/dev/fb0"""'], {}), "('SDL_FBDEV', '/dev/fb0')\n", (7053, 7078), False, 'import sys, os\n'), ((7085, 7119), 'os.putenv', 'os.putenv', (['"""SDL_MOUSEDRV"""', '"""TSLIB"""'], {}), "('SDL_MOUSEDRV', 'TSLIB')\n", (7094, 7119), False, 'import sys, os\n'), ((7154, 7205), 'os.putenv', 'os.putenv', (['"""SDL_MOUSEDEV"""', '"""/dev/input/touchscreen"""'], {}), "('SDL_MOUSEDEV', '/dev/input/touchscreen')\n", (7163, 7205), False, 'import sys, os\n'), ((7215, 7237), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (7227, 7237), True, 'import RPi.GPIO as GPIO\n'), ((7242, 7291), 'RPi.GPIO.setup', 'GPIO.setup', (['(27)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (7252, 7291), True, 'import RPi.GPIO as GPIO\n'), ((7296, 7381), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['(27)', 'GPIO.FALLING'], {'callback': 'GPIO27_callback', 'bouncetime': '(300)'}), '(27, GPIO.FALLING, callback=GPIO27_callback,\n bouncetime=300)\n', (7317, 7381), True, 'import RPi.GPIO as GPIO\n'), ((7396, 7410), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (7408, 7410), True, 'import RPi.GPIO as GPIO\n'), ((1803, 1821), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1819, 1821), False, 'import pygame\n'), ((3805, 3826), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3824, 3826), False, 'import pygame\n'), ((3876, 3894), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3892, 3894), False, 'import pygame\n'), ((6817, 6834), 'time.sleep', 'time.sleep', (['speed'], {}), '(speed)\n', (6827, 6834), False, 'import time\n'), ((1880, 1901), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (1899, 1901), False, 'import pygame\n'), ((3965, 3987), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (3985, 3987), False, 'import pygame\n'), ((5661, 5683), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (5681, 
5683), False, 'import pygame\n')] |
import os
import pandas as pd
####Loading The Batsmen Data#######
bb = {}
matches = 0
directory = "all_csv/"
for root,dirs,files in os.walk(directory):
for file in files:
if matches>5000:
break
match_bb = {}
#1 match = 1CSV | Each match handling
if file.endswith(".csv"):
matches += 1
f = open(directory+file,'r')
for i in f.readlines():
x = i.split(",")
if(x[0]=="version"):
continue
elif(x[0]=='info'):
if(x[1]=='gender' and x[2]=='female'):
break
elif(x[0]=='ball'):
batsmen = x[4]
six = 1 if x[7]=='6' else 0
four = 1 if x[7]=='4' else 0
wicket = 1 if len(x[9])>2 else 0
ball = 1
dot = 1 if x[7]=='0' and not wicket else 0
if(batsmen not in match_bb.keys()):
match_bb[batsmen] = [int(x[7]),four,six,wicket,1] #run,wicket,4's and 6's,balls faced
else:
match_bb[batsmen][0] += int(x[7]) #doesn't matter in case of wicket as it will be 0
match_bb[batsmen][1] += four
match_bb[batsmen][2] += six
match_bb[batsmen][3] = wicket #wicket fallen
match_bb[batsmen][4] += ball
f.close()
for i in match_bb.keys():
# Matches,Runs,4's,6's,Wickets,balls faced,50,100,0's , HighScore
if(i not in bb.keys()):
bb[i] = [1] + match_bb[i]
if(match_bb[i][0]>=50):
bb[i] = bb[i] + [1]
else:
bb[i] = bb[i] + [0]
if(match_bb[i][0]>=100):
bb[i] = bb[i] + [1]
else:
bb[i] = bb[i] + [0]
if(match_bb[i][0]==0):
bb[i] = bb[i] + [1]
else:
bb[i] = bb[i] + [0]
bb[i] = bb[i] + [match_bb[i][0]]
else:
bb[i][0] = bb[i][0] + 1
bb[i][1] = bb[i][1] + match_bb[i][0]
bb[i][2] = bb[i][2] + match_bb[i][1]
bb[i][3] = bb[i][3] + match_bb[i][2]
bb[i][4] = bb[i][4] + match_bb[i][3]
bb[i][5] = bb[i][5] + match_bb[i][4]
if(match_bb[i][0]>=50):
bb[i][6] = bb[i][6] + 1
if(match_bb[i][0]>=100):
bb[i][7] = bb[i][7] + 1
if(match_bb[i][0]==0):
bb[i][8] = bb[i][8] + 1
if(bb[i][9] < match_bb[i][0]):
bb[i][9] = match_bb[i][0]
d = {'Batsmen':[],'Matches':[],'Runs':[],'Wickets':[],'Balls':[],'0':[],'4':[],'6':[],'Strike Rate':[],'Average':[],'50':[],'100':[],'High Score':[]}
for i in bb.keys():
d['Batsmen'].append(i)
d['Matches'].append(bb[i][0])
d['Runs'].append(bb[i][1])
d['4'].append(bb[i][2])
d['6'].append(bb[i][3])
d['Wickets'].append(bb[i][4])
d['Balls'].append(bb[i][5])
d['50'].append(bb[i][6])
d['100'].append(bb[i][7])
d['0'].append(bb[i][8])
d['High Score'].append(bb[i][9])
d['Strike Rate'].append(float(bb[i][1])*100/int(bb[i][5]))
if(int(bb[i][4])==0):
d['Average'].append(float(bb[i][1])/int(bb[i][0]))
else:
d['Average'].append(float(bb[i][1])/int(bb[i][4]))
df = pd.DataFrame(d)
df.to_csv(r'/content/drive/My Drive/batsmen.csv', index=False)
####Loading The Bowler Data#######
bb = {}
matches = 0
directory = "all_csv/"
for root,dirs,files in os.walk(directory):
for file in files:
if matches>5000:
break
match_bb = {}
#1 match = 1CSV | Each match handling
if file.endswith(".csv"):
matches += 1
f = open(directory+file,'r')
for i in f.readlines():
x = i.split(",")
if(x[0]=="version"):
continue
elif(x[0]=='info'):
if(x[1]=='gender' and x[2]=='female'):
break
elif(x[0]=='ball'):
bowler = x[6]
six = 1 if x[7]=='6' else 0
four = 1 if x[7]=='4' else 0
wicket = 1 if len(x[9])>2 else 0
ball = 1
if(bowler not in match_bb.keys()):
match_bb[bowler] = [int(x[7]),four,six,wicket,1] #run,wicket,4's and 6's,balls faced
else:
match_bb[bowler][0] += int(x[7]) #doesn't matter in case of wicket as it will be 0
match_bb[bowler][1] += four
match_bb[bowler][2] += six
match_bb[bowler][3] += wicket #wicket fallen
match_bb[bowler][4] += ball
f.close()
for i in match_bb.keys():
# Matches,Runs,4's,6's,Wickets,balls faced,50,100,0's , HighScore
if(i not in bb.keys()):
bb[i] = [1] + match_bb[i]
bb[i][5] = int(bb[i][5]/6) + int(bb[i][5]%6)
else:
bb[i][0] = bb[i][0] + 1
bb[i][1] = bb[i][1] + match_bb[i][0]
bb[i][2] = bb[i][2] + match_bb[i][1]
bb[i][3] = bb[i][3] + match_bb[i][2]
bb[i][4] = bb[i][4] + match_bb[i][3]
bb[i][5] = bb[i][5] + match_bb[i][4]
d = {'Bowler':[],'Matches':[],'Overs':[],'Runs':[],'4':[],'6':[],'Wickets':[],'Economy':[],'Average':[],'Strike Rate':[]}
for i in bb.keys():
d['Bowler'].append(i)
d['Matches'].append(bb[i][0])
d['Runs'].append(bb[i][1])
d['4'].append(bb[i][2])
d['6'].append(bb[i][3])
d['Wickets'].append(bb[i][4])
d['Overs'].append(int(bb[i][5]/6)+int(bb[i][5]%6)/10)
if(int(bb[i][5]/6)==0):
d['Economy'].append(float(bb[i][1])/int(bb[i][5]%6))
else:
d['Economy'].append(float(bb[i][1])/int(bb[i][5]/6))
if(int(bb[i][4])==0):
d['Average'].append(float(bb[i][1]))
else:
d['Average'].append(float(bb[i][1])/int(bb[i][4]))
if(int(bb[i][4])==0):
d['Strike Rate'].append(float(bb[i][5]))
else:
d['Strike Rate'].append(float(bb[i][5])/int(bb[i][4]))
df = pd.DataFrame(d)
df.to_csv(r'bowler.csv', index=False)
| [
"pandas.DataFrame",
"os.walk"
] | [((165, 183), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (172, 183), False, 'import os\n'), ((3675, 3690), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3687, 3690), True, 'import pandas as pd\n'), ((3889, 3907), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (3896, 3907), False, 'import os\n'), ((6741, 6756), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6753, 6756), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join
from scrapy.item import Item, Field
from collections import defaultdict
class Item(scrapy.Item):
business_name = scrapy.Field()
website = scrapy.Field()
phonenumber = scrapy.Field()
email = scrapy.Field()
location = scrapy.Field()
second_location = scrapy.Field()
# third_location = scrapy.Field()
# fourth_location = scrapy.Field()
visit_id = scrapy.Field()
visit_status = scrapy.Field()
| [
"scrapy.Field"
] | [((388, 402), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (400, 402), False, 'import scrapy\n'), ((417, 431), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (429, 431), False, 'import scrapy\n'), ((450, 464), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (462, 464), False, 'import scrapy\n'), ((477, 491), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (489, 491), False, 'import scrapy\n'), ((507, 521), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (519, 521), False, 'import scrapy\n'), ((544, 558), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (556, 558), False, 'import scrapy\n'), ((654, 668), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (666, 668), False, 'import scrapy\n'), ((688, 702), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (700, 702), False, 'import scrapy\n')] |
__author__ = 'rogerjiang'
'''
Purposes:
1. Visualization of training data
2. Evaluation of training data augmentation
'''
'''
Notes on the data files:
train_wkt_v4.csv: training labels with ImageId, ClassType, MultipolygonWKT
train_geojson_v3 (similar to train_wkt_v4.csv): training labels with ImageId
(folder name), ClassType (detailed, name of .geojson files), Multipolygon
(data of .geojson files, also contains detailed ClassType information)
grid_size.csv: sizes of all images with ImageId, 0<Xmax<1, -1<Ymin<0
(size of images, assuming origin (0,0) is at the upper left corner)
three_band: all 3-band images, in name of ImageId.tif
sixteen_band: all 16-band images, in name of ImageId_{A,M,P}.tif
sample_submission.csv: submission with ImageId, ClassType, MultipolygonWKT
Note that the dimension order is x-y in the image data, but it is switched
to y-x in grid_sizes and in the wkt data from train_wkt_v4.
-------------
'''
'''
The combination of ClassType and MultipolygonWKT gives the pixel-wise
class labels.
The 'three_band' and 'sixteen_band' folders are the input for training.
ImageId connects the class labels with the training data.
MultipolygonWKT holds relative positions in the figure and can be converted to
pixel coordinates with the grid sizes (Xmax, Ymin).
There is a slight mismatch between the three_band and sixteen_band data due to
a delay between measurements, so they should be aligned before use.
'''
import shapely.wkt as wkt
import pandas as pd
import cv2
import numpy as np
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch
from matplotlib.patches import Patch
from matplotlib import cm
from shapely import affinity
from shapely.affinity import scale
from shapely.geometry import MultiPolygon, Polygon
from collections import defaultdict
import sys
import seaborn as sns
import os
CLASSES = {
1: 'Bldg',
2: 'Struct',
3: 'Road',
4: 'Track',
5: 'Trees',
6: 'Crops',
7: 'Fast H2O',
8: 'Slow H2O',
9: 'Truck',
10: 'Car',
}
COLORS = {
1: '0.7',
2: '0.4',
3: '#b35806',
4: '#dfc27d',
5: '#1b7837',
6: '#a6dba0',
7: '#74add1',
8: '#4575b4',
9: '#f46d43',
10: '#d73027',
}
# ZORDER defines the priority for plotting overlay of class labels.
ZORDER = {
1: 6,
2: 5,
3: 4,
4: 1,
5: 3,
6: 2,
7: 7,
8: 8,
9: 9,
10: 10,
}
def resize(im, shape_out):
return cv2.resize(im, (shape_out[1], shape_out[0]),
interpolation=cv2.INTER_CUBIC)
def rgb2gray(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])  # standard luma weights; 0.144 was a typo
def crop(img, crop_coord):
width, height = img.shape[0], img.shape[1]
x_lim = crop_coord[0].astype(np.int)
y_lim = crop_coord[1].astype(np.int)
assert 0 <= x_lim[0] < x_lim[1] <= width
assert 0 <= y_lim[0] < y_lim[1] <= height
return img[x_lim[0]: x_lim[1], y_lim[0]: y_lim[1]]
def affine_transform(img, warp_matrix, out_shape):
'''
Apply affine transformation using warp_matrix to img, and perform
interpolation as needed
:param img:
:param warp_matrix:
:param out_shape:
:return:
'''
new_img = cv2.warpAffine(img, warp_matrix, (out_shape[1], out_shape[0]),
flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
borderMode= cv2.BORDER_REPLICATE)
# new_img[new_img == 0] = np.average(new_img)
return new_img
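# Minimal usage sketch for affine_transform, e.g. to shift one band onto another.
# The 2x3 matrix follows the cv2.warpAffine convention [[1, 0, tx], [0, 1, ty]];
# because affine_transform passes cv2.WARP_INVERSE_MAP, the matrix maps output
# coordinates back to input coordinates. The shift values below are arbitrary
# examples, not measured offsets between the three_band and sixteen_band images.
def example_translate(img, tx=3.0, ty=-2.0):
    warp_matrix = np.array([[1., 0., tx],
                            [0., 1., ty]], dtype=np.float32)
    return affine_transform(img, warp_matrix, img.shape[:2])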
def get_polygon_list(image_id, class_type, train_wkt_v4):
'''
Load the wkt data (relative coordiantes of polygons) from csv file and
returns a list of polygons (in the format of shapely multipolygon)
:param image_id:
:param class_type:
:return:
'''
all_polygon = train_wkt_v4[train_wkt_v4.ImageId == image_id]
polygon = all_polygon[all_polygon.ClassType == class_type].MultipolygonWKT
# For empty polygon, polygon is a string of 'MULTIPOLYGON EMPTY'
# wkt.loads will automatically handle this and len(polygon_list) returns 0
# But polygon_list will never be None!
polygon_list = wkt.loads(polygon.values[0])
return polygon_list
def convert_coordinate_to_raster(coords, img_size, xymax):
'''
Converts the relative coordinates of contours into raster coordinates.
:param coords:
:param img_size:
:param xymax:
:return:
'''
xmax, ymax = xymax
width, height = img_size
coords[:, 0] *= (height + 1) / xmax
coords[:, 1] *= (width + 1) / ymax
coords = np.round(coords).astype(np.int32)
return coords
def generate_contours(polygon_list, img_size, xymax):
'''
Convert shapely MultipolygonWKT type of data (relative coordinate) into
list type of date for polygon raster coordinates
:param polygon_list:
:param img_size:
:param xymax:
:return:
'''
if len(polygon_list) == 0:
return [], []
to_ind = lambda x: np.array(list(x)).astype(np.float32)
perim_list = [convert_coordinate_to_raster(to_ind(poly.exterior.coords),
img_size, xymax)
for poly in polygon_list]
inter_list = [convert_coordinate_to_raster(
to_ind(poly.coords), img_size, xymax)
for poly_ex in polygon_list for poly in poly_ex.interiors]
return perim_list, inter_list
def generate_mask_from_contours(img_size, perim_list, inter_list, class_id = 1):
'''
Create pixel-wise mask from contours from polygon of raster coordinates
:param img_size:
:param perim_list:
:param inter_list:
:param class_id:
:return:
'''
mask = np.zeros(img_size, np.uint8)
if perim_list is None:
return mask
# mask should match the dimension of image
# however, cv2.fillpoly assumes the x and y axes are oppsite between mask and
# perim_list (inter_list)
cv2.fillPoly(mask, perim_list, class_id)
cv2.fillPoly(mask, inter_list, 0)
return mask
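# Minimal sketch of the full label pipeline for one image and one class:
# WKT polygons -> raster contours -> pixel mask. It assumes train_wkt_v4 and
# grid_sizes have been loaded as pandas DataFrames (see the notes above);
# image_id, class_id and img_size are placeholders supplied by the caller.
def example_class_mask(image_id, class_id, img_size, train_wkt_v4, grid_sizes):
    xmax = grid_sizes[grid_sizes.ImageId == image_id].Xmax.values[0]
    ymin = grid_sizes[grid_sizes.ImageId == image_id].Ymin.values[0]
    polygon_list = get_polygon_list(image_id, class_id, train_wkt_v4)
    perim_list, inter_list = generate_contours(polygon_list, img_size, (xmax, ymin))
    # fill with 1 so the result is a binary {0, 1} mask for this class
    return generate_mask_from_contours(img_size, perim_list, inter_list, class_id=1)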
def plot_polygon(polygon_list, ax, scaler = None, alpha = 0.7):
'''
polygon_list is a dictionary of polygon list for all class types.
key is the class id, and value is the polygon list.
:param polygon_list:
:param ax:
:param scaler:
:param alpha:
:return:
'''
legend_list = []
for cl in CLASSES:
# Patch is a function in the matplotlib.patches module
legend_list.append(Patch(
color = COLORS[cl],
label = '{}: ({})'.format(CLASSES[cl], len(polygon_list[cl]))))
for polygon in polygon_list[cl]:
if scaler is not None:
# affinity is a function from shapely
polygon_rescale = affinity.scale(polygon, xfact = scaler[1],
yfact = scaler[0],
origin = [0., 0., 0.])
else:
polygon_rescale = polygon
# PolygonPatch is a function from descartes.patch module
# polygon_list is in relative coordinates and they are
# generated from get_polygon_list and are further
# converted to raster coordinates through scaler.
patch = PolygonPatch(polygon = polygon_rescale, color = COLORS[cl],
lw = 0, alpha = alpha, zorder = ZORDER[cl])
ax.add_patch(patch)
ax.autoscale_view()
ax.set_title('Objects')
ax.set_xticks([])
ax.set_yticks([])
return legend_list
def plot_image(img, ax, image_id, image_key, selected_channel = None):
'''
Plot an selected channels of img into ax.
:param img:
:param ax:
:param image_id:
:param image_key:
:param selected_channel:
:return:
'''
title_suffix = ''
if selected_channel is not None:
img = img[:, :, selected_channel]
title_suffix = '(' + ','.join(repr(i) for i in selected_channel) + ')'
ax.imshow(img)
ax.set_title(image_id + '-' + image_key + title_suffix)
ax.set_xlabel(img.shape[0])
ax.set_ylabel(img.shape[1])
ax.set_xticks([])
ax.set_yticks([])
def plot_overlay(img, ax, image_id, image_key, polygon_list, scaler = [1., 1.],
x_range = None, y_range = None, label = None, alpha = 1.0,
rgb = False):
'''
Plot image with polygon overlays
:param img:
:param ax:
:param image_id:
:param image_key:
:param polygon_list:
:param scaler:
:return:
'''
# cm is a function from matplotlib
if not x_range:
x_range = [0, img.shape[0]]
if not y_range:
y_range = [0, img.shape[1]]
if rgb:
ax.imshow(scale_percentile(img), vmax=1., vmin=0.)
else:
ax.imshow(scale_percentile(rgb2gray(img)),
cmap = cm.gray, vmax = 1., vmin = 0.)
ax.set_xlabel(x_range[1] - x_range[0])
ax.set_ylabel(y_range[1] - y_range[0])
legend = plot_polygon(polygon_list, ax, scaler, alpha = alpha)
ax.set_title(image_id + '-' + image_key + '-Overlay')
return legend
def scale_percentile(img):
'''
Scale an image's 1 - 99 percentiles into 0 - 1 for display
:param img:
:return:
'''
orig_shape = img.shape
if len(orig_shape) == 3:
img = np.reshape(img,
[orig_shape[0] * orig_shape[1], orig_shape[2]]
).astype(np.float32)
elif len(orig_shape) == 2:
img = np.reshape(img, [orig_shape[0] * orig_shape[1]]).astype(np.float32)
mins = np.percentile(img, 1, axis = 0)
maxs = np.percentile(img, 99, axis = 0) - mins
img = (img - mins) / maxs
img.clip(0., 1.)
img = np.reshape(img, orig_shape)
return img
def get_image_area(image_id):
'''
Calculate the area of an image
:param image_id:
:return:
'''
xmax = grid_sizes[grid_sizes.ImageId == image_id].Xmax.values[0]
ymin = grid_sizes[grid_sizes.ImageId == image_id].Ymin.values[0]
return abs(xmax * ymin)
def image_stat(image_id):
'''
Return the statistics ofd an image as a pd dataframe
:param image_id:
:return:
'''
counts, total_area, mean_area, std_area = {}, {}, {}, {}
img_area = get_image_area(image_id)
for cl in CLASSES:
        polygon_list = get_polygon_list(image_id, cl, train_wkt_v4)  # assumes a module-level train_wkt_v4 DataFrame, like grid_sizes
counts[cl] = len(polygon_list)
if len(polygon_list) > 0:
total_area[cl] = np.sum([poly.area for poly in polygon_list])\
/ img_area * 100.
mean_area[cl] = np.mean([poly.area for poly in polygon_list])\
/ img_area * 100.
std_area[cl] = np.std([poly.area for poly in polygon_list])\
/ img_area * 100.
return pd.DataFrame({'Class': CLASSES, 'Counts': counts,
'TotalArea': total_area, 'MeanArea': mean_area,
'STDArea': std_area})
def collect_stats():
'''
Collect the area statistics for all images and concatenate them
:return:
'''
stats = []
total_no = len(all_train_names) - 1
for image_no, image_id in enumerate(all_train_names):
stat = image_stat(image_id)
stat['ImageId'] = image_id
stats.append(stat)
sys.stdout.write('\rCollecting class stats [{}{}] {}%'.\
format('=' * image_no,
' ' * (total_no - image_no),
100 * image_no / total_no))
sys.stdout.flush()
sys.stdout.write('\n')
return pd.concat(stats)
def calculate_class_weights():
'''
:return: class-wise true-label-area / false-label-area as a dictionary
'''
df = collect_stats()
df = df.fillna(0)
df = df.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
df = df.sum(axis=1)
df = df / (2500. - df)
return df.to_dict()
def plot_stats(value, title):
'''
Plot 2D grid plot of statistics of MeanArea, Counts, TotalArea, STDArea.
:param value:
:param title:
:return:
'''
stats = collect_stats()
pvt = stats.pivot(index='Class', columns='ImageId', values = value)
pvt.fillna(0., inplace = True)
fig, ax = plt.subplots(figsize = (10, 4))
im = ax.imshow(pvt, interpolation = 'nearest', cmap = plt.cm.plasma,
extent = [0 ,25, 10, 0])
ax.set_xlabel('Image')
ax.set_ylabel('Class Type')
ax.set_xticks(np.arange(0.5, 25.4, 1))
ax.set_yticks(np.arange(0.5, 10.4, 1))
ax.set_xticklabels(np.arange(1, 26))
ax.set_yticklabels(pvt.index)
ax.set_title(title)
fig.colorbar(im)
def plot_bar_stats():
stats = collect_stats()
pvt = stats.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
perc_area = np.cumsum(pvt, axis = 0)
class_r = {}
sns.set_style('white')
sns.set_context({'figure.figsize': (12, 8)})
for cl in CLASSES: class_r[CLASSES[cl]] = cl
for cl in np.arange(1, 11):
class_name = perc_area.index[-cl]
class_id = class_r[class_name]
ax = sns.barplot(x = perc_area.columns, y = perc_area.loc[class_name],
color = COLORS[class_id], label = class_name)
ax.legend(loc = 2)
sns.despine(left = True)
ax.set_xlabel('Image ID')
ax.set_ylabel('Class Type')
ax.set_xticklabels(perc_area.columns, rotation = -60)
def jaccard_index(mask_1, mask_2):
'''
Calculate jaccard index between two masks
:param mask_1:
:param mask_2:
:return:
'''
assert len(mask_1.shape) == len(mask_2.shape) == 2
assert 0 <= np.amax(mask_1) <=1
assert 0 <= np.amax(mask_2) <=1
intersection = np.sum(mask_1.astype(np.float32) * mask_2.astype(np.float32))
union = np.sum(mask_1.astype(np.float32) + mask_2.astype(np.float32)) - \
intersection
if union == 0:
return 1.
return intersection / union
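# Tiny self-check for jaccard_index on two overlapping 4x4 binary masks:
# the intersection is 1 pixel and the union is 3 pixels, so the index is 1/3.
def _jaccard_example():
    mask_1 = np.zeros((4, 4), np.uint8)
    mask_2 = np.zeros((4, 4), np.uint8)
    mask_1[0, 0:2] = 1  # two pixels
    mask_2[0, 1:3] = 1  # two pixels, one shared with mask_1
    assert abs(jaccard_index(mask_1, mask_2) - 1. / 3.) < 1e-6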
def polygon_jaccard(final_polygons, train_polygons):
'''
Calcualte the jaccard index of two polygons, based on data type of
shapely.geometry.MultiPolygon
:param final_polygons:
:param train_polygons:
:return:
'''
return final_polygons.intersection(train_polygons).area /\
final_polygons.union(train_polygons).area
| [
"shapely.wkt.loads",
"descartes.patch.PolygonPatch",
"seaborn.set_style",
"numpy.percentile",
"shapely.affinity.scale",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"seaborn.despine",
"numpy.dot",
"pandas.DataFrame",
"sys.stdout.flush",
"numpy.round",
"cv2.fillPoly",
"cv2.warpAffine",
"numpy.amax",
"seaborn.set_context",
"numpy.std",
"cv2.resize",
"numpy.sum",
"numpy.zeros",
"numpy.cumsum",
"seaborn.barplot",
"pandas.concat",
"matplotlib.pyplot.subplots",
"sys.stdout.write"
] | [((2422, 2497), 'cv2.resize', 'cv2.resize', (['im', '(shape_out[1], shape_out[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(im, (shape_out[1], shape_out[0]), interpolation=cv2.INTER_CUBIC)\n', (2432, 2497), False, 'import cv2\n'), ((2552, 2595), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.144]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.144])\n', (2558, 2595), True, 'import numpy as np\n'), ((3156, 3303), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'warp_matrix', '(out_shape[1], out_shape[0])'], {'flags': '(cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(img, warp_matrix, (out_shape[1], out_shape[0]), flags=cv2.\n INTER_LINEAR + cv2.WARP_INVERSE_MAP, borderMode=cv2.BORDER_REPLICATE)\n', (3170, 3303), False, 'import cv2\n'), ((4062, 4090), 'shapely.wkt.loads', 'wkt.loads', (['polygon.values[0]'], {}), '(polygon.values[0])\n', (4071, 4090), True, 'import shapely.wkt as wkt\n'), ((5598, 5626), 'numpy.zeros', 'np.zeros', (['img_size', 'np.uint8'], {}), '(img_size, np.uint8)\n', (5606, 5626), True, 'import numpy as np\n'), ((5838, 5878), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'perim_list', 'class_id'], {}), '(mask, perim_list, class_id)\n', (5850, 5878), False, 'import cv2\n'), ((5883, 5916), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'inter_list', '(0)'], {}), '(mask, inter_list, 0)\n', (5895, 5916), False, 'import cv2\n'), ((9481, 9510), 'numpy.percentile', 'np.percentile', (['img', '(1)'], {'axis': '(0)'}), '(img, 1, axis=0)\n', (9494, 9510), True, 'import numpy as np\n'), ((9627, 9654), 'numpy.reshape', 'np.reshape', (['img', 'orig_shape'], {}), '(img, orig_shape)\n', (9637, 9654), True, 'import numpy as np\n'), ((10715, 10838), 'pandas.DataFrame', 'pd.DataFrame', (["{'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area, 'MeanArea':\n mean_area, 'STDArea': std_area}"], {}), "({'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area,\n 'MeanArea': mean_area, 'STDArea': std_area})\n", (10727, 10838), True, 'import pandas as pd\n'), ((11482, 11504), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (11498, 11504), False, 'import sys\n'), ((11516, 11532), 'pandas.concat', 'pd.concat', (['stats'], {}), '(stats)\n', (11525, 11532), True, 'import pandas as pd\n'), ((12180, 12209), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (12192, 12209), True, 'import matplotlib.pyplot as plt\n'), ((12745, 12767), 'numpy.cumsum', 'np.cumsum', (['pvt'], {'axis': '(0)'}), '(pvt, axis=0)\n', (12754, 12767), True, 'import numpy as np\n'), ((12791, 12813), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (12804, 12813), True, 'import seaborn as sns\n'), ((12818, 12862), 'seaborn.set_context', 'sns.set_context', (["{'figure.figsize': (12, 8)}"], {}), "({'figure.figsize': (12, 8)})\n", (12833, 12862), True, 'import seaborn as sns\n'), ((12928, 12944), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (12937, 12944), True, 'import numpy as np\n'), ((13204, 13226), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)'}), '(left=True)\n', (13215, 13226), True, 'import seaborn as sns\n'), ((9524, 9554), 'numpy.percentile', 'np.percentile', (['img', '(99)'], {'axis': '(0)'}), '(img, 99, axis=0)\n', (9537, 9554), True, 'import numpy as np\n'), ((11459, 11477), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11475, 11477), False, 'import sys\n'), ((12406, 12429), 'numpy.arange', 'np.arange', (['(0.5)', '(25.4)', '(1)'], {}), 
'(0.5, 25.4, 1)\n', (12415, 12429), True, 'import numpy as np\n'), ((12449, 12472), 'numpy.arange', 'np.arange', (['(0.5)', '(10.4)', '(1)'], {}), '(0.5, 10.4, 1)\n', (12458, 12472), True, 'import numpy as np\n'), ((12497, 12513), 'numpy.arange', 'np.arange', (['(1)', '(26)'], {}), '(1, 26)\n', (12506, 12513), True, 'import numpy as np\n'), ((13040, 13148), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'perc_area.columns', 'y': 'perc_area.loc[class_name]', 'color': 'COLORS[class_id]', 'label': 'class_name'}), '(x=perc_area.columns, y=perc_area.loc[class_name], color=COLORS[\n class_id], label=class_name)\n', (13051, 13148), True, 'import seaborn as sns\n'), ((13570, 13585), 'numpy.amax', 'np.amax', (['mask_1'], {}), '(mask_1)\n', (13577, 13585), True, 'import numpy as np\n'), ((13606, 13621), 'numpy.amax', 'np.amax', (['mask_2'], {}), '(mask_2)\n', (13613, 13621), True, 'import numpy as np\n'), ((4484, 4500), 'numpy.round', 'np.round', (['coords'], {}), '(coords)\n', (4492, 4500), True, 'import numpy as np\n'), ((7169, 7266), 'descartes.patch.PolygonPatch', 'PolygonPatch', ([], {'polygon': 'polygon_rescale', 'color': 'COLORS[cl]', 'lw': '(0)', 'alpha': 'alpha', 'zorder': 'ZORDER[cl]'}), '(polygon=polygon_rescale, color=COLORS[cl], lw=0, alpha=alpha,\n zorder=ZORDER[cl])\n', (7181, 7266), False, 'from descartes.patch import PolygonPatch\n'), ((6646, 6731), 'shapely.affinity.scale', 'affinity.scale', (['polygon'], {'xfact': 'scaler[1]', 'yfact': 'scaler[0]', 'origin': '[0.0, 0.0, 0.0]'}), '(polygon, xfact=scaler[1], yfact=scaler[0], origin=[0.0, 0.0,\n 0.0])\n', (6660, 6731), False, 'from shapely import affinity\n'), ((9223, 9286), 'numpy.reshape', 'np.reshape', (['img', '[orig_shape[0] * orig_shape[1], orig_shape[2]]'], {}), '(img, [orig_shape[0] * orig_shape[1], orig_shape[2]])\n', (9233, 9286), True, 'import numpy as np\n'), ((9402, 9450), 'numpy.reshape', 'np.reshape', (['img', '[orig_shape[0] * orig_shape[1]]'], {}), '(img, [orig_shape[0] * orig_shape[1]])\n', (9412, 9450), True, 'import numpy as np\n'), ((10371, 10415), 'numpy.sum', 'np.sum', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (10377, 10415), True, 'import numpy as np\n'), ((10492, 10537), 'numpy.mean', 'np.mean', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (10499, 10537), True, 'import numpy as np\n'), ((10612, 10656), 'numpy.std', 'np.std', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (10618, 10656), True, 'import numpy as np\n')] |
#coding=utf-8
'''
filename:triradian_radian.py
chap:3
subject:9 10.4
The user inputs the three sides a, b, c (e.g. a, b, c = 3, 7, 9).
Solution: the angles A, B, C in radians, using the law of cosines:
cos(C) = (a^2 + b^2 - c^2) / (2*a*b)
'''
import math
#sides = (3,7,9)
sides = tuple(eval(input("""
Please enter the 3 sides of triangle
with the format of 'a,b,c': """)))
#print(sides)
results = []
for i in range(3):
a = sides[(i+0)%3]
b = sides[(i+1)%3]
c = sides[(i+2)%3]
cos_a = float(c**2 + b**2 - a**2)/float(2*c*b)
C = math.acos(cos_a)
#print(a,b,c,cos_a,C)
results.append(C)
message = '''The sides of triradian are a,b,c={0},{1},{2}
then It's radians are A,B,C={3},{4},{5}
'''.format(*(list(sides)+results) )
print(message)
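# Sanity check: the three interior angles of any valid triangle sum to pi radians,
# so the computed results can be verified directly.
assert abs(sum(results) - math.pi) < 1e-6, "angles should sum to pi"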
| [
"math.acos"
] | [((518, 534), 'math.acos', 'math.acos', (['cos_a'], {}), '(cos_a)\n', (527, 534), False, 'import math\n')] |
#!/usr/bin/env python
import sys
from cv2 import cv
slider_pos = 0
def update_slider(pos):
global slider_pos
slider_pos = pos
def do_pyrdown(in_img, filter=cv.CV_GAUSSIAN_5x5):
# verify image is halvable
assert(in_img.width % 2 == 0 and in_img.height % 2 == 0)
out_img = cv.CreateImage((in_img.width/2, in_img.height/2), in_img.depth, in_img.nChannels)
cv.PyrDown(in_img, out_img, filter)
return out_img
def do_capture():
global slider_pos
# Set up the camera capture
capture = cv.CreateCameraCapture(0)
# Create a window
cv.NamedWindow("Exercise5", cv.CV_WINDOW_AUTOSIZE)
# Set up the trackbar
slider_pos = 0
cv.CreateTrackbar("Reduction", "Exercise5", slider_pos, 4, update_slider)
while True:
# Capture a frame
frame = cv.QueryFrame(capture)
        if frame is None:
continue
# Make sure it is divisible by up to 8
out = cv.CreateImage(((frame.width/8)*8, (frame.height/8)*8), frame.depth, frame.nChannels)
cv.Resize(frame, out)
# Reduce the image by pyramid depending on the slider position
if slider_pos != 0:
for i in range(slider_pos):
out = do_pyrdown(out)
cv.ShowImage("Exercise5", out)
# Check for esc key
c = cv.WaitKey(33)
if c == 27:
return(0)
if __name__ == "__main__":
do_capture()
sys.exit(0)
| [
"cv2.cv.PyrDown",
"cv2.cv.Resize",
"cv2.cv.CreateCameraCapture",
"cv2.cv.NamedWindow",
"cv2.cv.CreateTrackbar",
"cv2.cv.WaitKey",
"cv2.cv.ShowImage",
"sys.exit",
"cv2.cv.CreateImage",
"cv2.cv.QueryFrame"
] | [((286, 376), 'cv2.cv.CreateImage', 'cv.CreateImage', (['(in_img.width / 2, in_img.height / 2)', 'in_img.depth', 'in_img.nChannels'], {}), '((in_img.width / 2, in_img.height / 2), in_img.depth, in_img.\n nChannels)\n', (300, 376), False, 'from cv2 import cv\n'), ((370, 405), 'cv2.cv.PyrDown', 'cv.PyrDown', (['in_img', 'out_img', 'filter'], {}), '(in_img, out_img, filter)\n', (380, 405), False, 'from cv2 import cv\n'), ((505, 530), 'cv2.cv.CreateCameraCapture', 'cv.CreateCameraCapture', (['(0)'], {}), '(0)\n', (527, 530), False, 'from cv2 import cv\n'), ((554, 604), 'cv2.cv.NamedWindow', 'cv.NamedWindow', (['"""Exercise5"""', 'cv.CV_WINDOW_AUTOSIZE'], {}), "('Exercise5', cv.CV_WINDOW_AUTOSIZE)\n", (568, 604), False, 'from cv2 import cv\n'), ((649, 722), 'cv2.cv.CreateTrackbar', 'cv.CreateTrackbar', (['"""Reduction"""', '"""Exercise5"""', 'slider_pos', '(4)', 'update_slider'], {}), "('Reduction', 'Exercise5', slider_pos, 4, update_slider)\n", (666, 722), False, 'from cv2 import cv\n'), ((1314, 1325), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1322, 1325), False, 'import sys\n'), ((772, 794), 'cv2.cv.QueryFrame', 'cv.QueryFrame', (['capture'], {}), '(capture)\n', (785, 794), False, 'from cv2 import cv\n'), ((886, 979), 'cv2.cv.CreateImage', 'cv.CreateImage', (['(frame.width / 8 * 8, frame.height / 8 * 8)', 'frame.depth', 'frame.nChannels'], {}), '((frame.width / 8 * 8, frame.height / 8 * 8), frame.depth,\n frame.nChannels)\n', (900, 979), False, 'from cv2 import cv\n'), ((976, 997), 'cv2.cv.Resize', 'cv.Resize', (['frame', 'out'], {}), '(frame, out)\n', (985, 997), False, 'from cv2 import cv\n'), ((1158, 1188), 'cv2.cv.ShowImage', 'cv.ShowImage', (['"""Exercise5"""', 'out'], {}), "('Exercise5', out)\n", (1170, 1188), False, 'from cv2 import cv\n'), ((1222, 1236), 'cv2.cv.WaitKey', 'cv.WaitKey', (['(33)'], {}), '(33)\n', (1232, 1236), False, 'from cv2 import cv\n')] |
from flask import Flask, render_template, request
from os import getenv
from matplotlib.figure import Figure
from .models import DB, tracks, numbers
from .predict import get_recommendations
from io import BytesIO
import matplotlib.pyplot as plt
import seaborn as sns
import base64
import textwrap
import numpy as np
import scipy.stats as sps
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URI")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB.init_app(app)
@app.route('/')
def root():
return render_template('index.html')
@app.route('/didyoumean', methods=['POST', 'GET'])
def didyoumean():
song_name = request.values['song_name']
suggestions = tracks.query.filter(tracks.name == song_name).all()
pseudo_suggestion_list = []
sugg_form = []
for track in suggestions:
if ([track.name, track.artists, track.release_year]) not in pseudo_suggestion_list:
pseudo_suggestion_list.append([track.name, track.artists, track.release_year])
sugg_form.append([f'{track.name} by {track.artists}({track.release_year})'.replace('_', ' '), track.id])
if suggestions == []:
sugg_form = [["We did not find any tracks that match your search."]]
return render_template('didyoumean.html', suggestions=sugg_form)
@app.route('/recommendations', methods=['POST', 'GET'])
@app.route('/recommendations/<id>', methods=['POST', 'GET'])
def recommendations(id=None):
if id is None:
song_name = request.values['song_name']
track = tracks.query.filter(tracks.name == song_name).one()
song_name = f'{track.name}'
by_statement = f'by {track.artists}({track.release_year})'.replace('_', ' ')
else:
song_id = id
desired_track = tracks.query.filter(tracks.id == song_id).one()
song_name = f'{desired_track.name}'
by_statement = f'by {desired_track.artists}({desired_track.release_year})'.replace('_', ' ')
track = desired_track
# Get the distances and recommended songs' id's from the model
dist, rec = get_recommendations(track)
# print(rec)
# Populate a list with recommended songs' names via query
rec_names = []
names_for_plot = []
popularity = []
duration_ms = []
danceability = []
energy = []
speechiness = []
acousticness = []
liveness = []
valence = []
loudness = []
for r in rec:
r = int(r)
rec_track = tracks.query.filter(tracks.id == r).one()
rec_attr_vect = numbers.query.filter(numbers.id == r).one()
names_for_plot.append(rec_track.name)
rec_names.append(f'{rec_track.name} by {rec_track.artists}({rec_track.release_year})'.replace('_', ' '))
# Create feature vectors
popularity.append(rec_attr_vect.popularity)
duration_ms.append(rec_attr_vect.duration_ms)
danceability.append(rec_attr_vect.danceability)
energy.append(rec_attr_vect.energy)
speechiness.append(rec_attr_vect.speechiness)
acousticness.append(rec_attr_vect.acousticness)
liveness.append(rec_attr_vect.liveness)
valence.append(rec_attr_vect.valence)
loudness.append(rec_attr_vect.loudness)
# Create feature array
feature_array = [popularity, duration_ms,
danceability, energy,
speechiness, acousticness,
liveness, valence, loudness]
feature_names = ['Popularity', 'Length',
'Boogie Factor', 'Energy',
'Speechiness', 'Acousticness',
'Liveness', 'Upbeatness', 'Volume']
feature_plots = dist_plot(feature_array, names_for_plot)
plot_dict = {}
for i in range(len(feature_names)):
plot_dict[feature_names[i]] = feature_plots[i]
# print(type(feature_plots))
# print(plot_dict)
# print(feature_array)
# Plot the similarities relative to the primary query
track_plot = plot(rec_names, dist)
zip_rec = zip(rec, rec_names)
return render_template('song.html', song_name=song_name, by_statement=by_statement,
song=track, zip_rec=zip_rec, track_plot=track_plot,
feature_plots=feature_plots, plot_dict=plot_dict,
feature_names=feature_names)
return app
def plot(rec_names, distances):
print(rec_names)
print(distances)
fig, ax = plt.subplots()
percentages = [100*(1 - x) for x in distances]
min_x = min(percentages)-1
max_x = max(percentages)+1
x_range = np.linspace(min_x, max_x, 5)
plt.barh([textwrap.fill(name, 15) for name in rec_names], percentages,
color=['tab:purple', 'tab:blue', 'tab:orange', 'tab:green', 'tab:red'])
ax.set_xlim(left=min_x, right=max_x)
plt.gca().invert_yaxis()
plt.xticks(x_range, [str(round(x))+"%" for x in x_range])
plt.xlabel("Percent Similarity of Songs")
plt.tight_layout()
tmpfile = BytesIO()
img = fig.savefig(tmpfile, format='png')
encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')
return encoded
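# The base64 string returned above is meant to be embedded directly in the HTML
# template rather than written to disk; a typical (assumed) snippet in song.html
# would look like:
#   <img src="data:image/png;base64,{{ track_plot }}"/>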
def dist_plot(feature_array, names):
# define the normal distribution and PDF
dist = sps.norm(loc=50, scale=10)
x = np.linspace(0.1, 100)
y = dist.pdf(np.linspace(dist.ppf(.001), dist.ppf(.999)))
encoded = []
for i in feature_array:
rec_attr = i
# calculate PPFs
ppfs = {}
for ppf in rec_attr:
p = dist.ppf(ppf)
ppfs.update({ppf * 100: p})
# plot results
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, y, color='k')
for i, ppf in enumerate(ppfs):
ax.axvline(ppfs[ppf], color=f'C{i}', label=f'{ppf:.0f}th: {ppfs[ppf]:.1f}')
# Remove incorrect y axis ticks
axes = plt.gca()
axes.get_yaxis().set_visible(False)
ax.legend(labels=names, loc=9, bbox_to_anchor=(0.5, 1.45))
plt.tight_layout(pad=0.1)
tmpfile = BytesIO()
img = fig.savefig(tmpfile, format='png')
encoded.append(base64.b64encode(tmpfile.getvalue()).decode('utf-8'))
return encoded
| [
"flask.render_template",
"os.getenv",
"flask.Flask",
"scipy.stats.norm",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"io.BytesIO",
"numpy.linspace",
"textwrap.fill",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] | [((371, 386), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (376, 386), False, 'from flask import Flask, render_template, request\n'), ((431, 453), 'os.getenv', 'getenv', (['"""DATABASE_URI"""'], {}), "('DATABASE_URI')\n", (437, 453), False, 'from os import getenv\n'), ((4829, 4843), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4841, 4843), True, 'import matplotlib.pyplot as plt\n'), ((4971, 4999), 'numpy.linspace', 'np.linspace', (['min_x', 'max_x', '(5)'], {}), '(min_x, max_x, 5)\n', (4982, 4999), True, 'import numpy as np\n'), ((5287, 5328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Percent Similarity of Songs"""'], {}), "('Percent Similarity of Songs')\n", (5297, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5333, 5351), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5349, 5351), True, 'import matplotlib.pyplot as plt\n'), ((5367, 5376), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5374, 5376), False, 'from io import BytesIO\n'), ((5603, 5629), 'scipy.stats.norm', 'sps.norm', ([], {'loc': '(50)', 'scale': '(10)'}), '(loc=50, scale=10)\n', (5611, 5629), True, 'import scipy.stats as sps\n'), ((5638, 5659), 'numpy.linspace', 'np.linspace', (['(0.1)', '(100)'], {}), '(0.1, 100)\n', (5649, 5659), True, 'import numpy as np\n'), ((585, 614), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (600, 614), False, 'from flask import Flask, render_template, request\n'), ((1350, 1407), 'flask.render_template', 'render_template', (['"""didyoumean.html"""'], {'suggestions': 'sugg_form'}), "('didyoumean.html', suggestions=sugg_form)\n", (1365, 1407), False, 'from flask import Flask, render_template, request\n'), ((4422, 4638), 'flask.render_template', 'render_template', (['"""song.html"""'], {'song_name': 'song_name', 'by_statement': 'by_statement', 'song': 'track', 'zip_rec': 'zip_rec', 'track_plot': 'track_plot', 'feature_plots': 'feature_plots', 'plot_dict': 'plot_dict', 'feature_names': 'feature_names'}), "('song.html', song_name=song_name, by_statement=by_statement,\n song=track, zip_rec=zip_rec, track_plot=track_plot, feature_plots=\n feature_plots, plot_dict=plot_dict, feature_names=feature_names)\n", (4437, 4638), False, 'from flask import Flask, render_template, request\n'), ((5981, 6010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (5993, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6228, 6237), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6235, 6237), True, 'import matplotlib.pyplot as plt\n'), ((6358, 6383), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.1)'}), '(pad=0.1)\n', (6374, 6383), True, 'import matplotlib.pyplot as plt\n'), ((6403, 6412), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6410, 6412), False, 'from io import BytesIO\n'), ((5014, 5037), 'textwrap.fill', 'textwrap.fill', (['name', '(15)'], {}), '(name, 15)\n', (5027, 5037), False, 'import textwrap\n'), ((5196, 5205), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5203, 5205), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
from nose.tools import raises
def Oversampling_2_test():
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_2Filter
from numpy.testing import assert_almost_equal
ref = np.sin(np.arange(2000, dtype=np.float64)[None,:] * 1000 * 2 * np.pi / 96000)
input = np.ascontiguousarray(ref[:, ::2])
output = np.ascontiguousarray(np.zeros(2000, dtype=np.float64)[None,:])
inputfilter = DoubleInPointerFilter(input, False)
oversamplingfilter = DoubleOversampling6points5order_2Filter()
outputfilter = DoubleOutPointerFilter(output, False)
inputfilter.output_sampling_rate = 48000
oversamplingfilter.input_sampling_rate = 48000
oversamplingfilter.output_sampling_rate = 96000
outputfilter.input_sampling_rate = 96000
oversamplingfilter.set_input_port(0, inputfilter, 0)
outputfilter.set_input_port(0, oversamplingfilter, 0)
outputfilter.process(2000)
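  # The oversampling filter introduces a short delay, so the reference is compared
  # against a shifted slice of the output (a 6-sample offset here; the offset grows
  # with the oversampling factor in the tests below).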
assert_almost_equal(ref[:,994:-6], output[:,1000:], decimal=1)
def Oversampling_4_test():
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_4Filter
from numpy.testing import assert_almost_equal
ref = np.sin(np.arange(4000, dtype=np.float64)[None,:] * 1000 * 2 * np.pi / 192000)
input = np.ascontiguousarray(ref[:, ::4])
output = np.ascontiguousarray(np.zeros(4000, dtype=np.float64)[None,:])
inputfilter = DoubleInPointerFilter(input, False)
oversamplingfilter = DoubleOversampling6points5order_4Filter()
outputfilter = DoubleOutPointerFilter(output, False)
inputfilter.output_sampling_rate = 48000
oversamplingfilter.input_sampling_rate = 48000
oversamplingfilter.output_sampling_rate = 192000
outputfilter.input_sampling_rate = 192000
oversamplingfilter.set_input_port(0, inputfilter, 0)
outputfilter.set_input_port(0, oversamplingfilter, 0)
outputfilter.process(4000)
assert_almost_equal(ref[:,988:-12], output[:,1000:], decimal=1)
def Oversampling_8_test():
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_8Filter
from numpy.testing import assert_almost_equal
ref = np.sin(np.arange(8000, dtype=np.float64)[None,:] * 1000 * 2 * np.pi / 384000)
input = np.ascontiguousarray(ref[:, ::8])
output = np.ascontiguousarray(np.zeros(8000, dtype=np.float64)[None,:])
inputfilter = DoubleInPointerFilter(input, False)
oversamplingfilter = DoubleOversampling6points5order_8Filter()
outputfilter = DoubleOutPointerFilter(output, False)
inputfilter.output_sampling_rate = 48000
oversamplingfilter.input_sampling_rate = 48000
oversamplingfilter.output_sampling_rate = 384000
outputfilter.input_sampling_rate = 384000
oversamplingfilter.set_input_port(0, inputfilter, 0)
outputfilter.set_input_port(0, oversamplingfilter, 0)
outputfilter.process(8000)
assert_almost_equal(ref[:,976:-24], output[:,1000:], decimal=1)
def Oversampling_16_test():
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_16Filter
from numpy.testing import assert_almost_equal
ref = np.sin(np.arange(16000, dtype=np.float64)[None,:] * 1000 * 2 * np.pi / 768000)
input = np.ascontiguousarray(ref[:, ::16])
output = np.ascontiguousarray(np.zeros(16000, dtype=np.float64)[None,:])
inputfilter = DoubleInPointerFilter(input, False)
oversamplingfilter = DoubleOversampling6points5order_16Filter()
outputfilter = DoubleOutPointerFilter(output, False)
inputfilter.output_sampling_rate = 48000
oversamplingfilter.input_sampling_rate = 48000
oversamplingfilter.output_sampling_rate = 768000
outputfilter.input_sampling_rate = 768000
oversamplingfilter.set_input_port(0, inputfilter, 0)
outputfilter.set_input_port(0, oversamplingfilter, 0)
outputfilter.process(16000)
assert_almost_equal(ref[:,952:-48], output[:,1000:], decimal=1)
def Oversampling_32_test():
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_32Filter
from numpy.testing import assert_almost_equal
ref = np.sin(np.arange(32000, dtype=np.float64)[None,:] * 1000 * 2 * np.pi / (2*768000))
input = np.ascontiguousarray(ref[:, ::32])
output = np.ascontiguousarray(np.zeros(32000, dtype=np.float64)[None,:])
inputfilter = DoubleInPointerFilter(input, False)
oversamplingfilter = DoubleOversampling6points5order_32Filter()
outputfilter = DoubleOutPointerFilter(output, False)
inputfilter.output_sampling_rate = 48000
oversamplingfilter.input_sampling_rate = 48000
oversamplingfilter.output_sampling_rate = 32*48000
outputfilter.input_sampling_rate = 32*48000
oversamplingfilter.set_input_port(0, inputfilter, 0)
outputfilter.set_input_port(0, oversamplingfilter, 0)
outputfilter.process(32000)
assert_almost_equal(ref[:,904:-96], output[:,1000:], decimal=1)
sample_rate = 96000
def filter(input):
import numpy as np
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_32Filter
output = np.zeros((1, input.shape[0] * 32), dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = sample_rate
overfilter = DoubleOversampling6points5order_32Filter()
overfilter.input_sampling_rate = sample_rate
overfilter.output_sampling_rate = sample_rate * 32
overfilter.set_input_port(0, infilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = sample_rate * 32
outfilter.set_input_port(0, overfilter, 0)
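  # Process the signal in 10 successive blocks rather than a single call,
  # exercising the filter's block-based (streaming) operation.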
for i in range(10):
outfilter.process(input.shape[0] * 32 // 10)
return output
def OversamplingFilter_test():
import numpy as np
samples = 100
t = np.arange(samples, dtype=np.float64) / sample_rate
d = np.sin(t * 2 * np.pi * 100)
output = filter(d)
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
samples = 100
t = np.arange(samples, dtype=np.float64) / sample_rate
d = np.sin(t * 2 * np.pi * 100)
t2 = np.arange(samples * 32, dtype=np.float64) / (sample_rate * 32)
d2 = filter(d)
plt.plot(t, d, label="input")
plt.plot(t2 - 3. / sample_rate, d2, label="output")
plt.gcf().suptitle("Oversampling")
plt.legend()
plt.show()
| [
"ATK.Core.DoubleOutPointerFilter",
"ATK.Tools.DoubleOversampling6points5order_16Filter",
"numpy.arange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.ascontiguousarray",
"ATK.Tools.DoubleOversampling6points5order_8Filter",
"numpy.testing.assert_almost_equal",
"ATK.Tools.DoubleOversampling6points5order_4Filter",
"numpy.zeros",
"ATK.Core.DoubleInPointerFilter",
"numpy.sin",
"ATK.Tools.DoubleOversampling6points5order_32Filter",
"ATK.Tools.DoubleOversampling6points5order_2Filter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((384, 417), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ref[:, ::2]'], {}), '(ref[:, ::2])\n', (404, 417), True, 'import numpy as np\n'), ((511, 546), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (532, 546), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((570, 611), 'ATK.Tools.DoubleOversampling6points5order_2Filter', 'DoubleOversampling6points5order_2Filter', ([], {}), '()\n', (609, 611), False, 'from ATK.Tools import DoubleOversampling6points5order_2Filter\n'), ((629, 666), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (651, 666), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((1004, 1068), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ref[:, 994:-6]', 'output[:, 1000:]'], {'decimal': '(1)'}), '(ref[:, 994:-6], output[:, 1000:], decimal=1)\n', (1023, 1068), False, 'from numpy.testing import assert_almost_equal\n'), ((1399, 1432), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ref[:, ::4]'], {}), '(ref[:, ::4])\n', (1419, 1432), True, 'import numpy as np\n'), ((1526, 1561), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (1547, 1561), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((1585, 1626), 'ATK.Tools.DoubleOversampling6points5order_4Filter', 'DoubleOversampling6points5order_4Filter', ([], {}), '()\n', (1624, 1626), False, 'from ATK.Tools import DoubleOversampling6points5order_4Filter\n'), ((1644, 1681), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (1666, 1681), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((2021, 2086), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ref[:, 988:-12]', 'output[:, 1000:]'], {'decimal': '(1)'}), '(ref[:, 988:-12], output[:, 1000:], decimal=1)\n', (2040, 2086), False, 'from numpy.testing import assert_almost_equal\n'), ((2417, 2450), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ref[:, ::8]'], {}), '(ref[:, ::8])\n', (2437, 2450), True, 'import numpy as np\n'), ((2544, 2579), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (2565, 2579), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((2603, 2644), 'ATK.Tools.DoubleOversampling6points5order_8Filter', 'DoubleOversampling6points5order_8Filter', ([], {}), '()\n', (2642, 2644), False, 'from ATK.Tools import DoubleOversampling6points5order_8Filter\n'), ((2662, 2699), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (2684, 2699), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((3041, 3106), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ref[:, 976:-24]', 'output[:, 1000:]'], {'decimal': '(1)'}), '(ref[:, 976:-24], output[:, 1000:], decimal=1)\n', (3060, 3106), False, 'from numpy.testing import assert_almost_equal\n'), ((3440, 3474), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ref[:, ::16]'], {}), '(ref[:, ::16])\n', (3460, 3474), True, 'import numpy as np\n'), ((3569, 3604), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (3590, 3604), False, 'from ATK.Core import DoubleInPointerFilter, 
DoubleOutPointerFilter\n'), ((3628, 3670), 'ATK.Tools.DoubleOversampling6points5order_16Filter', 'DoubleOversampling6points5order_16Filter', ([], {}), '()\n', (3668, 3670), False, 'from ATK.Tools import DoubleOversampling6points5order_16Filter\n'), ((3688, 3725), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (3710, 3725), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((4067, 4132), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ref[:, 952:-48]', 'output[:, 1000:]'], {'decimal': '(1)'}), '(ref[:, 952:-48], output[:, 1000:], decimal=1)\n', (4086, 4132), False, 'from numpy.testing import assert_almost_equal\n'), ((4470, 4504), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ref[:, ::32]'], {}), '(ref[:, ::32])\n', (4490, 4504), True, 'import numpy as np\n'), ((4599, 4634), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (4620, 4634), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((4658, 4700), 'ATK.Tools.DoubleOversampling6points5order_32Filter', 'DoubleOversampling6points5order_32Filter', ([], {}), '()\n', (4698, 4700), False, 'from ATK.Tools import DoubleOversampling6points5order_32Filter\n'), ((4718, 4755), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (4740, 4755), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((5103, 5168), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ref[:, 904:-96]', 'output[:, 1000:]'], {'decimal': '(1)'}), '(ref[:, 904:-96], output[:, 1000:], decimal=1)\n', (5122, 5168), False, 'from numpy.testing import assert_almost_equal\n'), ((5380, 5432), 'numpy.zeros', 'np.zeros', (['(1, input.shape[0] * 32)'], {'dtype': 'np.float64'}), '((1, input.shape[0] * 32), dtype=np.float64)\n', (5388, 5432), True, 'import numpy as np\n'), ((5449, 5484), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (5470, 5484), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((5548, 5590), 'ATK.Tools.DoubleOversampling6points5order_32Filter', 'DoubleOversampling6points5order_32Filter', ([], {}), '()\n', (5588, 5590), False, 'from ATK.Tools import DoubleOversampling6points5order_32Filter\n'), ((5752, 5789), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (5774, 5789), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((6109, 6136), 'numpy.sin', 'np.sin', (['(t * 2 * np.pi * 100)'], {}), '(t * 2 * np.pi * 100)\n', (6115, 6136), True, 'import numpy as np\n'), ((6324, 6351), 'numpy.sin', 'np.sin', (['(t * 2 * np.pi * 100)'], {}), '(t * 2 * np.pi * 100)\n', (6330, 6351), True, 'import numpy as np\n'), ((6444, 6473), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'd'], {'label': '"""input"""'}), "(t, d, label='input')\n", (6452, 6473), True, 'import matplotlib.pyplot as plt\n'), ((6476, 6528), 'matplotlib.pyplot.plot', 'plt.plot', (['(t2 - 3.0 / sample_rate)', 'd2'], {'label': '"""output"""'}), "(t2 - 3.0 / sample_rate, d2, label='output')\n", (6484, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6567, 6579), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6577, 6579), True, 'import matplotlib.pyplot as plt\n'), ((6582, 6592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(6590, 6592), True, 'import matplotlib.pyplot as plt\n'), ((6052, 6088), 'numpy.arange', 'np.arange', (['samples'], {'dtype': 'np.float64'}), '(samples, dtype=np.float64)\n', (6061, 6088), True, 'import numpy as np\n'), ((6267, 6303), 'numpy.arange', 'np.arange', (['samples'], {'dtype': 'np.float64'}), '(samples, dtype=np.float64)\n', (6276, 6303), True, 'import numpy as np\n'), ((6362, 6403), 'numpy.arange', 'np.arange', (['(samples * 32)'], {'dtype': 'np.float64'}), '(samples * 32, dtype=np.float64)\n', (6371, 6403), True, 'import numpy as np\n'), ((450, 482), 'numpy.zeros', 'np.zeros', (['(2000)'], {'dtype': 'np.float64'}), '(2000, dtype=np.float64)\n', (458, 482), True, 'import numpy as np\n'), ((1465, 1497), 'numpy.zeros', 'np.zeros', (['(4000)'], {'dtype': 'np.float64'}), '(4000, dtype=np.float64)\n', (1473, 1497), True, 'import numpy as np\n'), ((2483, 2515), 'numpy.zeros', 'np.zeros', (['(8000)'], {'dtype': 'np.float64'}), '(8000, dtype=np.float64)\n', (2491, 2515), True, 'import numpy as np\n'), ((3507, 3540), 'numpy.zeros', 'np.zeros', (['(16000)'], {'dtype': 'np.float64'}), '(16000, dtype=np.float64)\n', (3515, 3540), True, 'import numpy as np\n'), ((4537, 4570), 'numpy.zeros', 'np.zeros', (['(32000)'], {'dtype': 'np.float64'}), '(32000, dtype=np.float64)\n', (4545, 4570), True, 'import numpy as np\n'), ((6530, 6539), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6537, 6539), True, 'import matplotlib.pyplot as plt\n'), ((304, 337), 'numpy.arange', 'np.arange', (['(2000)'], {'dtype': 'np.float64'}), '(2000, dtype=np.float64)\n', (313, 337), True, 'import numpy as np\n'), ((1318, 1351), 'numpy.arange', 'np.arange', (['(4000)'], {'dtype': 'np.float64'}), '(4000, dtype=np.float64)\n', (1327, 1351), True, 'import numpy as np\n'), ((2336, 2369), 'numpy.arange', 'np.arange', (['(8000)'], {'dtype': 'np.float64'}), '(8000, dtype=np.float64)\n', (2345, 2369), True, 'import numpy as np\n'), ((3358, 3392), 'numpy.arange', 'np.arange', (['(16000)'], {'dtype': 'np.float64'}), '(16000, dtype=np.float64)\n', (3367, 3392), True, 'import numpy as np\n'), ((4384, 4418), 'numpy.arange', 'np.arange', (['(32000)'], {'dtype': 'np.float64'}), '(32000, dtype=np.float64)\n', (4393, 4418), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from sandbox.settings import APIKEY, ORIGIN, MERCHANTID, ACCOUNTID, URL_PAYU, TEST_PAYU
from django.http import HttpResponse, HttpResponseNotFound
import md5, string, random
import json
from . import forms  # assumed: this app's forms module providing ConfirmacionPago used below
# Create your views here.
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# end def
def pagar(request):
currency = "COP" # Moneda en la que se paga
referenceCode = id_generator() # Debe ser unico por transacción
buyerFullName = "<NAME>"
buyerEmail = "<EMAIL>"
	amount = 20000 # Purchase amount
	signature = "%s~%d~%s~%d~%s" % (APIKEY, MERCHANTID, referenceCode, amount, currency) # Digital signature created for each transaction.
	signatureMD5 = md5.new(signature) # Hash it with MD5
description = "Descripción del producto"
	confirmationUrl = "http://localhost:8000/payu/confirmacion/pago/" # Confirmation URL
return render(request, 'webcheckout/compra.html', {"url": URL_PAYU, "test": TEST_PAYU, "merchantId": MERCHANTID, "accountId":ACCOUNTID, "referenceCode":referenceCode ,
"buyerFullName":buyerFullName, "description": description, "currency": currency, "amount": amount, "buyerEmail": buyerEmail, "signature":signatureMD5.hexdigest(), "confirmationUrl": confirmationUrl})
#end def
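# PayU reports amounts such as "20000.00" in its confirmation; the signature check
# appears to expect a single trailing zero ("20000.0") when the cents are "00", so
# the value is normalised below before the MD5 signature is rebuilt.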
def new_value(value):
val = value.split(".")
try:
if val[1] == "00":
num = val[0]+".0"
else:
num = value
return num
except:
return value
# end if
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@csrf_exempt
def confirmacion(request):
if request.method == "POST":
form = forms.ConfirmacionPago(request.POST)
state_pol = request.POST.get('state_pol', False)
sign = request.POST.get('sign', False)
if form.is_valid():
con = form.save(commit=False)
value = new_value(con.value)
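			# Rebuild the signature from the API key and the reported transaction data;
			# it must match the "sign" value sent by PayU for the confirmation to be trusted.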
validacion = "%s~%d~%s~%s~%s~%s" % (APIKEY, con.merchant_id , con.reference_sale, value, con.currency, con.state_pol)
validacionMD5 = md5.new(validacion)
firma = validacionMD5.hexdigest()
if sign == firma:
con.validacion = True
else:
con.validacion = False
# end if
con.cita = cita
con.save()
return HttpResponse(status=200)
# end if
errors = form.errors.items()
file = open(os.path.join(BASE_DIR, "confirmacion_ERROR.txt"), "w+")
file.write(str(json.dumps(errors)))
file.close()
# return HttpResponse(json.dumps(errors), status=400)
return HttpResponse(status=400)
# end def
| [
"random.choice",
"django.http.HttpResponse",
"os.path.join",
"md5.new",
"os.path.abspath"
] | [((905, 923), 'md5.new', 'md5.new', (['signature'], {}), '(signature)\n', (912, 923), False, 'import md5, string, random\n'), ((2849, 2873), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(400)'}), '(status=400)\n', (2861, 2873), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((1746, 1771), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1761, 1771), False, 'import os\n'), ((446, 466), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (459, 466), False, 'import md5, string, random\n'), ((2274, 2293), 'md5.new', 'md5.new', (['validacion'], {}), '(validacion)\n', (2281, 2293), False, 'import md5, string, random\n'), ((2556, 2580), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(200)'}), '(status=200)\n', (2568, 2580), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((2655, 2703), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""confirmacion_ERROR.txt"""'], {}), "(BASE_DIR, 'confirmacion_ERROR.txt')\n", (2667, 2703), False, 'import os\n')] |
"""Admin interface."""
from datetime import datetime
from typing import Optional
from flask import Blueprint, render_template, abort, flash, url_for, redirect
from flask import current_app as app
from flask_login import current_user
from flask_babel import _
from app.database import db
from app.utils.utils import redirect_return
from app.utils.email import send_email
from app.utils.pagination import Pagination
from app.decorators import moderator, admin
from app.models.location import Location, LocationType
from app.models.user import User, Invitation, LoginLog, InvitationState
from app.models import event
from app.models.event import EventLog
from app.forms.admin import MessageForm
from app.routes.user import send_invitation
from app.routes.message import send_message
blueprint = Blueprint('admin', __name__, url_prefix='/admin')
def _send_mail_to_all(subject: str, message: str) -> None:
"""Sends email to all users.
Args:
subject: Subject of the message
message: Message to be sent
"""
users = User.get()
recipients = []
for user in users:
recipients.append(user.email)
send_email(recipients, subject, message)
@blueprint.route('/locations')
@blueprint.route('/locations/<int:page>')
@blueprint.route('/locations/<string:location>')
@blueprint.route('/locations/<string:location>/<int:page>')
@moderator
def locations(page: int = 1, location: Optional[str] = None):
"""Lists existing locations.
Args:
page: Page number for results pagination
location: Location type (urbex, underground, private)
"""
if location is None:
query = Location.get(LocationType.ALL)
title = _("All locations")
elif location == 'urbex':
query = Location.get(LocationType.URBEX)
title = _("Urbex locations")
elif location == 'underground':
query = Location.get(LocationType.UNDERGROUND)
title = _("Underground locations")
elif location == 'private':
query = Location.get_unpublished(LocationType.ALL)
title = _("Private locations")
else:
abort(404)
query = query.paginate(page, app.config['ITEMS_PER_PAGE'], True)
pagination = Pagination(page, query.pages, 'admin.locations',
location=location)
locations_count = Location.get(LocationType.ALL).count()
urbex_count = Location.get(LocationType.URBEX).count()
underground_count = Location.get(LocationType.UNDERGROUND).count()
private_count = Location.get_unpublished(LocationType.ALL).count()
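    # Record when the moderator last reviewed this overview (presumably used
    # elsewhere to highlight locations added since this timestamp).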
current_user.location_check_ts = datetime.utcnow()
db.session.commit()
return render_template('admin/locations.html', locations=query.items,
title=title, locations_count=locations_count,
underground_count=underground_count,
urbex_count=urbex_count,
private_count=private_count, pagination=pagination)
@blueprint.route('/users')
@blueprint.route('/users/<int:page>')
@blueprint.route('/users/<string:role>')
@blueprint.route('/users/<string:role>/<int:page>')
@admin
def users(page: int = 1, role: Optional[str] = None):
"""Lists existing users.
Args:
page: Page number for results pagination
role: Select specific users (admins, moderators, bans)
"""
if role is None:
query = User.get()
title = _("Users")
elif role == 'admins':
query = User.get_admins()
title = _("Admins")
elif role == 'moderators':
query = User.get_moderators()
title = _("Moderators")
elif role == 'bans':
query = User.get_banned()
title = _("Banned users")
else:
abort(404)
query = query.paginate(page, app.config['ITEMS_PER_PAGE'], True)
pagination = Pagination(page, query.pages, 'admin.users', role=role)
users_count = User.get().count()
admins_count = User.get_admins().count()
moderators_count = User.get_moderators().count()
bans_count = User.get_banned().count()
return render_template('admin/users.html', users=query.items, title=title,
users_count=users_count, admins_count=admins_count,
moderators_count=moderators_count,
bans_count=bans_count, pagination=pagination)
@blueprint.route('/invitations')
@blueprint.route('/invitations/<int:page>')
@blueprint.route('/invitations/<string:state>')
@blueprint.route('/invitations/<string:state>/<int:page>')
@admin
def invitations(page: int = 1, state: Optional[str] = None):
"""Lists invitations
Args:
page: Page number for results pagination
state: State of the invitation (waiting, approved, denied, registered)
"""
if state is None:
query = Invitation.get()
title = _("Invitations")
elif state == 'waiting':
query = Invitation.get_by_state(InvitationState.WAITING)
title = _("Waiting for approval")
elif state == 'approved':
query = Invitation.get_by_state(InvitationState.APPROVED)
title = _("Approved invitations")
elif state == 'denied':
query = Invitation.get_by_state(InvitationState.DENIED)
title = _("Denied invitations")
elif state == 'registered':
query = Invitation.get_by_state(InvitationState.REGISTERED)
title = _("Registered users")
else:
abort(404)
query = query.paginate(page, app.config['ITEMS_PER_PAGE'], True)
pagination = Pagination(page, query.pages, 'admin.invitations',
state=state)
waiting = Invitation.get_by_state(InvitationState.WAITING).count()
approved = Invitation.get_by_state(InvitationState.APPROVED).count()
denied = Invitation.get_by_state(InvitationState.DENIED).count()
registered = Invitation.get_by_state(InvitationState.REGISTERED).count()
return render_template('admin/invitations.html', invitations=query.items,
waiting=waiting, approved=approved, denied=denied,
registered=registered, title=title,
pagination=pagination)
@blueprint.route('/logins')
@blueprint.route('/logins/<int:page>')
@blueprint.route('/logins/<string:login_type>')
@blueprint.route('/logins/<string:login_type>/<int:page>')
@admin
def logins(page: int = 1, login_type: Optional[str] = None):
"""Shows log of login attempts.
Args:
page: Page number for results pagination
login_type: Login type (unique, failed)
"""
if login_type is None:
query = LoginLog.get()
title = _("Logins")
elif login_type == 'unique':
query = LoginLog.get_unique()
title = _("Unique IPs")
elif login_type == 'failed':
query = LoginLog.get_failed()
title = _("Failed logins")
else:
abort(404)
attempts = LoginLog.get().count()
failed = LoginLog.get_failed().count()
unique = LoginLog.get_unique().count()
per_month = LoginLog.get_last_month().count()
query = query.paginate(page, app.config['ITEMS_PER_PAGE'], True)
pagination = Pagination(page, query.pages, 'admin.logins')
current_user.login_check_ts = datetime.utcnow()
db.session.commit()
return render_template('admin/logins.html', logins=query.items,
failed=failed, unique=unique, attempts=attempts,
per_month=per_month, pagination=pagination,
title=title)
@blueprint.route('invitations/approve/<int:invite_id>')
@admin
def invite_approve(invite_id: int):
"""Approves the user invitation
Args:
invite_id: ID of the invitation
"""
invite = Invitation.get_by_id(invite_id)
if not invite:
abort(404)
if invite.state in (InvitationState.WAITING, InvitationState.DENIED):
invite.state = InvitationState.APPROVED
invite.approved_by = current_user
EventLog.log(current_user, event.ApproveInviteEvent(invite))
db.session.commit()
send_invitation(invite)
return redirect_return()
@blueprint.route('invitation/deny/<int:invite_id>')
@admin
def invite_deny(invite_id: int):
"""Denies the user invitation
Args:
invite_id: ID of the invitation
"""
invite = Invitation.get_by_id(invite_id)
if not invite:
abort(404)
if invite.state in (InvitationState.WAITING, InvitationState.APPROVED):
invite.state = InvitationState.DENIED
invite.approved_by = current_user
EventLog.log(current_user, event.DenyInviteEvent(invite))
db.session.commit()
return redirect_return()
@blueprint.route('/events')
@blueprint.route('/events/<int:page>')
@admin
def events(page: int = 1):
"""Shows log of events
Args:
page: Page number for results pagination
"""
query = EventLog.get()
query = query.paginate(page, app.config['ITEMS_PER_PAGE'], True)
pagination = Pagination(page, query.pages, 'admin.events')
current_user.event_check_ts = datetime.utcnow()
db.session.commit()
return render_template('admin/events.html', events=query.items,
pagination=pagination)
@blueprint.route('/message', methods=['GET', 'POST'])
@admin
def message():
"""Render form to send email/message to all users"""
form = MessageForm()
if form.validate_on_submit():
subject = form.subject.data
message = form.text.data
if form.email.data:
_send_mail_to_all(subject, message)
flash(_("Email to all users was sent"), 'success')
else:
users = User.get()
for user in users:
send_message(user, subject, message)
flash(_("Message to all users was sent"), 'success')
return redirect(url_for('page.index'))
return render_template('admin/message.html', form=form)
| [
"flask.render_template",
"app.utils.utils.redirect_return",
"app.models.user.User.get_banned",
"app.utils.email.send_email",
"app.models.user.Invitation.get_by_state",
"app.models.user.LoginLog.get_last_month",
"app.models.location.Location.get",
"app.models.user.LoginLog.get_failed",
"app.models.event.EventLog.get",
"app.models.user.LoginLog.get_unique",
"app.routes.message.send_message",
"flask.abort",
"flask_babel._",
"app.models.user.User.get_moderators",
"app.utils.pagination.Pagination",
"app.models.event.DenyInviteEvent",
"flask.Blueprint",
"app.database.db.session.commit",
"app.models.event.ApproveInviteEvent",
"app.routes.user.send_invitation",
"datetime.datetime.utcnow",
"app.models.user.Invitation.get",
"app.models.user.User.get",
"app.models.user.Invitation.get_by_id",
"flask.url_for",
"app.models.location.Location.get_unpublished",
"app.forms.admin.MessageForm",
"app.models.user.User.get_admins",
"app.models.user.LoginLog.get"
] | [((795, 844), 'flask.Blueprint', 'Blueprint', (['"""admin"""', '__name__'], {'url_prefix': '"""/admin"""'}), "('admin', __name__, url_prefix='/admin')\n", (804, 844), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((1046, 1056), 'app.models.user.User.get', 'User.get', ([], {}), '()\n', (1054, 1056), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((1143, 1183), 'app.utils.email.send_email', 'send_email', (['recipients', 'subject', 'message'], {}), '(recipients, subject, message)\n', (1153, 1183), False, 'from app.utils.email import send_email\n'), ((2207, 2274), 'app.utils.pagination.Pagination', 'Pagination', (['page', 'query.pages', '"""admin.locations"""'], {'location': 'location'}), "(page, query.pages, 'admin.locations', location=location)\n", (2217, 2274), False, 'from app.utils.pagination import Pagination\n'), ((2604, 2621), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2619, 2621), False, 'from datetime import datetime\n'), ((2626, 2645), 'app.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2643, 2645), False, 'from app.database import db\n'), ((2658, 2893), 'flask.render_template', 'render_template', (['"""admin/locations.html"""'], {'locations': 'query.items', 'title': 'title', 'locations_count': 'locations_count', 'underground_count': 'underground_count', 'urbex_count': 'urbex_count', 'private_count': 'private_count', 'pagination': 'pagination'}), "('admin/locations.html', locations=query.items, title=title,\n locations_count=locations_count, underground_count=underground_count,\n urbex_count=urbex_count, private_count=private_count, pagination=pagination\n )\n", (2673, 2893), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((3844, 3899), 'app.utils.pagination.Pagination', 'Pagination', (['page', 'query.pages', '"""admin.users"""'], {'role': 'role'}), "(page, query.pages, 'admin.users', role=role)\n", (3854, 3899), False, 'from app.utils.pagination import Pagination\n'), ((4091, 4300), 'flask.render_template', 'render_template', (['"""admin/users.html"""'], {'users': 'query.items', 'title': 'title', 'users_count': 'users_count', 'admins_count': 'admins_count', 'moderators_count': 'moderators_count', 'bans_count': 'bans_count', 'pagination': 'pagination'}), "('admin/users.html', users=query.items, title=title,\n users_count=users_count, admins_count=admins_count, moderators_count=\n moderators_count, bans_count=bans_count, pagination=pagination)\n", (4106, 4300), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((5547, 5610), 'app.utils.pagination.Pagination', 'Pagination', (['page', 'query.pages', '"""admin.invitations"""'], {'state': 'state'}), "(page, query.pages, 'admin.invitations', state=state)\n", (5557, 5610), False, 'from app.utils.pagination import Pagination\n'), ((5942, 6128), 'flask.render_template', 'render_template', (['"""admin/invitations.html"""'], {'invitations': 'query.items', 'waiting': 'waiting', 'approved': 'approved', 'denied': 'denied', 'registered': 'registered', 'title': 'title', 'pagination': 'pagination'}), "('admin/invitations.html', invitations=query.items, waiting=\n waiting, approved=approved, denied=denied, registered=registered, title\n =title, pagination=pagination)\n", (5957, 6128), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((7182, 7227), 'app.utils.pagination.Pagination', 'Pagination', 
(['page', 'query.pages', '"""admin.logins"""'], {}), "(page, query.pages, 'admin.logins')\n", (7192, 7227), False, 'from app.utils.pagination import Pagination\n'), ((7263, 7280), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7278, 7280), False, 'from datetime import datetime\n'), ((7285, 7304), 'app.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7302, 7304), False, 'from app.database import db\n'), ((7317, 7488), 'flask.render_template', 'render_template', (['"""admin/logins.html"""'], {'logins': 'query.items', 'failed': 'failed', 'unique': 'unique', 'attempts': 'attempts', 'per_month': 'per_month', 'pagination': 'pagination', 'title': 'title'}), "('admin/logins.html', logins=query.items, failed=failed,\n unique=unique, attempts=attempts, per_month=per_month, pagination=\n pagination, title=title)\n", (7332, 7488), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((7770, 7801), 'app.models.user.Invitation.get_by_id', 'Invitation.get_by_id', (['invite_id'], {}), '(invite_id)\n', (7790, 7801), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((8146, 8163), 'app.utils.utils.redirect_return', 'redirect_return', ([], {}), '()\n', (8161, 8163), False, 'from app.utils.utils import redirect_return\n'), ((8364, 8395), 'app.models.user.Invitation.get_by_id', 'Invitation.get_by_id', (['invite_id'], {}), '(invite_id)\n', (8384, 8395), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((8705, 8722), 'app.utils.utils.redirect_return', 'redirect_return', ([], {}), '()\n', (8720, 8722), False, 'from app.utils.utils import redirect_return\n'), ((8933, 8947), 'app.models.event.EventLog.get', 'EventLog.get', ([], {}), '()\n', (8945, 8947), False, 'from app.models.event import EventLog\n'), ((9035, 9080), 'app.utils.pagination.Pagination', 'Pagination', (['page', 'query.pages', '"""admin.events"""'], {}), "(page, query.pages, 'admin.events')\n", (9045, 9080), False, 'from app.utils.pagination import Pagination\n'), ((9116, 9133), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9131, 9133), False, 'from datetime import datetime\n'), ((9138, 9157), 'app.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9155, 9157), False, 'from app.database import db\n'), ((9170, 9249), 'flask.render_template', 'render_template', (['"""admin/events.html"""'], {'events': 'query.items', 'pagination': 'pagination'}), "('admin/events.html', events=query.items, pagination=pagination)\n", (9185, 9249), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((9423, 9436), 'app.forms.admin.MessageForm', 'MessageForm', ([], {}), '()\n', (9434, 9436), False, 'from app.forms.admin import MessageForm\n'), ((9933, 9981), 'flask.render_template', 'render_template', (['"""admin/message.html"""'], {'form': 'form'}), "('admin/message.html', form=form)\n", (9948, 9981), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((1645, 1675), 'app.models.location.Location.get', 'Location.get', (['LocationType.ALL'], {}), '(LocationType.ALL)\n', (1657, 1675), False, 'from app.models.location import Location, LocationType\n'), ((1692, 1710), 'flask_babel._', '_', (['"""All locations"""'], {}), "('All locations')\n", (1693, 1710), False, 'from flask_babel import _\n'), ((3407, 3417), 'app.models.user.User.get', 'User.get', ([], {}), '()\n', (3415, 3417), False, 'from app.models.user import 
User, Invitation, LoginLog, InvitationState\n'), ((3434, 3444), 'flask_babel._', '_', (['"""Users"""'], {}), "('Users')\n", (3435, 3444), False, 'from flask_babel import _\n'), ((4837, 4853), 'app.models.user.Invitation.get', 'Invitation.get', ([], {}), '()\n', (4851, 4853), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((4870, 4886), 'flask_babel._', '_', (['"""Invitations"""'], {}), "('Invitations')\n", (4871, 4886), False, 'from flask_babel import _\n'), ((6639, 6653), 'app.models.user.LoginLog.get', 'LoginLog.get', ([], {}), '()\n', (6651, 6653), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((6670, 6681), 'flask_babel._', '_', (['"""Logins"""'], {}), "('Logins')\n", (6671, 6681), False, 'from flask_babel import _\n'), ((7829, 7839), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (7834, 7839), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((8082, 8101), 'app.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8099, 8101), False, 'from app.database import db\n'), ((8110, 8133), 'app.routes.user.send_invitation', 'send_invitation', (['invite'], {}), '(invite)\n', (8125, 8133), False, 'from app.routes.user import send_invitation\n'), ((8423, 8433), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (8428, 8433), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((8673, 8692), 'app.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8690, 8692), False, 'from app.database import db\n'), ((1757, 1789), 'app.models.location.Location.get', 'Location.get', (['LocationType.URBEX'], {}), '(LocationType.URBEX)\n', (1769, 1789), False, 'from app.models.location import Location, LocationType\n'), ((1806, 1826), 'flask_babel._', '_', (['"""Urbex locations"""'], {}), "('Urbex locations')\n", (1807, 1826), False, 'from flask_babel import _\n'), ((2326, 2356), 'app.models.location.Location.get', 'Location.get', (['LocationType.ALL'], {}), '(LocationType.ALL)\n', (2338, 2356), False, 'from app.models.location import Location, LocationType\n'), ((2383, 2415), 'app.models.location.Location.get', 'Location.get', (['LocationType.URBEX'], {}), '(LocationType.URBEX)\n', (2395, 2415), False, 'from app.models.location import Location, LocationType\n'), ((2448, 2486), 'app.models.location.Location.get', 'Location.get', (['LocationType.UNDERGROUND'], {}), '(LocationType.UNDERGROUND)\n', (2460, 2486), False, 'from app.models.location import Location, LocationType\n'), ((2515, 2557), 'app.models.location.Location.get_unpublished', 'Location.get_unpublished', (['LocationType.ALL'], {}), '(LocationType.ALL)\n', (2539, 2557), False, 'from app.models.location import Location, LocationType\n'), ((3488, 3505), 'app.models.user.User.get_admins', 'User.get_admins', ([], {}), '()\n', (3503, 3505), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((3522, 3533), 'flask_babel._', '_', (['"""Admins"""'], {}), "('Admins')\n", (3523, 3533), False, 'from flask_babel import _\n'), ((3919, 3929), 'app.models.user.User.get', 'User.get', ([], {}), '()\n', (3927, 3929), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((3957, 3974), 'app.models.user.User.get_admins', 'User.get_admins', ([], {}), '()\n', (3972, 3974), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((4006, 4027), 'app.models.user.User.get_moderators', 
'User.get_moderators', ([], {}), '()\n', (4025, 4027), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((4053, 4070), 'app.models.user.User.get_banned', 'User.get_banned', ([], {}), '()\n', (4068, 4070), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((4932, 4980), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.WAITING'], {}), '(InvitationState.WAITING)\n', (4955, 4980), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((4997, 5022), 'flask_babel._', '_', (['"""Waiting for approval"""'], {}), "('Waiting for approval')\n", (4998, 5022), False, 'from flask_babel import _\n'), ((5654, 5702), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.WAITING'], {}), '(InvitationState.WAITING)\n', (5677, 5702), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5726, 5775), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.APPROVED'], {}), '(InvitationState.APPROVED)\n', (5749, 5775), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5797, 5844), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.DENIED'], {}), '(InvitationState.DENIED)\n', (5820, 5844), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5870, 5921), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.REGISTERED'], {}), '(InvitationState.REGISTERED)\n', (5893, 5921), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((6731, 6752), 'app.models.user.LoginLog.get_unique', 'LoginLog.get_unique', ([], {}), '()\n', (6750, 6752), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((6769, 6784), 'flask_babel._', '_', (['"""Unique IPs"""'], {}), "('Unique IPs')\n", (6770, 6784), False, 'from flask_babel import _\n'), ((6936, 6950), 'app.models.user.LoginLog.get', 'LoginLog.get', ([], {}), '()\n', (6948, 6950), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((6972, 6993), 'app.models.user.LoginLog.get_failed', 'LoginLog.get_failed', ([], {}), '()\n', (6991, 6993), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((7015, 7036), 'app.models.user.LoginLog.get_unique', 'LoginLog.get_unique', ([], {}), '()\n', (7034, 7036), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((7061, 7086), 'app.models.user.LoginLog.get_last_month', 'LoginLog.get_last_month', ([], {}), '()\n', (7084, 7086), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((8040, 8072), 'app.models.event.ApproveInviteEvent', 'event.ApproveInviteEvent', (['invite'], {}), '(invite)\n', (8064, 8072), False, 'from app.models import event\n'), ((8634, 8663), 'app.models.event.DenyInviteEvent', 'event.DenyInviteEvent', (['invite'], {}), '(invite)\n', (8655, 8663), False, 'from app.models import event\n'), ((9714, 9724), 'app.models.user.User.get', 'User.get', ([], {}), '()\n', (9722, 9724), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((9898, 9919), 'flask.url_for', 'url_for', (['"""page.index"""'], {}), "('page.index')\n", (9905, 9919), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((1879, 
1917), 'app.models.location.Location.get', 'Location.get', (['LocationType.UNDERGROUND'], {}), '(LocationType.UNDERGROUND)\n', (1891, 1917), False, 'from app.models.location import Location, LocationType\n'), ((1934, 1960), 'flask_babel._', '_', (['"""Underground locations"""'], {}), "('Underground locations')\n", (1935, 1960), False, 'from flask_babel import _\n'), ((3581, 3602), 'app.models.user.User.get_moderators', 'User.get_moderators', ([], {}), '()\n', (3600, 3602), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((3619, 3634), 'flask_babel._', '_', (['"""Moderators"""'], {}), "('Moderators')\n", (3620, 3634), False, 'from flask_babel import _\n'), ((5069, 5118), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.APPROVED'], {}), '(InvitationState.APPROVED)\n', (5092, 5118), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5135, 5160), 'flask_babel._', '_', (['"""Approved invitations"""'], {}), "('Approved invitations')\n", (5136, 5160), False, 'from flask_babel import _\n'), ((6834, 6855), 'app.models.user.LoginLog.get_failed', 'LoginLog.get_failed', ([], {}), '()\n', (6853, 6855), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((6872, 6890), 'flask_babel._', '_', (['"""Failed logins"""'], {}), "('Failed logins')\n", (6873, 6890), False, 'from flask_babel import _\n'), ((6909, 6919), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (6914, 6919), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((9635, 9667), 'flask_babel._', '_', (['"""Email to all users was sent"""'], {}), "('Email to all users was sent')\n", (9636, 9667), False, 'from flask_babel import _\n'), ((9772, 9808), 'app.routes.message.send_message', 'send_message', (['user', 'subject', 'message'], {}), '(user, subject, message)\n', (9784, 9808), False, 'from app.routes.message import send_message\n'), ((9827, 9861), 'flask_babel._', '_', (['"""Message to all users was sent"""'], {}), "('Message to all users was sent')\n", (9828, 9861), False, 'from flask_babel import _\n'), ((2009, 2051), 'app.models.location.Location.get_unpublished', 'Location.get_unpublished', (['LocationType.ALL'], {}), '(LocationType.ALL)\n', (2033, 2051), False, 'from app.models.location import Location, LocationType\n'), ((2068, 2090), 'flask_babel._', '_', (['"""Private locations"""'], {}), "('Private locations')\n", (2069, 2090), False, 'from flask_babel import _\n'), ((2109, 2119), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2114, 2119), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((3676, 3693), 'app.models.user.User.get_banned', 'User.get_banned', ([], {}), '()\n', (3691, 3693), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((3710, 3727), 'flask_babel._', '_', (['"""Banned users"""'], {}), "('Banned users')\n", (3711, 3727), False, 'from flask_babel import _\n'), ((3746, 3756), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3751, 3756), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n'), ((5205, 5252), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.DENIED'], {}), '(InvitationState.DENIED)\n', (5228, 5252), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5269, 5292), 'flask_babel._', '_', (['"""Denied invitations"""'], {}), "('Denied 
invitations')\n", (5270, 5292), False, 'from flask_babel import _\n'), ((5341, 5392), 'app.models.user.Invitation.get_by_state', 'Invitation.get_by_state', (['InvitationState.REGISTERED'], {}), '(InvitationState.REGISTERED)\n', (5364, 5392), False, 'from app.models.user import User, Invitation, LoginLog, InvitationState\n'), ((5409, 5430), 'flask_babel._', '_', (['"""Registered users"""'], {}), "('Registered users')\n", (5410, 5430), False, 'from flask_babel import _\n'), ((5449, 5459), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (5454, 5459), False, 'from flask import Blueprint, render_template, abort, flash, url_for, redirect\n')] |
from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale
# Metric scale object tests -------------------------------------
# Test the creation of a 10k object
print(f"Test #1: Integer Positive Base, Positive Exponent")
o_TenKObject = MetricScale("10k")
if o_TenKObject.f_Value != 10000:
print(f"\tTest #1: Failed! Expected 10000 got {o_TenKObject.f_Value}")
else:
print("Success!")
del o_TenKObject
print(f"Test #2: Integer Negative Base, Positive Exponent")
o_TestObject = MetricScale("-27G")
f_Expected = float(-27000000000)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #3: Integer Positive Base, Negative Exponent")
o_TestObject = MetricScale("19n")
f_Expected = float(0.000000019)
if o_TestObject.f_Value != f_Expected:
print("Success!")
else:
print("Success!")
del o_TestObject
print(f"Test #4: Integer Negative Base, Negative Exponent")
o_TestObject = MetricScale("-24m")
f_Expected = float(-0.024)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #5: float Positive Base, Positive Exponent")
o_TestObject = MetricScale("2.3M")
f_Expected = float(2300000)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #6: float Negative Base, Positive Exponent")
o_TestObject = MetricScale("-2.1h")
f_Expected = float(-210)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #7: float Positive Base, Negative Exponent")
o_TestObject = MetricScale("4.3u")
f_Expected = float(0.0000043)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #8: Integer Negative Base, Negative Exponent")
o_TestObject = MetricScale("-1.00005p")
f_Expected = float(-0.00000000000100005)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #9: Integer input, not string")
o_TestObject = MetricScale(1234321)
f_Expected = float(1234321)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #10: float input, not string")
o_TestObject = MetricScale(105323.0)
f_Expected = float(105323.0)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #11: negative float")
o_TestObject = MetricScale(-0.435240)
f_Expected = float(-0.435240)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print(f"Test #11: empty object")
try:
o_TestObject = MetricScale()
except ValueError as ve:
print("Success!")
print(f"Test #12: Unitless string")
o_TestObject = MetricScale("130.2")
f_Expected = float(130.2)
if o_TestObject.f_Value != f_Expected:
print(f"\tTest #1: Failed! Expected {f_Expected} got {o_TestObject.f_Value}")
else:
print("Success!")
del o_TestObject
print("############## End of MetricScale() tests #####################")
print("\n############## Testing RCLowPass() ######################")
from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter
print(f"Test #1: Create a low pass filter with R=10k, c=318n")
o_TestObject = RCLowpassFilter(o_Resistance="10k", o_Capacitance="318n")
f_ExpectedResistance = 10000
f_ExpectedCapacitance = 0.000000318
if o_TestObject.f_Resistance != f_ExpectedResistance and o_TestObject.f_Capacitance != f_ExpectedCapacitance:
print(f"\tFailure: Expected R={f_ExpectedResistance} and C={f_ExpectedCapacitance}, got {o_TestObject.o_Resistance} and {o_TestObject.o_Capacitance}")
else:
print("Success!")
del o_TestObject
print(f"Test #2: Create a low pass filter with R=5.6M, c=4.2u")
o_TestObject = RCLowpassFilter(o_Resistance="5.6M", o_Capacitance="4.2u")
f_ExpectedResistance = 5600000
f_ExpectedCapacitance = 0.0000042
if o_TestObject.f_Resistance != f_ExpectedResistance and o_TestObject.f_Capacitance != f_ExpectedCapacitance:
print(f"\tFailure: Expected R={f_ExpectedResistance} and C={f_ExpectedCapacitance}, got {o_TestObject.o_Resistance} and {o_TestObject.o_Capacitance}")
else:
print("Success!")
del o_TestObject
print(f"Test #3: Create a low pass filter with R=130.2k, c=93.4n and get the cut-off frequency")
o_TestObject = RCLowpassFilter(o_Resistance="130.2k", o_Capacitance="93.4n")
f_ExpectedFrequency = 13.1
if o_TestObject.f_CutoffFrequency != f_ExpectedFrequency:
print(f"\tFailure: Expected Fc={f_ExpectedFrequency} and got {o_TestObject.f_CutoffFrequency}")
else:
print(f"Success")
del o_TestObject
print(f"Test #3: Create a low pass filter with R=433, c=42.9u and get the cut-off frequency")
o_TestObject = RCLowpassFilter(o_Resistance="433", o_Capacitance="42.9u")
f_ExpectedFrequency = 8.57
if o_TestObject.f_CutoffFrequency != f_ExpectedFrequency:
print(f"\tFailure: Expected Fc={f_ExpectedFrequency} and got {o_TestObject.f_CutoffFrequency}")
else:
print(f"Success")
print(f"Test #3: Create a low pass filter with R=3.4M, c=5.9n and get the time constant")
o_TestObject = RCLowpassFilter(o_Resistance="3.4M", o_Capacitance="5.9n")
f_Expected = o_TestObject.f_Capacitance*o_TestObject.f_Resistance
if o_TestObject.f_Tau != f_Expected:
print(f"\tFailure: Expected Fc={f_Expected} and got {o_TestObject.f_Tau}")
else:
print(f"Success")
| [
"app.filterdesigner.classes.passiveFilters.metricScale.MetricScale",
"app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter"
] | [((261, 279), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""10k"""'], {}), "('10k')\n", (272, 279), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((516, 535), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""-27G"""'], {}), "('-27G')\n", (527, 535), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((822, 840), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""19n"""'], {}), "('19n')\n", (833, 840), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((1067, 1086), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""-24m"""'], {}), "('-24m')\n", (1078, 1086), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((1365, 1384), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""2.3M"""'], {}), "('2.3M')\n", (1376, 1384), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((1664, 1684), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""-2.1h"""'], {}), "('-2.1h')\n", (1675, 1684), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((1961, 1980), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""4.3u"""'], {}), "('4.3u')\n", (1972, 1980), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((2264, 2288), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""-1.00005p"""'], {}), "('-1.00005p')\n", (2275, 2288), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((2568, 2588), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['(1234321)'], {}), '(1234321)\n', (2579, 2588), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((2854, 2875), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['(105323.0)'], {}), '(105323.0)\n', (2865, 2875), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((3134, 3155), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['(-0.43524)'], {}), '(-0.43524)\n', (3145, 3155), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((3534, 3554), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', (['"""130.2"""'], {}), "('130.2')\n", (3545, 3554), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n'), ((4070, 4127), 'app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter', 'RCLowpassFilter', ([], {'o_Resistance': '"""10k"""', 'o_Capacitance': '"""318n"""'}), "(o_Resistance='10k', o_Capacitance='318n')\n", (4085, 4127), False, 'from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter\n'), ((4607, 4665), 'app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter', 'RCLowpassFilter', ([], {'o_Resistance': '"""5.6M"""', 'o_Capacitance': '"""4.2u"""'}), "(o_Resistance='5.6M', o_Capacitance='4.2u')\n", (4622, 4665), False, 'from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter\n'), ((5177, 5238), 
'app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter', 'RCLowpassFilter', ([], {'o_Resistance': '"""130.2k"""', 'o_Capacitance': '"""93.4n"""'}), "(o_Resistance='130.2k', o_Capacitance='93.4n')\n", (5192, 5238), False, 'from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter\n'), ((5599, 5657), 'app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter', 'RCLowpassFilter', ([], {'o_Resistance': '"""433"""', 'o_Capacitance': '"""42.9u"""'}), "(o_Resistance='433', o_Capacitance='42.9u')\n", (5614, 5657), False, 'from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter\n'), ((5997, 6055), 'app.classes.passiveFilters.rcLowpassFilter.RCLowpassFilter', 'RCLowpassFilter', ([], {'o_Resistance': '"""3.4M"""', 'o_Capacitance': '"""5.9n"""'}), "(o_Resistance='3.4M', o_Capacitance='5.9n')\n", (6012, 6055), False, 'from app.classes.passiveFilters.rcLowpassFilter import RCLowpassFilter\n'), ((3416, 3429), 'app.filterdesigner.classes.passiveFilters.metricScale.MetricScale', 'MetricScale', ([], {}), '()\n', (3427, 3429), False, 'from app.filterdesigner.classes.passiveFilters.metricScale import MetricScale\n')] |
# coding=utf-8
import os
def write_file(file_path, *content):
    # os.path.dirname() returns '' for bare filenames and os.makedirs('') raises, so guard it.
    dir_name = os.path.dirname(file_path)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)
with open(file_path, 'a', encoding='utf-8') as f:
f.write(''.join(content))
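# Illustrative usage (the path below is a made-up placeholder):
#   write_file('output/example.txt', 'first chunk, ', 'second chunk\n')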
| [
"os.path.dirname"
] | [((80, 106), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (95, 106), False, 'import os\n')] |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataprovider(builder_function):
"""Decorator defining that this test gets parameters from the given
    builder_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
"""
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap
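# Illustrative sketch of how this decorator is typically combined with
# @genty_dataset (the class is assumed to be decorated with @genty; the
# method names below are made up):
#
#   @genty_dataset((4, 2), (16, 4))
#   def build_args(self, value, root):
#       return value, root
#
#   @genty_dataprovider(build_args)
#   def test_square_root(self, value, root):
#       assert root * root == value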
def genty_dataset(*args, **kwargs):
"""Decorator defining data sets to provide to a test.
Inspired by http://sebastian-bergmann.de/archives/
702-Data-Providers-in-PHPUnit-3.2.html
The canonical way to call @genty_dataset, with each argument each
representing a data set to be injected in the test method call:
@genty_dataset(
('a1', 'b1'),
('a2', 'b2'),
)
def test_some_function(a, b)
...
If the test function takes only one parameter, you can replace the tuples
by a single value. So instead of the more verbose:
@genty_dataset(
('c1',),
('c2',),
)
def test_some_other_function(c)
...
One can write:
@genty_dataset('c1', 'c2')
def test_some_other_function(c)
...
For each set of arguments, a suffix identifying that argument set is
built by concatenating the string representation of the arguments
together. You can control the test names for each data set by passing
the data sets as keyword args, where the keyword is the desired suffix.
For example:
@genty_dataset(
        ('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named 'test_function_for_a1_and_b1', while
@genty_dataset(
happy_path=('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named test_function_for_happy_path. These are just
parameters to a method call, so one can have unnamed args first
followed by keyword args
@genty_dataset(
('x', 'y'),
('p', 'q'),
Monday=('a1', 'b1'),
Tuesday=('t1', 't2'),
)
def test_function(a, b)
...
Finally, datasets can be chained. Useful for example if there are
distinct sets of params that make sense (cleaner, more readable, or
semantically nicer) if kept separate. A fabricated example:
@genty_dataset(
*([i for i in range(10)] + [(i, i) for i in range(10)])
)
def test_some_other_function(param1, param2=None)
...
-- vs --
@genty_dataset(*[i for i in range(10)])
@genty_dataset(*[(i, i) for i in range(10)])
def test_some_other_function(param1, param2=None)
...
If the names of datasets conflict across chained genty_datasets, the
key&value pair from the outer (first) decorator will override the
data from the inner.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
datasets = _build_datasets(*args, **kwargs)
def wrap(test_method):
# Save the datasets in the test method. This data will be consumed
# by the @genty decorator.
if not hasattr(test_method, 'genty_datasets'):
test_method.genty_datasets = OrderedDict()
test_method.genty_datasets.update(datasets)
return test_method
return wrap
def _build_datasets(*args, **kwargs):
"""Build the datasets into a dict, where the keys are the name of the
data set and the values are the data sets themselves.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
:return:
The dataset dict.
:rtype:
`dict`
"""
datasets = OrderedDict()
_add_arg_datasets(datasets, args)
_add_kwarg_datasets(datasets, kwargs)
return datasets
def _add_arg_datasets(datasets, args):
"""Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
"""
for dataset in args:
# turn a value into a 1-tuple.
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
# Create a test_name_suffix - basically the parameter list
if isinstance(dataset, GentyArgs):
dataset_strings = dataset # GentyArgs supports iteration
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
def _add_kwarg_datasets(datasets, kwargs):
"""Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset
| [
"six.iteritems",
"ordereddict.OrderedDict"
] | [((5144, 5157), 'ordereddict.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5155, 5157), False, 'from ordereddict import OrderedDict\n'), ((6422, 6443), 'six.iteritems', 'six.iteritems', (['kwargs'], {}), '(kwargs)\n', (6435, 6443), False, 'import six\n'), ((4564, 4577), 'ordereddict.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4575, 4577), False, 'from ordereddict import OrderedDict\n')] |
import argparse
from concurrent.futures import ProcessPoolExecutor
from article_separation.image_segmentation.net_post_processing.separator_net_post_processor import SeparatorNetPostProcessor
from article_separation.image_segmentation.net_post_processing.heading_net_post_processor import HeadingNetPostProcessor
from python_util.io.file_loader import load_list_file
def run_separator(image_list, path_to_pb, fixed_height, scaling_factor, threshold):
post_processor = SeparatorNetPostProcessor(image_list, path_to_pb, fixed_height, scaling_factor, threshold,
gpu_devices='')
post_processor.run()
def run_heading(image_list, path_to_pb, fixed_height=900, scaling_factor=1, is_heading_threshold=0.4, weight_dict=None,
thresh_dict=None, text_line_percentage=0.8):
if thresh_dict is None:
thresh_dict = {'net_thresh': 1.0, 'stroke_width_thresh': 1.0, 'text_height_thresh': 0.9, 'sw_th_thresh': 0.9}
if weight_dict is None:
weight_dict = {'net': 0.8, 'stroke_width': 0.0, 'text_height': 0.2}
post_processor = HeadingNetPostProcessor(image_list, path_to_pb, fixed_height, scaling_factor, weight_dict,
is_heading_threshold, thresh_dict, text_line_percentage)
post_processor.run(gpu_device='')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--path_to_image_list", type=str, required=True,
help="Path to the list file holding the image paths.")
parser.add_argument("--path_to_pb", type=str, required=True,
help="Path to the TensorFlow pixel labelling graph.")
parser.add_argument("--num_processes", type=int, required=False,
help="Number of processes that run in parallel.", default=8)
parser.add_argument("--fixed_height", type=int, required=False,
help="Input image height")
parser.add_argument("--scaling_factor", type=float, required=False,
help="Scaling factor of images.", default=1.0)
parser.add_argument("--mode", type=str, required=True, choices=['heading', 'separator'],
help="Which information should be processed, e.g. headings or separator.")
parser.add_argument("--threshold", type=float, required=False,
help="Threshold for binarization of net output.", default=0.05)
args = parser.parse_args()
mode = args.mode
image_path_list = load_list_file(args.path_to_image_list)
path_to_pb = args.path_to_pb
num_processes = args.num_processes
if args.fixed_height is None:
if mode == 'heading':
fixed_height = 900
elif mode == 'separator':
fixed_height = 1500
else:
fixed_height = args.fixed_height
scaling_factor = args.scaling_factor
threshold = args.threshold
MAX_SUBLIST_SIZE = 50
with ProcessPoolExecutor(num_processes) as executor:
size_sub_lists = len(image_path_list) // num_processes
if size_sub_lists == 0:
size_sub_lists = 1
num_processes = len(image_path_list)
size_sub_lists = min(MAX_SUBLIST_SIZE, size_sub_lists)
image_path_sub_lists = [image_path_list[i: i + size_sub_lists] for i in
range(0, len(image_path_list), size_sub_lists)]
if mode == 'separator':
run_args = ((image_path_sub_list, path_to_pb, fixed_height, scaling_factor, threshold) for
image_path_sub_list in image_path_sub_lists)
[executor.submit(run_separator, *run_arg) for run_arg in run_args]
elif mode == 'heading':
run_args = ((image_path_sub_list, path_to_pb, fixed_height, scaling_factor, 0.4, None, None, 0.8)
for image_path_sub_list in image_path_sub_lists)
[executor.submit(run_heading, *run_arg) for run_arg in run_args]
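# Illustrative invocation (script name and paths are placeholders):
#   python run_net_post_processing.py \
#       --path_to_image_list /path/to/images.lst \
#       --path_to_pb /path/to/model.pb \
#       --mode separator --num_processes 4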
| [
"python_util.io.file_loader.load_list_file",
"article_separation.image_segmentation.net_post_processing.separator_net_post_processor.SeparatorNetPostProcessor",
"argparse.ArgumentParser",
"article_separation.image_segmentation.net_post_processing.heading_net_post_processor.HeadingNetPostProcessor",
"concurrent.futures.ProcessPoolExecutor"
] | [((475, 585), 'article_separation.image_segmentation.net_post_processing.separator_net_post_processor.SeparatorNetPostProcessor', 'SeparatorNetPostProcessor', (['image_list', 'path_to_pb', 'fixed_height', 'scaling_factor', 'threshold'], {'gpu_devices': '""""""'}), "(image_list, path_to_pb, fixed_height,\n scaling_factor, threshold, gpu_devices='')\n", (500, 585), False, 'from article_separation.image_segmentation.net_post_processing.separator_net_post_processor import SeparatorNetPostProcessor\n'), ((1108, 1263), 'article_separation.image_segmentation.net_post_processing.heading_net_post_processor.HeadingNetPostProcessor', 'HeadingNetPostProcessor', (['image_list', 'path_to_pb', 'fixed_height', 'scaling_factor', 'weight_dict', 'is_heading_threshold', 'thresh_dict', 'text_line_percentage'], {}), '(image_list, path_to_pb, fixed_height,\n scaling_factor, weight_dict, is_heading_threshold, thresh_dict,\n text_line_percentage)\n', (1131, 1263), False, 'from article_separation.image_segmentation.net_post_processing.heading_net_post_processor import HeadingNetPostProcessor\n'), ((1381, 1406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1404, 1406), False, 'import argparse\n'), ((2542, 2581), 'python_util.io.file_loader.load_list_file', 'load_list_file', (['args.path_to_image_list'], {}), '(args.path_to_image_list)\n', (2556, 2581), False, 'from python_util.io.file_loader import load_list_file\n'), ((2976, 3010), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', (['num_processes'], {}), '(num_processes)\n', (2995, 3010), False, 'from concurrent.futures import ProcessPoolExecutor\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from argparse import ArgumentParser
from logging import DEBUG, INFO, basicConfig
from exchangelib.util import PrettyXmlHandler
from .core import main
from .utils import valid_date
def cli():
parser = ArgumentParser()
parser.add_argument(
"-N", "--offset", help="offset day integer.", type=int, default=1
)
parser.add_argument("--date", help="Date formats. (YYYY-MM-DD)", type=valid_date)
parser.add_argument("-d", "--debug", help="debug mode", action="store_true")
args = parser.parse_args()
if args.debug:
basicConfig(
format="%(asctime)s:%(name)s:%(levelname)s:%(message)s",
level=DEBUG,
handlers=[PrettyXmlHandler()],
)
else:
basicConfig(level=INFO)
main(date=args.date, n=args.offset)
if __name__ == "__main__":
cli()
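# Illustrative invocation (the module path is a placeholder):
#   python -m report_tool.cli -N 2 --date 2020-01-15 --debug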
# vim: fileencoding=utf-8
| [
"logging.basicConfig",
"exchangelib.util.PrettyXmlHandler",
"argparse.ArgumentParser"
] | [((296, 312), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (310, 312), False, 'from argparse import ArgumentParser\n'), ((821, 844), 'logging.basicConfig', 'basicConfig', ([], {'level': 'INFO'}), '(level=INFO)\n', (832, 844), False, 'from logging import DEBUG, INFO, basicConfig\n'), ((772, 790), 'exchangelib.util.PrettyXmlHandler', 'PrettyXmlHandler', ([], {}), '()\n', (788, 790), False, 'from exchangelib.util import PrettyXmlHandler\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Script created by <NAME> [IVT-360]
# Game with nature theory
import csv
def main():
    # The coefficients are used in arithmetic below, so convert the input strings to float.
    gurvitz_alpha = float(input("Enter the coefficient for the Hurwicz criterion [0, 1]: "))
    hojj_leman_alpha = float(input("Enter the coefficient for the Hodge-Lehmann criterion [0, 1]: "))
input_data = matrix_to_float(read_data())
print(input_data)
probability_vector = input_data[0]
payoff_matrix = input_data[1:]
# Calculations
vald = Vald_criterium(payoff_matrix)
savidge = Savidge_criterium(payoff_matrix)
laplas_baies = Laplas_Baies_criterium(probability_vector, payoff_matrix)
ghurvitz = Ghurvitz_criterium(gurvitz_alpha, payoff_matrix)
ghermeyer = Ghermeyer_criterium(probability_vector, payoff_matrix)
hojj_leman = Hojj_Leman_criterium(hojj_leman_alpha, probability_vector, payoff_matrix)
# Output
with open('output.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=';')
# Write header
csvwriter.writerow([probability_vector, 'Vald', 'Savidge', 'Laplas-Baies', 'Ghurvitz', 'Ghermeyer', 'Hojj-Leman'])
# Write data
for i in range(0, len(payoff_matrix)):
csvwriter.writerow([payoff_matrix[i], vald[i], savidge[i], laplas_baies[i], ghurvitz[i], ghermeyer[i], hojj_leman[i]])
# read input from csv
def read_data():
data = []
with open('input.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=';')
for row in csvreader:
data.append(row)
return data
# Various criteria of "game with nature" theory
# Max(Mins)
def Vald_criterium(payoff_matrix):
criterium_row = []
for row in payoff_matrix:
criterium_row.append(min(row))
value_of_best = max(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
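# Illustrative worked example (made-up numbers): for payoff_matrix = [[1, 4], [2, 3]],
# the row minima are [1, 2]; their maximum is 2, so the Wald criterion highlights
# strategy index 1.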
# Min(Max - Min)
def Savidge_criterium(payoff_matrix):
criterium_row = []
for row in payoff_matrix:
criterium_row.append(max(row) - min(row))
value_of_best = min(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
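# Illustrative worked example (made-up numbers): for payoff_matrix = [[1, 4], [2, 3]],
# the per-row (max - min) values as computed here are [3, 1]; the minimum is 1,
# so strategy index 1 is highlighted.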
# Max(summ(a_i * q_i))
def Laplas_Baies_criterium(probability_vector, payoff_matrix):
criterium_row = []
for row in payoff_matrix:
criterium_for_row = 0.0
for i in range(0, len(row)):
criterium_for_row += row[i] * probability_vector[i]
criterium_row.append(criterium_for_row)
value_of_best = max(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
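# Illustrative worked example (made-up numbers): with probability_vector = [0.2, 0.8]
# and payoff_matrix = [[1, 4], [2, 3]], the expected payoffs are [3.4, 2.8],
# so strategy index 0 is highlighted.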
# Max(alpha * min(row) + (1-alpha) * max(row))
def Ghurvitz_criterium(alpha, payoff_matrix):
criterium_row = []
for row in payoff_matrix:
criterium_row.append(alpha * min(row) + (1-alpha) * max(row))
value_of_best = max(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
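# Illustrative worked example (made-up numbers): with alpha = 0.3 and
# payoff_matrix = [[1, 4], [2, 3]], the scores are
# [0.3*1 + 0.7*4, 0.3*2 + 0.7*3] = [3.1, 2.7], so strategy index 0 is highlighted.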
# Min(Max(a_i*q_i of each in row))
def Ghermeyer_criterium(probability_vector, payoff_matrix):
criterium_row = []
for row in payoff_matrix:
inefficiency_row = []
for i in range(0, len(row)):
inefficiency_row.append(row[i] * probability_vector[i])
criterium_for_row = max(inefficiency_row)
criterium_row.append(criterium_for_row)
value_of_best = min(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
# Max(alpha * (a_i * q_i) + (1-alpha) * min(row))
def Hojj_Leman_criterium(alpha, probability_vector, payoff_matrix):
criterium_row = []
for row in payoff_matrix:
criterium_for_row = 0.0
for i in range(0, len(row)):
criterium_for_row += (alpha * (row[i] * probability_vector[i]) +
(1-alpha) * min(row))
criterium_row.append(criterium_for_row)
value_of_best = max(criterium_row)
index_of_best = criterium_row.index(value_of_best)
return highlight_best_value_row(criterium_row, index_of_best, value_of_best)
# Helpers
# highlight best value in criterium row
def highlight_best_value_row(criterium_row, index_of_best, value_of_best):
criterium_row[index_of_best] = "| {} |".format(str(round(value_of_best, 2)))
return criterium_row
# Convert matrix os strings to matrix of floats
def matrix_to_float(matrix):
matrix_height = len(matrix)
matrix_width = len(matrix[0])
for row in range(0, matrix_height):
for col in range(0, matrix_width):
matrix[row][col] = float(matrix[row][col])
return matrix
# ===============================================================
# Execution
# ===============================================================
main()
| [
"csv.writer",
"csv.reader"
] | [((931, 965), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (941, 965), False, 'import csv\n'), ((1401, 1435), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (1411, 1435), False, 'import csv\n')] |
import argparse
import os
import shutil
import sys
import time
sys.path.append('..')
sys.path.append('../cubenet')
import pickle as pkl
import numpy as np
import tensorflow as tf
from skimage.io import imread, imsave
from utils import make_dirs, manage_directories
from architectures import GVGG
from dataloader import DataLoader
##### Training #####
def test(args):
print('...Building inputs')
tf.reset_default_graph()
print('...Connecting data io and preprocessing')
with tf.device("/cpu:0"):
with tf.name_scope("IO"):
test_data = DataLoader(args.test_file, 'test', args.batch_size,
args.height, args.jitter, shuffle=False)
args.n_classes = test_data.n_classes
args.data_size = test_data.data_size
print("Found {} test examples".format(args.data_size))
test_iterator = test_data.data.make_initializable_iterator()
test_inputs, test_targets = test_iterator.get_next()
test_inputs.set_shape([args.batch_size, args.height, args.width, args.depth, 1])
test_init_op = test_iterator.make_initializer(test_data.data)
# Outputs
print('...Constructing model')
with tf.get_default_graph().as_default():
with tf.variable_scope("model", reuse=False):
model = GVGG(test_inputs, False, args)
test_logits = model.pred_logits
test_preds = tf.nn.softmax(test_logits)
# Prediction loss
print("...Building metrics")
preds = tf.to_int32(tf.argmax(test_preds, 1))
test_accuracy = tf.contrib.metrics.accuracy(preds, test_targets)
# HACK: Rotation averaging is brittle.
preds_rot = tf.to_int32(tf.argmax(tf.reduce_mean(test_preds, 0)))
test_targets_rot = test_targets[0]
test_accuracy_rot = tf.contrib.metrics.accuracy(preds_rot, test_targets_rot)
with tf.Session() as sess:
# Load pretrained model, ignoring final layer
print('...Restore variables')
tf.global_variables_initializer().run()
restorer = tf.train.Saver()
model_path = tf.train.latest_checkpoint(args.save_dir)
restorer.restore(sess, model_path)
accuracies = []
accuracies_rotavg = []
print("...Testing")
sess.run([test_init_op])
for i in range(args.data_size // args.batch_size):
tacc, tacc_rotavg = sess.run([test_accuracy, test_accuracy_rot])
accuracies.append(tacc)
accuracies_rotavg.append(tacc_rotavg)
sys.stdout.write("[{} | {}] Running acc: {:0.4f}, Running rot acc: {:0.4f}\r".format(i*args.batch_size, args.data_size, np.mean(accuracies), np.mean(accuracies_rotavg)))
sys.stdout.flush()
print()
print("Test accuracy: {:04f}".format(np.mean(accuracies)))
print("Test accuracy rot avg: {:04f}".format(np.mean(accuracies_rotavg)))
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", help="minibatch size", type=int, default=12)
parser.add_argument("--height", help="input image height", type=int, default=32)
parser.add_argument("--width", help="input image width", type=int, default=32)
parser.add_argument("--depth", help="input image depth", type=int, default=32)
parser.add_argument("--n_channels", help="number of input image channels", type=int, default=9*4)
parser.add_argument("--kernel_size", help="number of channel in first layer", type=int, default=3)
parser.add_argument("--first_kernel_size", help="number of channel in first layer", type=int, default=5)
parser.add_argument("--n_classes", help="number of output classes", type=int, default=10)
parser.add_argument("--jitter", help="amount of test time jitter", type=int, default=0)
parser.add_argument("--group", help='group', type=str, default='V')
parser.add_argument("--drop_sigma", help='dropout rate', type=float, default=0.0)
parser.add_argument("--group_rotations", help="whether to rotation average", type=bool, default=True)
parser.add_argument("--preprocess", help="whether to preprocess images", type=bool, default=True)
parser.add_argument("--min_after_dequeue", help="minimum number of images to keep in RAM", type=int, default=500)
parser.add_argument("--test_file", help="directory of test addresses", default="./addresses/modelnet10_test_addresses.txt")
parser.add_argument("--save_dir", help="directory to save results", default="./models/model_0/checkpoints")
test(parser.parse_args())
| [
"architectures.GVGG",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"sys.path.append",
"dataloader.DataLoader",
"numpy.mean",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.contrib.metrics.accuracy",
"sys.stdout.flush",
"tensorflow.get_default_graph",
"tensorflow.device",
"tensorflow.variable_scope",
"tensorflow.train.latest_checkpoint",
"tensorflow.reset_default_graph",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.name_scope"
] | [((64, 85), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (79, 85), False, 'import sys\n'), ((86, 115), 'sys.path.append', 'sys.path.append', (['"""../cubenet"""'], {}), "('../cubenet')\n", (101, 115), False, 'import sys\n'), ((410, 434), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (432, 434), True, 'import tensorflow as tf\n'), ((3060, 3085), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3083, 3085), False, 'import argparse\n'), ((498, 517), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (507, 517), True, 'import tensorflow as tf\n'), ((1965, 1977), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1975, 1977), True, 'import tensorflow as tf\n'), ((2146, 2162), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2160, 2162), True, 'import tensorflow as tf\n'), ((2184, 2225), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['args.save_dir'], {}), '(args.save_dir)\n', (2210, 2225), True, 'import tensorflow as tf\n'), ((532, 551), 'tensorflow.name_scope', 'tf.name_scope', (['"""IO"""'], {}), "('IO')\n", (545, 551), True, 'import tensorflow as tf\n'), ((577, 674), 'dataloader.DataLoader', 'DataLoader', (['args.test_file', '"""test"""', 'args.batch_size', 'args.height', 'args.jitter'], {'shuffle': '(False)'}), "(args.test_file, 'test', args.batch_size, args.height, args.\n jitter, shuffle=False)\n", (587, 674), False, 'from dataloader import DataLoader\n'), ((1291, 1330), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(False)'}), "('model', reuse=False)\n", (1308, 1330), True, 'import tensorflow as tf\n'), ((1352, 1382), 'architectures.GVGG', 'GVGG', (['test_inputs', '(False)', 'args'], {}), '(test_inputs, False, args)\n', (1356, 1382), False, 'from architectures import GVGG\n'), ((1452, 1478), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['test_logits'], {}), '(test_logits)\n', (1465, 1478), True, 'import tensorflow as tf\n'), ((1637, 1685), 'tensorflow.contrib.metrics.accuracy', 'tf.contrib.metrics.accuracy', (['preds', 'test_targets'], {}), '(preds, test_targets)\n', (1664, 1685), True, 'import tensorflow as tf\n'), ((1894, 1950), 'tensorflow.contrib.metrics.accuracy', 'tf.contrib.metrics.accuracy', (['preds_rot', 'test_targets_rot'], {}), '(preds_rot, test_targets_rot)\n', (1921, 1950), True, 'import tensorflow as tf\n'), ((2805, 2823), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2821, 2823), False, 'import sys\n'), ((1240, 1262), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1260, 1262), True, 'import tensorflow as tf\n'), ((1583, 1607), 'tensorflow.argmax', 'tf.argmax', (['test_preds', '(1)'], {}), '(test_preds, 1)\n', (1592, 1607), True, 'import tensorflow as tf\n'), ((2087, 2120), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2118, 2120), True, 'import tensorflow as tf\n'), ((2898, 2917), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2905, 2917), True, 'import numpy as np\n'), ((2973, 2999), 'numpy.mean', 'np.mean', (['accuracies_rotavg'], {}), '(accuracies_rotavg)\n', (2980, 2999), True, 'import numpy as np\n'), ((1783, 1812), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['test_preds', '(0)'], {}), '(test_preds, 0)\n', (1797, 1812), True, 'import tensorflow as tf\n'), ((2743, 2762), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2750, 2762), True, 'import numpy as np\n'), 
((2764, 2790), 'numpy.mean', 'np.mean', (['accuracies_rotavg'], {}), '(accuracies_rotavg)\n', (2771, 2790), True, 'import numpy as np\n')] |
from utilities.validators import validate_gtfs_representation
from utilities.constants import AGENCY_NAME
def process_agencies_count_for_gtfs_metadata(gtfs_representation):
"""Process and count all the agencies in the `agency` file from the GTFS dataset of the representation.
Add the agencies count to the representation metadata once processed.
:param gtfs_representation: The representation of the GTFS dataset to process.
:return: The representation of the GTFS dataset post-execution.
"""
validate_gtfs_representation(gtfs_representation)
dataset = gtfs_representation.dataset
metadata = gtfs_representation.metadata
agency_is_present = (
dataset.agency is not None and AGENCY_NAME in dataset.agency.columns
)
if agency_is_present:
# Count agencies
agencies_count = dataset.agency[AGENCY_NAME].size
# Set the main timezone in the GTFS representation
# if there is one agency or more
if agencies_count > 0:
metadata.agencies_count = agencies_count
return gtfs_representation
| [
"utilities.validators.validate_gtfs_representation"
] | [((520, 569), 'utilities.validators.validate_gtfs_representation', 'validate_gtfs_representation', (['gtfs_representation'], {}), '(gtfs_representation)\n', (548, 569), False, 'from utilities.validators import validate_gtfs_representation\n')] |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class AppTag(models.Model):
name = models.CharField("Application Tag", max_length=10, blank=False)
def __str__(self):
return "{}".format(self.name)
class Message(models.Model):
"""
    Messages sent by users.
"""
user_from = models.ForeignKey(User, on_delete=models.CASCADE, related_name="messages_from")
user_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="messages_to")
tag = models.ForeignKey(AppTag, on_delete=models.CASCADE, related_name="messages_tag")
message = models.CharField(max_length=1000)
date = models.DateTimeField('date created')
def __str__(self):
return "{}".format(self.message)
| [
"django.db.models.DateTimeField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((141, 204), 'django.db.models.CharField', 'models.CharField', (['"""Application Tag"""'], {'max_length': '(10)', 'blank': '(False)'}), "('Application Tag', max_length=10, blank=False)\n", (157, 204), False, 'from django.db import models\n'), ((342, 421), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""messages_from"""'}), "(User, on_delete=models.CASCADE, related_name='messages_from')\n", (359, 421), False, 'from django.db import models\n'), ((436, 513), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""messages_to"""'}), "(User, on_delete=models.CASCADE, related_name='messages_to')\n", (453, 513), False, 'from django.db import models\n'), ((524, 609), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AppTag'], {'on_delete': 'models.CASCADE', 'related_name': '"""messages_tag"""'}), "(AppTag, on_delete=models.CASCADE, related_name='messages_tag'\n )\n", (541, 609), False, 'from django.db import models\n'), ((619, 652), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (635, 652), False, 'from django.db import models\n'), ((664, 700), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date created"""'], {}), "('date created')\n", (684, 700), False, 'from django.db import models\n')] |
from io import BytesIO
from django.conf import settings
from django.template.loader import get_template
import PyPDF2
from api.pdf import render as render_pdf
def generate_pdf(data):
# Add date to the payload
# today = date.today().strftime('%d-%b-%Y')
# data['date'] = today
# #######################
# # Notice To Disputant - Response
# #
# # Make the Violation Ticket Number all upper case
# try:
# x = data['ticketNumber']['prefix']
# data['ticketNumber']['prefix'] = x.upper()
# except KeyError:
# pass
# # Format the data more user friendly
# try:
# x = datetime.strptime(data['ticketDate'],'%Y-%m-%d')
# data['ticketDate'] = x.strftime('%d-%b-%Y')
# except KeyError:
# pass
template = "notice-to-disputant-response.html"
template = get_template(template)
html_content = template.render(data)
pdf_content = render_pdf(html_content)
return pdf_content
def merge_pdf(queryset):
    """Decrypt each prepared PDF in the queryset and merge all of their pages into a single in-memory PDF."""
    pdfWriter = PyPDF2.PdfFileWriter()
pdfOutput = BytesIO()
for preparedPdf in queryset.iterator():
pdf_data = settings.ENCRYPTOR.decrypt(preparedPdf.key_id, preparedPdf.data)
pdfReader = PyPDF2.PdfFileReader(BytesIO(pdf_data))
for pageNum in range(pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfWriter.write(pdfOutput)
return pdfOutput
| [
"django.conf.settings.ENCRYPTOR.decrypt",
"io.BytesIO",
"api.pdf.render",
"PyPDF2.PdfFileWriter",
"django.template.loader.get_template"
] | [((856, 878), 'django.template.loader.get_template', 'get_template', (['template'], {}), '(template)\n', (868, 878), False, 'from django.template.loader import get_template\n'), ((938, 962), 'api.pdf.render', 'render_pdf', (['html_content'], {}), '(html_content)\n', (948, 962), True, 'from api.pdf import render as render_pdf\n'), ((1029, 1051), 'PyPDF2.PdfFileWriter', 'PyPDF2.PdfFileWriter', ([], {}), '()\n', (1049, 1051), False, 'import PyPDF2\n'), ((1068, 1077), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1075, 1077), False, 'from io import BytesIO\n'), ((1141, 1205), 'django.conf.settings.ENCRYPTOR.decrypt', 'settings.ENCRYPTOR.decrypt', (['preparedPdf.key_id', 'preparedPdf.data'], {}), '(preparedPdf.key_id, preparedPdf.data)\n', (1167, 1205), False, 'from django.conf import settings\n'), ((1247, 1264), 'io.BytesIO', 'BytesIO', (['pdf_data'], {}), '(pdf_data)\n', (1254, 1264), False, 'from io import BytesIO\n')] |
# ===============================================================================
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from pychron.core.ui import set_qt
set_qt()
# ============= enthought library imports =======================
from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range
from traitsui.api import View, Item, UItem, VGroup, HGroup, spring
# from pychron.envisage.tasks.base_editor import BaseTraitsEditor
# from pychron.loggable import Loggable
# from pychron.canvas.canvas2D.raster_canvas import RasterCanvas
from enable.component_editor import ComponentEditor
from pychron.lasers.power.power_mapper import PowerMapper
from pychron.core.ui.thread import Thread
from pychron.lasers.power.power_map_processor import PowerMapProcessor
from pychron.managers.data_managers.h5_data_manager import H5DataManager
# from pychron.graph.graph import Graph
# from pychron.graph.contour_graph import ContourGraph
# from chaco.plot_containers import HPlotContainer
from pychron.lasers.tasks.editors.laser_editor import LaserEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class PowerMapControls(HasTraits):
beam_diameter = Float(1)
request_power = Float(1)
padding = Float(1.0)
step_length = Float(0.25)
center_x = Float(0)
center_y = Float(0)
integration = Int(1)
discrete_scan = Bool(False)
def traits_view(self):
v = View(
VGroup(
Item("discrete_scan"),
Item("beam_diameter"),
Item("request_power"),
Item("padding"),
Item("step_length"),
Item("center_x"),
Item("center_y"),
)
)
return v
class PowerMapEditor(LaserEditor):
percent_threshold = Range(0.0, 100.0)
beam_diameter = Float
power = Float
# canvas = Instance(RasterCanvas, ())
editor = Instance(PowerMapControls, ())
mapper = Instance(PowerMapper, ())
completed = DelegatesTo("mapper")
# was_executed = False
processor = Instance(PowerMapProcessor)
def _percent_threshold_changed(self, new):
if self.processor:
self.processor.set_percent_threshold(new)
def load(self, path):
pmp = PowerMapProcessor()
reader = H5DataManager()
reader.open_data(path)
cg = pmp.load_graph(reader)
self.beam_diameter, self.power = pmp.extract_attrs(["beam_diameter", "power"])
self.component = cg.plotcontainer
self.was_executed = True
self.processor = pmp
def _do_execute(self):
mapper = self.mapper
mapper.laser_manager = self._laser_manager
editor = self.editor
padding = editor.padding
# if editor.discrete_scan:
# mapper.canvas = self.canvas
# self.component = self.canvas
# else:
c = mapper.make_component(padding)
self.component = c
bd = editor.beam_diameter
rp = editor.request_power
cx = editor.center_x
cy = editor.center_y
step_len = editor.step_length
t = Thread(
target=mapper.do_power_mapping, args=(bd, rp, cx, cy, padding, step_len)
)
t.start()
self._execute_thread = t
return True
def stop(self):
self.mapper.stop()
def traits_view(self):
v = View(
HGroup(
spring,
Item("beam_diameter", style="readonly"),
Item("power", style="readonly"),
Item("percent_threshold", label="% Threshold"),
visible_when="was_executed",
),
UItem("component", editor=ComponentEditor()),
resizable=True,
)
return v
if __name__ == "__main__":
e = PowerMapEditor()
p = "/Users/ross/Sandbox/powermap/powermap-2013-07-26005.hdf5"
p = "/Users/ross/Sandbox/powermap/powermap-2013-07-27008.hdf5"
e.load(p)
e.configure_traits()
# ============= EOF =============================================
| [
"traits.api.Instance",
"pychron.core.ui.set_qt",
"traits.api.DelegatesTo",
"pychron.lasers.power.power_map_processor.PowerMapProcessor",
"pychron.core.ui.thread.Thread",
"pychron.managers.data_managers.h5_data_manager.H5DataManager",
"enable.component_editor.ComponentEditor",
"traitsui.api.Item",
"traits.api.Range",
"traits.api.Int",
"traits.api.Bool",
"traits.api.Float"
] | [((807, 815), 'pychron.core.ui.set_qt', 'set_qt', ([], {}), '()\n', (813, 815), False, 'from pychron.core.ui import set_qt\n'), ((1902, 1910), 'traits.api.Float', 'Float', (['(1)'], {}), '(1)\n', (1907, 1910), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((1931, 1939), 'traits.api.Float', 'Float', (['(1)'], {}), '(1)\n', (1936, 1939), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((1954, 1964), 'traits.api.Float', 'Float', (['(1.0)'], {}), '(1.0)\n', (1959, 1964), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((1983, 1994), 'traits.api.Float', 'Float', (['(0.25)'], {}), '(0.25)\n', (1988, 1994), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2010, 2018), 'traits.api.Float', 'Float', (['(0)'], {}), '(0)\n', (2015, 2018), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2034, 2042), 'traits.api.Float', 'Float', (['(0)'], {}), '(0)\n', (2039, 2042), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2061, 2067), 'traits.api.Int', 'Int', (['(1)'], {}), '(1)\n', (2064, 2067), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2088, 2099), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (2092, 2099), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2523, 2540), 'traits.api.Range', 'Range', (['(0.0)', '(100.0)'], {}), '(0.0, 100.0)\n', (2528, 2540), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2646, 2676), 'traits.api.Instance', 'Instance', (['PowerMapControls', '()'], {}), '(PowerMapControls, ())\n', (2654, 2676), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2690, 2715), 'traits.api.Instance', 'Instance', (['PowerMapper', '()'], {}), '(PowerMapper, ())\n', (2698, 2715), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2732, 2753), 'traits.api.DelegatesTo', 'DelegatesTo', (['"""mapper"""'], {}), "('mapper')\n", (2743, 2753), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2798, 2825), 'traits.api.Instance', 'Instance', (['PowerMapProcessor'], {}), '(PowerMapProcessor)\n', (2806, 2825), False, 'from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range\n'), ((2996, 3015), 'pychron.lasers.power.power_map_processor.PowerMapProcessor', 'PowerMapProcessor', ([], {}), '()\n', (3013, 3015), False, 'from pychron.lasers.power.power_map_processor import PowerMapProcessor\n'), ((3034, 3049), 'pychron.managers.data_managers.h5_data_manager.H5DataManager', 'H5DataManager', ([], {}), '()\n', (3047, 3049), False, 'from pychron.managers.data_managers.h5_data_manager import H5DataManager\n'), ((3898, 3983), 'pychron.core.ui.thread.Thread', 'Thread', ([], {'target': 'mapper.do_power_mapping', 'args': '(bd, rp, cx, cy, padding, step_len)'}), '(target=mapper.do_power_mapping, args=(bd, rp, cx, cy, padding, step_len)\n )\n', (3904, 3983), False, 'from pychron.core.ui.thread import Thread\n'), ((2182, 2203), 'traitsui.api.Item', 'Item', (['"""discrete_scan"""'], {}), "('discrete_scan')\n", (2186, 2203), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2221, 2242), 'traitsui.api.Item', 
'Item', (['"""beam_diameter"""'], {}), "('beam_diameter')\n", (2225, 2242), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2260, 2281), 'traitsui.api.Item', 'Item', (['"""request_power"""'], {}), "('request_power')\n", (2264, 2281), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2299, 2314), 'traitsui.api.Item', 'Item', (['"""padding"""'], {}), "('padding')\n", (2303, 2314), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2332, 2351), 'traitsui.api.Item', 'Item', (['"""step_length"""'], {}), "('step_length')\n", (2336, 2351), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2369, 2385), 'traitsui.api.Item', 'Item', (['"""center_x"""'], {}), "('center_x')\n", (2373, 2385), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((2403, 2419), 'traitsui.api.Item', 'Item', (['"""center_y"""'], {}), "('center_y')\n", (2407, 2419), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((4227, 4266), 'traitsui.api.Item', 'Item', (['"""beam_diameter"""'], {'style': '"""readonly"""'}), "('beam_diameter', style='readonly')\n", (4231, 4266), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((4284, 4315), 'traitsui.api.Item', 'Item', (['"""power"""'], {'style': '"""readonly"""'}), "('power', style='readonly')\n", (4288, 4315), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((4333, 4379), 'traitsui.api.Item', 'Item', (['"""percent_threshold"""'], {'label': '"""% Threshold"""'}), "('percent_threshold', label='% Threshold')\n", (4337, 4379), False, 'from traitsui.api import View, Item, UItem, VGroup, HGroup, spring\n'), ((4479, 4496), 'enable.component_editor.ComponentEditor', 'ComponentEditor', ([], {}), '()\n', (4494, 4496), False, 'from enable.component_editor import ComponentEditor\n')] |
""" Defines default values used throughout the library.
"""
import numpy as np
#: Default color to be used for drawing objects.
DEFAULT_COLOR = np.array([1, 0.5, 0.2, 1])
#: Default coordinates of a vertex.
DEFAULT_COORDINATES = np.array([0, 0, 0])
| [
"numpy.array"
] | [((147, 173), 'numpy.array', 'np.array', (['[1, 0.5, 0.2, 1]'], {}), '([1, 0.5, 0.2, 1])\n', (155, 173), True, 'import numpy as np\n'), ((234, 253), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (242, 253), True, 'import numpy as np\n')] |
import re
from tqdm import tqdm
from operator import itemgetter
from datetime import datetime, timedelta
# Real-time search keyword preprocessing function (preprocess)
def preprocess(doc):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
doc = re.sub(r'\s+'," ", doc)
doc = doc.lower()
doc = re.sub(r'[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', doc)
doc = emoji_pattern.sub(r'', doc)
doc = re.compile('[^ ㄱ-ㅣ가-힣|a-z]+').sub('', doc)
return doc
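# Illustrative behaviour: preprocess("Hello,  World!") returns "hello world";
# punctuation, emoji and digits are stripped, while Korean text and lowercase
# latin letters are kept.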
# Real-time (trending) keyword extraction
def real_time_keywords(search_input):
temp = [i['keyword_split'] for i in search_input]
check_list = []
for i in range(len(temp)):
temp_word = []
for j in range(len(temp[i])):
temp[i][j] = preprocess(temp[i][j])
if len(temp[i][j]) > 1: temp_word.append(temp[i][j])
check_list.append(temp_word)
result = {}
for words in check_list:
        # Add single words
for i in range(len(words)):
if words[i] in result: result[words[i]] += 1
else: result[words[i]] = 1
        # Add multi-word phrases (forward direction)
for i in range(2,len(words)):
key = " ".join(words[0:i+1])
if key in result: result[key] += 1
else: result[key] = 1
result = sorted(result.items(), key = itemgetter(0))
temp = []
for i in range(len(result)-1):
if ((result[i+1][0].startswith(result[i][0]) or result[i+1][0].endswith(result[i][0])) and result[i+1][1] >= result[i][1]):
continue
temp.append(result[i])
result = sorted(temp, key = lambda x: len(x[0]))
result = sorted(result, key = itemgetter(1), reverse = True)
return result
def realtime(db, config):
search_log_list = list(db['search_log'].find({
'date': {'$gte': datetime.now() - timedelta(days = config.INDICATORS['REALTIME_EFFECTIVE_DAY'])}},
{'_id': 0, 'keyword_split': 1}
))
    # Extract candidate keywords from the search logs
candidate_keywords = real_time_keywords(search_log_list)
if len(candidate_keywords) == 0:
return False
    # Filter out profanity (each candidate is a (keyword, score) pair, so the
    # checks below look at the keyword string itself).
    result_keywords = []
    for keyword in candidate_keywords:
        if (keyword[0] in config.INDICATORS['BAD_LANGUAGE'] or
            len(keyword[0]) > config.INDICATORS['REALTIME_KEYWORD_LEN']):
            continue
        result_keywords.append(keyword)
    # If there are fewer candidate realtime keywords than "REALTIME_KEYWORD_LEN",
    # fetch the most recent realtime keyword list and merge it in.
if len(result_keywords) < config.INDICATORS['REALTIME_KEYWORD_LEN']:
latest_realtime = list(db['realtime'].find().sort([('date', -1)]).limit(1))[0]['realtime']
result_keywords.sort(key=lambda x:x[1], reverse=True)
latest_realtime.sort(key=lambda x:x[1], reverse=True)
        # Take the lowest score among the candidate keywords and decrease it by 0.1
min_value = result_keywords[-1][1]
min_value -= 0.1
        # Find keywords that appear in both the current candidates and the latest realtime keywords
overlap_keyword = list(set(dict(result_keywords)) & set(dict(latest_realtime)))
for keyword in latest_realtime:
flag = False
for overlap in overlap_keyword:
if keyword[0] == overlap:
flag = True
if flag:
continue
if len(result_keywords) == config.INDICATORS['REALTIME_KEYWORD_LEN']:
break
result_keywords.append([keyword[0], min_value])
db['realtime'].insert_one({
'realtime': result_keywords[:config.INDICATORS['REALTIME_KEYWORD_LEN']],
'date': datetime.now()})
return result_keywords
| [
"re.compile",
"operator.itemgetter",
"datetime.datetime.now",
"re.sub",
"datetime.timedelta"
] | [((200, 265), 're.compile', 're.compile', (['"""[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿]+"""'], {'flags': 're.UNICODE'}), "('[😀-🙏🌀-🗿🚀-\\U0001f6ff\\U0001f1e0-🇿]+', flags=re.UNICODE)\n", (210, 265), False, 'import re\n'), ((469, 493), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'doc'], {}), "('\\\\s+', ' ', doc)\n", (475, 493), False, 'import re\n'), ((525, 602), 're.sub', 're.sub', (['"""[-=+,#/\\\\?:^$.@*\\\\"※~&%ㆍ!』\\\\\\\\‘|\\\\(\\\\)\\\\[\\\\]\\\\<\\\\>`\\\\\'…》]"""', '""""""', 'doc'], {}), '(\'[-=+,#/\\\\?:^$.@*\\\\"※~&%ㆍ!』\\\\\\\\‘|\\\\(\\\\)\\\\[\\\\]\\\\<\\\\>`\\\\\\\'…》]\', \'\', doc)\n', (531, 602), False, 'import re\n'), ((640, 669), 're.compile', 're.compile', (['"""[^ ㄱ-ㅣ가-힣|a-z]+"""'], {}), "('[^ ㄱ-ㅣ가-힣|a-z]+')\n", (650, 669), False, 'import re\n'), ((1505, 1518), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (1515, 1518), False, 'from operator import itemgetter\n'), ((1844, 1857), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1854, 1857), False, 'from operator import itemgetter\n'), ((3719, 3733), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3731, 3733), False, 'from datetime import datetime, timedelta\n'), ((1998, 2012), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2010, 2012), False, 'from datetime import datetime, timedelta\n'), ((2015, 2074), 'datetime.timedelta', 'timedelta', ([], {'days': "config.INDICATORS['REALTIME_EFFECTIVE_DAY']"}), "(days=config.INDICATORS['REALTIME_EFFECTIVE_DAY'])\n", (2024, 2074), False, 'from datetime import datetime, timedelta\n')] |
"""Miscellaneous useful functions"""
from pathlib import Path
from jawa.util.bytecode import Operand, OperandTypes
from pyjvm.core.jvm_types import Integer
def dump_class(cf, echo):
"""Display information about a class file
Complements the functionality of the JDK javap tool.
:param cf: A ClassFile
:param echo: A print-like method
"""
echo(f'{cf.this.name.value} : {cf.super_.name.value}')
for field in cf.fields:
echo(f'\t{field.name.value}: {field.type}')
for method in cf.methods:
echo(f'{method} {method.descriptor.value}')
if method.code is not None:
for instruction in method.code.disassemble():
echo('\t' + str(instruction))
for ex in method.code.exception_table:
print(ex)
echo()
for constant in cf.constants:
echo(constant)
def split_by_predicate(iterable, predicate):
"""Split an iterable into two lists the according to a predicate
Return a tuple of two lists:
The first has the values for which `predicate` returned True
The second has the values for which `predicate` returned False
:param iterable: An Iterable[T]
:param predicate: A function from T to bool
"""
true, false = [], []
for item in iterable:
if predicate(item):
true.append(item)
else:
false.append(item)
return true, false
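# Illustrative usage:
#   split_by_predicate([1, 2, 3, 4], lambda x: x % 2 == 0) == ([2, 4], [1, 3])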
def class_as_descriptor(name):
"""Return the JVM descriptor for the class `name`"""
if not name.endswith(';'):
return 'L' + name + ';'
else:
return name
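# Illustrative usage:
#   class_as_descriptor('java/lang/String') == 'Ljava/lang/String;'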
def literal_operand(value):
"""Create an Instruction Operand with type LITERAL and value `value`"""
return Operand(OperandTypes.LITERAL, value)
def constant_operand(const):
"""Create an Instruction Operand with type CONSTANT_INDEX and value `const.index`"""
return Operand(OperandTypes.CONSTANT_INDEX, const.index)
def local_operand(index):
"""Create an Instruction Operand with type LOCAL_INDEX and value `index`"""
return Operand(OperandTypes.LOCAL_INDEX, index)
def pull_pairs(flat):
"""Return a Iterable of pairs of adjacent items in another Iterable
`flat` should be of even size:
`len(list(flat)) % 2 == 0`
>>> list(pull_pairs([1, 2, 3, 4]))
[(1, 2), (3, 4)]
"""
it = iter(flat)
return zip(it, it)
TRUE = Integer.create_instance(1)
FALSE = Integer.create_instance(0)
def bool_to_num(b):
if b:
return TRUE
else:
return FALSE
def field_name_from_field_ref(ref):
"""Return the class name of the name_and_type attribute of field references
The need arises from a weird difference between test behaviour and real class file behaviour.
This suggests that I'm not creating the reference properly in tests.
But I'm not sure exactly how.
Maintainers are welcome to solve this issue.
"""
name = ref.name_and_type.name.value
try:
return name.value
except AttributeError:
return name
def named_tuple_replace(instance, **kwargs):
# noinspection PyProtectedMember
return instance._replace(**kwargs)
def path_to_std_lib() -> Path:
utils_file = Path(__file__)
utils_package = utils_file.parent
std_lib = utils_package.joinpath('glibj.zip')
return std_lib
def path_to_std_lib_as_str():
return str(path_to_std_lib())
| [
"jawa.util.bytecode.Operand",
"pathlib.Path",
"pyjvm.core.jvm_types.Integer.create_instance"
] | [((2383, 2409), 'pyjvm.core.jvm_types.Integer.create_instance', 'Integer.create_instance', (['(1)'], {}), '(1)\n', (2406, 2409), False, 'from pyjvm.core.jvm_types import Integer\n'), ((2418, 2444), 'pyjvm.core.jvm_types.Integer.create_instance', 'Integer.create_instance', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'from pyjvm.core.jvm_types import Integer\n'), ((1721, 1757), 'jawa.util.bytecode.Operand', 'Operand', (['OperandTypes.LITERAL', 'value'], {}), '(OperandTypes.LITERAL, value)\n', (1728, 1757), False, 'from jawa.util.bytecode import Operand, OperandTypes\n'), ((1889, 1938), 'jawa.util.bytecode.Operand', 'Operand', (['OperandTypes.CONSTANT_INDEX', 'const.index'], {}), '(OperandTypes.CONSTANT_INDEX, const.index)\n', (1896, 1938), False, 'from jawa.util.bytecode import Operand, OperandTypes\n'), ((2058, 2098), 'jawa.util.bytecode.Operand', 'Operand', (['OperandTypes.LOCAL_INDEX', 'index'], {}), '(OperandTypes.LOCAL_INDEX, index)\n', (2065, 2098), False, 'from jawa.util.bytecode import Operand, OperandTypes\n'), ((3205, 3219), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3209, 3219), False, 'from pathlib import Path\n')] |
"""
Add routes to a FastAPI application to handle OAuth
"""
import logging
from typing import Dict, List, Optional
import msal
from fastapi import APIRouter, Depends, Request
from fastapi.responses import RedirectResponse
from .config import get_auth_settings
from .frontend.authentication import UserAuthenticated
from .types import RemoveCacheCallable, SaveCacheCallable, UserIdentity
from .utils import build_msal_app
auth_settings = get_auth_settings()
user_authenticated = UserAuthenticated()
def create_auth_router(
f_save_cache: SaveCacheCallable,
f_remove_cache: RemoveCacheCallable,
) -> APIRouter:
router = APIRouter()
def _auth_uri(request: Request) -> str:
redirect_uri = request.url_for("authorized")
if "http://0.0.0.0" in redirect_uri:
redirect_uri = redirect_uri.replace("http://0.0.0.0", "http://localhost")
if "http://127.0.0.1" in redirect_uri:
redirect_uri = redirect_uri.replace("http://127.0.0.1", "http://localhost")
return redirect_uri
def _auth_code_flow(
request: Request,
authority: Optional[str] = None,
scopes: Optional[List[str]] = None,
) -> str:
flow: Dict[str, str] = build_msal_app(
authority=authority
).initiate_auth_code_flow(
scopes,
redirect_uri=_auth_uri(request),
)
request.session["flow"] = flow
return flow["auth_uri"]
# pylint: disable=W0612
@router.route("/login", include_in_schema=False)
async def login(request: Request) -> RedirectResponse:
flow_uri = _auth_code_flow(request, scopes=get_auth_settings().scopes)
return RedirectResponse(url=flow_uri, status_code=302)
# pylint: disable=W0612
@router.get(
"/getAToken",
include_in_schema=False,
) # Its absolute URL must match your app's redirect_uri set in AAD
async def authorized(request: Request) -> RedirectResponse:
# see https://github.com/Azure-Samples/ms-identity-python-webapp/blob/e342e93a2a7e0cc4d4955c20660e6a81fd2536c5/app.py#L35-L45
# for try except pattern. Kind of annoying, means you may have to click sign in twice
try:
cache = msal.SerializableTokenCache()
flow = request.session.get("flow", {})
result = build_msal_app(cache=cache).acquire_token_by_auth_code_flow(
flow,
dict(request.query_params),
scopes=get_auth_settings().scopes,
)
# Remove flow cookie
request.session.pop("flow", None)
# Just store the oid (https://docs.microsoft.com/en-us/azure/active-directory/develop/id-tokens) in a signed cookie
oid = result.get("id_token_claims").get("oid")
await f_save_cache(oid, cache)
request.session["user"] = oid
except ValueError as error:
logging.debug("%s", error)
return RedirectResponse(url=request.url_for("home"), status_code=302)
@router.get("/logout", include_in_schema=False)
async def logout(
request: Request,
user: UserIdentity = Depends(user_authenticated),
) -> RedirectResponse:
"""Remove the user from the cache and pop the session cookie.
Does not sign out of Microsoft
"""
# Remove user from cache
if user:
await f_remove_cache(user.oid)
# Remove their session cookie
request.session.pop("user", None)
return RedirectResponse(url=request.url_for("home"))
return router
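# Illustrative wiring (the app object and the save/remove cache callables are
# assumptions, not defined in this module). Starlette's SessionMiddleware is
# needed because the routes above read and write request.session:
#   app = FastAPI()
#   app.add_middleware(SessionMiddleware, secret_key="...")
#   app.include_router(create_auth_router(save_cache, remove_cache))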
| [
"logging.debug",
"msal.SerializableTokenCache",
"fastapi.responses.RedirectResponse",
"fastapi.APIRouter",
"fastapi.Depends"
] | [((635, 646), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (644, 646), False, 'from fastapi import APIRouter, Depends, Request\n'), ((1691, 1738), 'fastapi.responses.RedirectResponse', 'RedirectResponse', ([], {'url': 'flow_uri', 'status_code': '(302)'}), '(url=flow_uri, status_code=302)\n', (1707, 1738), False, 'from fastapi.responses import RedirectResponse\n'), ((3170, 3197), 'fastapi.Depends', 'Depends', (['user_authenticated'], {}), '(user_authenticated)\n', (3177, 3197), False, 'from fastapi import APIRouter, Depends, Request\n'), ((2238, 2267), 'msal.SerializableTokenCache', 'msal.SerializableTokenCache', ([], {}), '()\n', (2265, 2267), False, 'import msal\n'), ((2934, 2960), 'logging.debug', 'logging.debug', (['"""%s"""', 'error'], {}), "('%s', error)\n", (2947, 2960), False, 'import logging\n')] |
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class IndexViewTest(StaticLiveServerTestCase):
fixtures = ['users.json']
def setUp(self):
self.browser = webdriver.PhantomJS()
self.browser.set_window_size(1400, 1000)
self.browser.implicitly_wait(10)
def tearDown(self):
self.browser.quit()
def test_can_reach_index_page_and_log_in_and_logout(self):
self.browser.get(self.live_server_url)
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Codango', body.text)
# logging in username and password
username_field = self.browser.find_element_by_name('username')
username_field.send_keys('lade')
password_field = self.browser.find_element_by_name('password')
password_field.send_keys('password')
password_field.send_keys(Keys.RETURN)
# username and password accepted
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Share', body.text)
# logging out
self.browser.find_element_by_link_text('lade').click()
self.browser.find_element_by_link_text('LogOut').click()
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Join Our Community', body.text)
class StaticPages(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.PhantomJS()
self.browser.set_window_size(1400, 1000)
self.browser.implicitly_wait(10)
def tearDown(self):
self.browser.quit()
def test_can_reach_static_pages(self):
self.browser.get(self.live_server_url)
# index page
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Join Our Community', body.text)
# about us page
self.browser.find_element_by_link_text('About Us').click()
body = self.browser.find_element_by_tag_name('body')
self.assertIn('About us', body.text)
# contact us page
self.browser.find_element_by_link_text('Contact Us').click()
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Contact us', body.text)
# team page
self.browser.find_element_by_link_text('Team').click()
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Our Awesome Team', body.text)
| [
"selenium.webdriver.PhantomJS"
] | [((275, 296), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ([], {}), '()\n', (294, 296), False, 'from selenium import webdriver\n'), ((1480, 1501), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ([], {}), '()\n', (1499, 1501), False, 'from selenium import webdriver\n')] |
import csv
import requests
import xmltodict
from json import loads, dumps
import urllib.request
from bs4 import BeautifulSoup
import json
#API keys
GOODREADS_API_KEY = ''
with open('keys.json', 'r', encoding='utf-8') as keys:
keys = json.load(keys)
GOODREADS_API_KEY= keys['goodreads_key']
#convert ordered dict to normal dict
def to_dict(input_ordered_dict):
return loads(dumps(input_ordered_dict))
#save intermediate results as csv - append rows
def save_as_csv(filename, data):
has_data = False
    #if already saved; guard against the csv file not existing yet on a first run
    try:
        with open('csv/' + filename + '.csv', 'r', encoding='latin-1') as f:
            reader = csv.reader(f)
            for row in reader:
                has_data = True
                break
    except FileNotFoundError:
        pass
with open('csv/' + filename + '.csv', 'a', encoding='utf-8') as f:
writer = csv.writer(f)
if has_data is False:
writer.writerow(data[0].keys())
for datum in data:
writer.writerow(datum.values())
#search ids
def get_search_obj(author, title):
xml = requests.get(url='https://www.goodreads.com/search/index.xml?key=' + GOODREADS_API_KEY + '&q=' + author + ' ' + title.replace('"', ''))
obj = to_dict(xmltodict.parse(xml.content))
return obj
#get book or author object
def get_object(type, id):
url = 'https://www.goodreads.com/' + type +'/show/'
xml = requests.get(url=url + id + '.xml?key=' + GOODREADS_API_KEY)
obj = to_dict(xmltodict.parse(xml.content))
return obj['GoodreadsResponse'][type]
def get_html_page(link):
response = urllib.request.urlopen(link)
return BeautifulSoup(response.read(), 'html.parser')
| [
"xmltodict.parse",
"csv.writer",
"json.dumps",
"requests.get",
"json.load",
"csv.reader"
] | [((238, 253), 'json.load', 'json.load', (['keys'], {}), '(keys)\n', (247, 253), False, 'import json\n'), ((1345, 1405), 'requests.get', 'requests.get', ([], {'url': "(url + id + '.xml?key=' + GOODREADS_API_KEY)"}), "(url=url + id + '.xml?key=' + GOODREADS_API_KEY)\n", (1357, 1405), False, 'import requests\n'), ((388, 413), 'json.dumps', 'dumps', (['input_ordered_dict'], {}), '(input_ordered_dict)\n', (393, 413), False, 'from json import loads, dumps\n'), ((608, 621), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (618, 621), False, 'import csv\n'), ((809, 822), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (819, 822), False, 'import csv\n'), ((1180, 1208), 'xmltodict.parse', 'xmltodict.parse', (['xml.content'], {}), '(xml.content)\n', (1195, 1208), False, 'import xmltodict\n'), ((1424, 1452), 'xmltodict.parse', 'xmltodict.parse', (['xml.content'], {}), '(xml.content)\n', (1439, 1452), False, 'import xmltodict\n')] |
import math
ONE_THIRD = 1.0/3.0
class DiscretizerException(Exception):
pass
class BaseDiscretizer(object):
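    """Map a float in [val_min, val_max] to an unsigned integer "bucket" of
    num_bytes bytes and back again.  Subclasses choose the shape of the mapping
    by overriding map_encoder()/map_decoder(), which operate on the value after
    it has been normalised to the range [0.0, 1.0]."""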
def __init__(self, num_bytes, val_min, val_max):
if not isinstance(num_bytes, int):
raise DiscretizerException('Number of bytes must be an integer.')
if num_bytes <= 0:
raise DiscretizerException('Number of bytes must be > 0.')
if num_bytes >= 8:
raise DiscretizerException('Too many bytes, use a 64-bit double '
'instead.')
self._num_bytes = num_bytes
num_bits = 8 * self._num_bytes
self._num_buckets = 2 ** num_bits
self._max_bucket = self._num_buckets - 1
self._max_bucket_float = float(self._max_bucket)
if not isinstance(val_min, float):
raise DiscretizerException('Minimum value must be a float.')
if not isinstance(val_max, float):
raise DiscretizerException('Maximum value must be a float.')
if val_max <= val_min:
raise DiscretizerException('Max/min values invalid.')
self._val_min = val_min
self._val_max = val_max
self._val_range = self._val_max - self._val_min
assert self._val_range > 0.0
@property
def num_bytes(self):
return self._num_bytes
@property
def num_buckets(self):
return self._num_buckets
@property
def max_bucket(self):
return self._max_bucket
@property
def max_bucket_float(self):
return self._max_bucket_float
@property
def val_min(self):
return self._val_min
@property
def val_max(self):
return self._val_max
@property
def val_range(self):
return self._val_range
def encode(self, val):
bucket_num = self.val_to_bucket_num(val)
ba = self.bucket_num_to_bytearray(bucket_num)
len_ba = len(ba)
if len_ba != self.num_bytes:
ba.reverse()
for i in range(self.num_bytes - len_ba):
ba.append(0)
ba.reverse()
return ba
def decode(self, ba):
if not isinstance(ba, bytearray):
raise DiscretizerException('Input not bytearray.')
if len(ba) != self.num_bytes:
raise DiscretizerException('Invalid number of bytes parsed.')
bucket_num = self.bytearray_to_bucket_num(ba)
return self.bucket_num_to_val(bucket_num)
def val_to_bucket_num(self, val):
if not isinstance(val, float):
raise DiscretizerException('Value must be a float.')
# normalise the input value and check bounds (v = [0.0, 1.0])
v = (float(val) - self.val_min) / self.val_range
if v <= 0.0:
return 0
elif v >= 1.0:
return self.max_bucket
# execute mapping function
b = float(self.map_encoder(v))
# get nearest bucket number, clamp and return
bucket_num = int(round(b * self.max_bucket))
if bucket_num < 0:
bucket_num = 0
elif bucket_num > self.max_bucket:
bucket_num = self.max_bucket
return bucket_num
def bucket_num_to_val(self, bucket_num):
if not isinstance(bucket_num, int):
raise DiscretizerException('Bucket number must be an integer.')
if bucket_num < 0:
raise DiscretizerException('Bucket number must be >= 0.')
if bucket_num > self.max_bucket:
raise DiscretizerException('Bucket number must be <= maximum.')
# compute bucket factor and check bounds (b = [0.0, 1.0])
b = float(bucket_num) / self.max_bucket_float
if b <= 0.0:
return self.val_min
elif b >= 1.0:
return self.val_max
# execute mapping function
v = float(self.map_decoder(b))
# clamp, compute value and return value
if v <= 0.0:
return self.val_min
elif v >= 1.0:
return self.val_max
val = self.val_min + v * self.val_range
return val
@staticmethod
def bucket_num_to_bytearray(bucket_num):
bits = BaseDiscretizer.bucket_num_to_bits(bucket_num)
return BaseDiscretizer.bits_to_bytearray(bits)
@staticmethod
def bytearray_to_bucket_num(ba):
bits = BaseDiscretizer.bytearray_to_bits(ba)
return BaseDiscretizer.bits_to_bucket_num(bits)
@staticmethod
def bucket_num_to_bits(bucket_num):
if not isinstance(bucket_num, int):
raise DiscretizerException('Bucket number must be an integer.')
if bucket_num < 0:
raise DiscretizerException('Bucket number must be >= 0.')
bits = bin(bucket_num).replace('0b', '')
len_bits = len(bits)
assert len_bits > 0
num_bytes = int(len_bits / 8)
if (len_bits % 8) > 0:
num_bytes += 1
bits = '0b' + bits.zfill(num_bytes * 8)
return bits
@staticmethod
def bits_to_bucket_num(bits):
if not isinstance(bits, str):
raise DiscretizerException('Input not a string.')
if bits[0:2] != '0b':
raise DiscretizerException('Input not a string of bits.')
bits = bits.replace('0b', '')
if len(bits) <= 0:
raise DiscretizerException('No input bits.')
bucket_num = int(bits, 2)
assert bucket_num >= 0
return bucket_num
@staticmethod
def bytearray_to_bits(ba):
if not isinstance(ba, bytearray):
raise DiscretizerException('Input not a bytearray.')
if len(ba) <= 0:
raise DiscretizerException('Bytearray is empty.')
bits = '0b'
for i in ba:
assert i >= 0 and i <= 255
bits += (bin(i).replace('0b', '').zfill(8))
return bits
@staticmethod
def bits_to_bytearray(bits):
if not isinstance(bits, str):
raise DiscretizerException('Input not a string.')
if bits[0:2] != '0b':
raise DiscretizerException('Input not a string of bits.')
bits = bits.replace('0b', '')
len_bits = len(bits)
if len_bits <= 0:
raise DiscretizerException('No input bits.')
num_bytes = int(len_bits / 8)
if (len_bits % 8) > 0:
num_bytes += 1
bits = bits.zfill(num_bytes * 8)
ba = bytearray()
for i in range(num_bytes):
local_bits = bits[i*8:(i+1)*8]
ba.append(int(local_bits, 2))
return ba
class LinearDiscretizer(BaseDiscretizer):
def __init__(self, num_bytes, val_min, val_max):
BaseDiscretizer.__init__(self, num_bytes, val_min, val_max)
def map_encoder(self, v):
return v
def map_decoder(self, b):
return b
class CubeRootDiscretizer(BaseDiscretizer):
def __init__(self, num_bytes, val_min, val_max):
BaseDiscretizer.__init__(self, num_bytes, val_min, val_max)
def map_encoder(self, v):
# compute only the real cube root
x = (v - 0.5) * 0.25
b = (math.pow(abs(x), ONE_THIRD) * (1, -1)[x < 0.0]) + 0.5
return b
def map_decoder(self, b):
v = 4.0 * math.pow(b - 0.5, 3.0) + 0.5
return v
class SigmoidDiscretizer(BaseDiscretizer):
def __init__(self, num_bytes, val_min, val_max, sharpness):
BaseDiscretizer.__init__(self, num_bytes, val_min, val_max)
if not isinstance(sharpness, float):
raise DiscretizerException('Sharpness must be a float.')
if sharpness <= 0.0:
raise DiscretizerException('Sharpness must be > 0.')
self._k = sharpness
self._inv_k = 1.0 / self._k
self._S = 2.0 / (math.exp(0.5*self._k) - 1.0)
self._one_plus_S = 1.0 + self._S
self._half_S = 0.5 * self._S
def map_encoder(self, v):
f = 1.0 + math.exp(self._k * (0.5 - v))
b = self._one_plus_S / f - self._half_S
return b
def map_decoder(self, b):
f = self._one_plus_S / (b + self._half_S) - 1.0
v = 0.5 - self._inv_k * math.log(f)
return v
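# Illustrative usage sketch (hypothetical values, not part of the original module):
#     enc = LinearDiscretizer(2, 0.0, 100.0)   # 2 bytes -> 65536 buckets
#     ba = enc.encode(42.0)                    # bytearray of length 2
#     enc.decode(ba)                           # ~42.0, up to quantisation error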
| [
"math.pow",
"math.exp",
"math.log"
] | [((7883, 7912), 'math.exp', 'math.exp', (['(self._k * (0.5 - v))'], {}), '(self._k * (0.5 - v))\n', (7891, 7912), False, 'import math\n'), ((7207, 7229), 'math.pow', 'math.pow', (['(b - 0.5)', '(3.0)'], {}), '(b - 0.5, 3.0)\n', (7215, 7229), False, 'import math\n'), ((7727, 7750), 'math.exp', 'math.exp', (['(0.5 * self._k)'], {}), '(0.5 * self._k)\n', (7735, 7750), False, 'import math\n'), ((8097, 8108), 'math.log', 'math.log', (['f'], {}), '(f)\n', (8105, 8108), False, 'import math\n')] |
from requests import Session
import json
from pprint import pprint
class RedditException(Exception):
pass
class Reddit(object):
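    """Thin wrapper around reddit.com's JSON endpoints: cookie-based login,
    subreddit listings and comment-thread retrieval over a requests Session."""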
def __init__(self):
self.init_session()
def init_session(self):
self.session = Session()
self.session.headers['user-agent'] = 'Cliddit v0.0.1'
self.modhash = None
def get(self, path, **kwargs):
req = self.session.get('http://reddit.com/' + path, params=kwargs)
if req.status_code == 200:
return req.json()
else:
return False
def post(self, path, **kwargs):
kwargs['api_type'] = 'json'
if self.modhash:
kwargs['uh'] = self.modhash
req = self.session.post('http://www.reddit.com/' + path, data=kwargs)
if req.status_code == 200:
return req.json()
else:
return False
def login(self, username, password):
req = self.post('api/login', user=username, passwd=password)['json']
if req['errors']:
raise RedditException(req['errors'][0][1])
else:
self.username = username
return True
def logout(self):
self.init_session()
def list_posts(self, subreddit=None):
if subreddit:
path = 'r/' + subreddit
else:
path = '/'
entries = []
top = self.get(path + '.json')
for data in top['data']['children']:
data = data['data']
entries.append(dict(
title=data['title'],
user=data['author'],
score=data['score'],
selftext=data['selftext'] if data['is_self'] else None,
comments=data['num_comments'],
post=(data['subreddit'], data['id'])
))
if subreddit:
info = self.get(path + '/about.json')['data']
name, title = info['display_name'], info['title']
else:
name, title = 'home', 'Reddit Home'
return name, title, entries
def get_post(self, id):
def parse_comments(comments):
_children = comments['data']['children']
children = []
for data in _children:
data = data['data']
children.append(dict(
user=data['author'],
body=data['body'],
score=data['ups']-data['downs'],
votes=(data['ups'], data['downs']),
comments=parse_comments(data['replies']) if data['replies'] else []
))
return children
post = self.get('r/%s/comments/%s.json' % id)
info, comments = post
info = info['data']['children'][0]['data']
return dict(
subreddit=info['subreddit'],
user=info['author'],
score=info['score'],
votes=(info['ups'], info['downs']),
title=info['title'],
selftext=info['selftext'] if info['is_self'] else None,
link=info['url'] if not info['is_self'] else None,
comments=parse_comments(comments)
)
if __name__=='__main__':
reddit = Reddit()
pprint(reddit.get_post((u'r4r', u'19yfmk')))
pprint(reddit.get_post((u'r4r', u'19ym56')))
| [
"requests.Session"
] | [((217, 226), 'requests.Session', 'Session', ([], {}), '()\n', (224, 226), False, 'from requests import Session\n')] |
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import os
import numpy, re, sys
from multiprocessing import Pool
# from io_funcs.binary_io import BinaryIOCollection
# from linguistic_base import LinguisticBase
import matplotlib.mlab as mlab
# import lxml
# from lxml import etree
# from lxml.etree import *
# MODULE_PARSER = etree.XMLParser()
# import logging
# from logplot.logging_plotting import LoggerPlotter #, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
import numpy
class BinaryIOCollection(object):
def load_binary_file(self, file_name, dimension):
fid_lab = open(file_name, 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
assert features.size % float(dimension) == 0.0,'specified dimension %s not compatible with data'%(dimension)
features = features[:(dimension * (features.size // dimension))]
features = features.reshape((-1, dimension))
return features
def array_to_binary_file(self, data, output_file_name):
data = numpy.array(data, 'float32')
fid = open(output_file_name, 'wb')
data.tofile(fid)
fid.close()
def load_binary_file_frame(self, file_name, dimension):
fid_lab = open(file_name, 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
assert features.size % float(dimension) == 0.0,'specified dimension %s not compatible with data'%(dimension)
frame_number = features.size // dimension
features = features[:(dimension * frame_number)]
features = features.reshape((-1, dimension))
return features, frame_number
## a generic class of linguistic feature extraction
##
class LinguisticBase(object):
def __init__(self, dimension=0):
self.dimension = dimension ##the feature dimensionality of output (should that read 'input' ?)
## the number of utterances to be normalised
self.utterance_num = 0
## the ori_file_list contains the file paths of the raw linguistic data
## the output_file_list contains the file paths of the normalised linguistic data
##
def perform_normalisation(self, ori_file_list, output_file_list, label_type="state_align", dur_file_list=None):
# logger = logging.getLogger("perform_normalisation")
# logger.info('perform linguistic feature extraction')
self.utterance_num = len(ori_file_list)
if self.utterance_num != len(output_file_list):
# logger.error('the number of input and output linguistic files should be the same!\n')
sys.exit(1)
        for i in range(self.utterance_num):
self.extract_linguistic_features(ori_file_list[i], output_file_list[i], label_type=label_type)
# def _perform_normalisation(i):
# if not dur_file_list:
# self.extract_linguistic_features(ori_file_list[i], output_file_list[i], label_type)
# else:
# self.extract_linguistic_features(ori_file_list[i], output_file_list[i], label_type, dur_file_list[i])
#
# pool = Pool()
# pool.map(_perform_normalisation, range(self.utterance_num))
# pool.close()
# pool.join()
## the exact function to do the work
## need to be implemented in the specific class
## the function will write the linguistic features directly to the output file
def extract_linguistic_features(self, in_file_name, out_file_name, label_type, dur_file_name=None):
pass
class LabelNormalisation(LinguisticBase):
# this class only knows how to deal with a single style of labels (XML or HTS)
# (to deal with composite labels, use LabelComposer instead)
def __init__(self, question_file_name=None,xpath_file_name=None):
pass
def extract_linguistic_features(self, in_file_name, out_file_name=None, label_type="state_align", dur_file_name=None):
if label_type=="phone_align":
A = self.load_labels_with_phone_alignment(in_file_name, dur_file_name)
elif label_type=="state_align":
A = self.load_labels_with_state_alignment(in_file_name)
        else:
            # no logger is configured in this module, so fail with an explicit message
            sys.exit("we don't support %s labels as of now!!" % (label_type))
if out_file_name:
print('Write: '+out_file_name+':'+str(A.shape))
io_funcs = BinaryIOCollection()
io_funcs.array_to_binary_file(A, out_file_name)
else:
return A
# -----------------------------
class HTSLabelNormalisation(LabelNormalisation):
"""This class is to convert HTS format labels into continous or binary values, and store as binary format with float32 precision.
The class supports two kinds of questions: QS and CQS.
    **QS**: the same binary question as used in HTS.
    **CQS**: a new question type defined in this system. Here is an example of the question: CQS C-Syl-Tone {_(\d+)+}. A regular expression is used to extract continuous values.
Time alignments are expected in the HTS labels. Here is an example of the HTS labels:
3050000 3100000 xx~#-p+l=i:1_4/A/0_0_0/B/1-1-4:1-1&1-4#1-3$1-4>0-1<0-1|i/C/1+1+3/D/0_0/E/content+1:1+3&1+2#0+1/F/content_1/G/0_0/H/4=3:1=1&L-L%/I/0_0/J/4+3-1[2]
3100000 3150000 xx~#-p+l=i:1_4/A/0_0_0/B/1-1-4:1-1&1-4#1-3$1-4>0-1<0-1|i/C/1+1+3/D/0_0/E/content+1:1+3&1+2#0+1/F/content_1/G/0_0/H/4=3:1=1&L-L%/I/0_0/J/4+3-1[3]
3150000 3250000 xx~#-p+l=i:1_4/A/0_0_0/B/1-1-4:1-1&1-4#1-3$1-4>0-1<0-1|i/C/1+1+3/D/0_0/E/content+1:1+3&1+2#0+1/F/content_1/G/0_0/H/4=3:1=1&L-L%/I/0_0/J/4+3-1[4]
3250000 3350000 xx~#-p+l=i:1_4/A/0_0_0/B/1-1-4:1-1&1-4#1-3$1-4>0-1<0-1|i/C/1+1+3/D/0_0/E/content+1:1+3&1+2#0+1/F/content_1/G/0_0/H/4=3:1=1&L-L%/I/0_0/J/4+3-1[5]
3350000 3900000 xx~#-p+l=i:1_4/A/0_0_0/B/1-1-4:1-1&1-4#1-3$1-4>0-1<0-1|i/C/1+1+3/D/0_0/E/content+1:1+3&1+2#0+1/F/content_1/G/0_0/H/4=3:1=1&L-L%/I/0_0/J/4+3-1[6]
    3050000 3100000 are the starting and ending times.
[2], [3], [4], [5], [6] mean the HMM state index.
"""
# this subclass support HTS labels, which include time alignments
def __init__(self, question_file_name=None, add_frame_features=True, subphone_feats='full', continuous_flag=True):
# logger = logging.getLogger("labels")
self.question_dict = {}
self.ori_question_dict = {}
self.dict_size = 0
self.continuous_flag = continuous_flag
try:
# self.question_dict, self.ori_question_dict = self.load_question_set(question_file_name)
self.discrete_dict, self.continuous_dict = self.load_question_set_continous(question_file_name)
except:
# logger.critical('error whilst loading HTS question set')
raise
###self.dict_size = len(self.question_dict)
self.dict_size = len(self.discrete_dict) + len(self.continuous_dict)
self.add_frame_features = add_frame_features
self.subphone_feats = subphone_feats
if self.subphone_feats == 'full':
self.frame_feature_size = 9 ## zhizheng's original 5 state features + 4 phoneme features
elif self.subphone_feats == 'minimal_frame':
self.frame_feature_size = 2 ## the minimal features necessary to go from a state-level to frame-level model
elif self.subphone_feats == 'state_only':
self.frame_feature_size = 1 ## this is equivalent to a state-based system
elif self.subphone_feats == 'none':
self.frame_feature_size = 0 ## the phoneme level features only
elif self.subphone_feats == 'frame_only':
self.frame_feature_size = 1 ## this is equivalent to a frame-based system without relying on state-features
elif self.subphone_feats == 'uniform_state':
self.frame_feature_size = 2 ## this is equivalent to a frame-based system with uniform state-features
elif self.subphone_feats == 'minimal_phoneme':
self.frame_feature_size = 3 ## this is equivalent to a frame-based system with minimal features
elif self.subphone_feats == 'coarse_coding':
self.frame_feature_size = 4 ## this is equivalent to a frame-based positioning system reported in Heiga Zen's work
self.cc_features = self.compute_coarse_coding_features(3)
else:
sys.exit('Unknown value for subphone_feats: %s'%(subphone_feats))
self.dimension = self.dict_size + self.frame_feature_size
### if user wants to define their own input, simply set the question set to empty.
if self.dict_size == 0:
self.dimension = 0
# logger.debug('HTS-derived input feature dimension is %d + %d = %d' % (self.dict_size, self.frame_feature_size, self.dimension) )
def prepare_dur_data(self, ori_file_list, output_file_list, label_type="state_align", feature_type=None, unit_size=None, feat_size=None):
'''
extracting duration binary features or numerical features.
'''
# logger = logging.getLogger("dur")
utt_number = len(ori_file_list)
if utt_number != len(output_file_list):
print("the number of input and output files should be the same!\n");
sys.exit(1)
### set default feature type to numerical, if not assigned ###
if not feature_type:
feature_type = "numerical"
### set default unit size to state, if not assigned ###
if not unit_size:
unit_size = "state"
if label_type=="phone_align":
unit_size = "phoneme"
### set default feat size to frame or phoneme, if not assigned ###
if feature_type=="binary":
if not feat_size:
feat_size = "frame"
elif feature_type=="numerical":
if not feat_size:
feat_size = "phoneme"
else:
# logger.critical("Unknown feature type: %s \n Please use one of the following: binary, numerical\n" %(feature_type))
sys.exit(1)
for i in range(utt_number):
self.extract_dur_features(ori_file_list[i], output_file_list[i], label_type, feature_type, unit_size, feat_size)
def extract_dur_features(self, in_file_name, out_file_name=None, label_type="state_align", feature_type=None, unit_size=None, feat_size=None):
# logger = logging.getLogger("dur")
if label_type=="phone_align":
A = self.extract_dur_from_phone_alignment_labels(in_file_name, feature_type, unit_size, feat_size)
elif label_type=="state_align":
A = self.extract_dur_from_state_alignment_labels(in_file_name, feature_type, unit_size, feat_size)
else:
# logger.critical("we don't support %s labels as of now!!" % (label_type))
sys.exit(1)
if out_file_name:
io_funcs = BinaryIOCollection()
io_funcs.array_to_binary_file(A, out_file_name)
else:
return A
def extract_dur_from_state_alignment_labels(self, file_name, feature_type, unit_size, feat_size):
# logger = logging.getLogger("dur")
state_number = 5
dur_dim = state_number
if feature_type=="binary":
dur_feature_matrix = numpy.empty((100000, 1))
elif feature_type=="numerical":
if unit_size=="state":
dur_feature_matrix = numpy.empty((100000, dur_dim))
current_dur_array = numpy.zeros((dur_dim, 1))
else: ## phoneme/syllable/word
dur_feature_matrix = numpy.empty((100000, 1))
fid = open(file_name)
utt_labels = fid.readlines()
fid.close()
label_number = len(utt_labels)
# logger.info('loaded %s, %3d labels' % (file_name, label_number) )
MLU_dur = [[],[],[]]
list_of_silences=['#', 'sil', 'pau', 'SIL']
current_index = 0
dur_feature_index = 0
syllable_duration = 0
word_duration = 0
for line in utt_labels:
line = line.strip()
if len(line) < 1:
continue
temp_list = re.split('\s+', line)
start_time = int(temp_list[0])
end_time = int(temp_list[1])
full_label = temp_list[2]
full_label_length = len(full_label) - 3 # remove state information [k]
state_index = full_label[full_label_length + 1]
state_index = int(state_index) - 1
current_phone = full_label[full_label.index('-') + 1:full_label.index('+')]
frame_number = int(end_time/50000) - int(start_time/50000)
if state_index == 1:
phone_duration = frame_number
for i in range(state_number - 1):
line = utt_labels[current_index + i + 1].strip()
temp_list = re.split('\s+', line)
phone_duration += int((int(temp_list[1]) - int(temp_list[0]))/50000)
syllable_duration+=phone_duration
word_duration+=phone_duration
### for syllable and word positional information ###
label_binary_vector = self.pattern_matching_binary(full_label)
label_continuous_vector = self.pattern_matching_continous_position(full_label)
### syllable ending information ###
syl_end = 0
if(label_continuous_vector[0, 1]==1 or current_phone in list_of_silences): ##pos-bw and c-silences
syl_end = 1
### word ending information ###
word_end = 0
if(syl_end and label_continuous_vector[0, 9]==1 or current_phone in list_of_silences):
word_end = 1
if feature_type == "binary":
current_block_array = numpy.zeros((frame_number, 1))
if unit_size == "state":
current_block_array[-1] = 1
elif unit_size == "phoneme":
if state_index == state_number:
current_block_array[-1] = 1
else:
# logger.critical("Unknown unit size: %s \n Please use one of the following: state, phoneme\n" %(unit_size))
sys.exit(1)
elif feature_type == "numerical":
if unit_size == "state":
current_dur_array[current_index%5] = frame_number
if feat_size == "phoneme" and state_index == state_number:
current_block_array = current_dur_array.transpose()
if feat_size == "frame":
current_block_array = numpy.tile(current_dur_array.transpose(), (frame_number, 1))
elif state_index == state_number:
if unit_size == "phoneme":
current_block_array = numpy.array([phone_duration])
elif unit_size == "syllable":
current_block_array = numpy.array([syllable_duration])
elif unit_size == "word":
current_block_array = numpy.array([word_duration])
if syl_end:
syllable_duration = 0
if word_end:
word_duration = 0
### writing into dur_feature_matrix ###
if feat_size == "frame":
dur_feature_matrix[dur_feature_index:dur_feature_index+frame_number,] = current_block_array
dur_feature_index = dur_feature_index + frame_number
elif state_index == state_number:
if feat_size == "phoneme":
dur_feature_matrix[dur_feature_index:dur_feature_index+1,] = current_block_array
dur_feature_index = dur_feature_index + 1
elif current_phone!='#': ## removing silence here
if feat_size == "syllable" and syl_end:
dur_feature_matrix[dur_feature_index:dur_feature_index+1,] = current_block_array
dur_feature_index = dur_feature_index + 1
elif feat_size == "word" and word_end:
dur_feature_matrix[dur_feature_index:dur_feature_index+1,] = current_block_array
dur_feature_index = dur_feature_index + 1
elif feat_size == "MLU":
if word_end:
if current_phone=='pau':
MLU_dur[0].append(1)
else:
MLU_dur[0].append(int(label_continuous_vector[0, 24]))
if syl_end:
if current_phone=='pau':
MLU_dur[1].append(1)
else:
MLU_dur[1].append(int(label_continuous_vector[0, 7]))
MLU_dur[2].append(int(phone_duration))
current_index += 1
if feat_size == "MLU":
            for seg_indx in range(len(MLU_dur)):
seg_len = len(MLU_dur[seg_indx])
current_block_array = numpy.reshape(numpy.array(MLU_dur[seg_indx]), (-1, 1))
dur_feature_matrix[dur_feature_index:dur_feature_index+seg_len, ] = current_block_array
dur_feature_index = dur_feature_index + seg_len
dur_feature_matrix = dur_feature_matrix[0:dur_feature_index,]
# logger.debug('made duration matrix of %d frames x %d features' % dur_feature_matrix.shape )
return dur_feature_matrix
def extract_dur_from_phone_alignment_labels(self, file_name, feature_type, unit_size, feat_size):
# logger = logging.getLogger("dur")
dur_dim = 1 # hard coded here
if feature_type=="binary":
dur_feature_matrix = numpy.empty((100000, dur_dim))
elif feature_type=="numerical":
if unit_size=="phoneme":
dur_feature_matrix = numpy.empty((100000, dur_dim))
fid = open(file_name)
utt_labels = fid.readlines()
fid.close()
label_number = len(utt_labels)
# logger.info('loaded %s, %3d labels' % (file_name, label_number) )
current_index = 0
dur_feature_index = 0
for line in utt_labels:
line = line.strip()
if len(line) < 1:
continue
temp_list = re.split('\s+', line)
start_time = int(temp_list[0])
end_time = int(temp_list[1])
full_label = temp_list[2]
frame_number = int(end_time/50000) - int(start_time/50000)
phone_duration = frame_number
if feature_type == "binary":
current_block_array = numpy.zeros((frame_number, 1))
if unit_size == "phoneme":
current_block_array[-1] = 1
else:
# logger.critical("Unknown unit size: %s \n Please use one of the following: phoneme\n" %(unit_size))
sys.exit(1)
elif feature_type == "numerical":
if unit_size == "phoneme":
current_block_array = numpy.array([phone_duration])
### writing into dur_feature_matrix ###
if feat_size == "frame":
dur_feature_matrix[dur_feature_index:dur_feature_index+frame_number,] = current_block_array
dur_feature_index = dur_feature_index + frame_number
elif feat_size == "phoneme":
dur_feature_matrix[dur_feature_index:dur_feature_index+1,] = current_block_array
dur_feature_index = dur_feature_index + 1
current_index += 1
dur_feature_matrix = dur_feature_matrix[0:dur_feature_index,]
# logger.debug('made duration matrix of %d frames x %d features' % dur_feature_matrix.shape )
return dur_feature_matrix
def load_labels_with_phone_alignment(self, file_name, dur_file_name):
# this is not currently used ??? -- it works now :D
# logger = logging.getLogger("labels")
#logger.critical('unused function ???')
#raise Exception
if dur_file_name:
io_funcs = BinaryIOCollection()
dur_dim = 1 ## hard coded for now
manual_dur_data = io_funcs.load_binary_file(dur_file_name, dur_dim)
if self.add_frame_features:
assert self.dimension == self.dict_size+self.frame_feature_size
elif self.subphone_feats != 'none':
assert self.dimension == self.dict_size+self.frame_feature_size
else:
assert self.dimension == self.dict_size
label_feature_matrix = numpy.empty((100000, self.dimension))
ph_count=0
label_feature_index = 0
with open(file_name) as fid:
all_data = fid.readlines()
for line in all_data:
line = line.strip()
if len(line) < 1:
continue
temp_list = re.split('\s+', line)
if len(temp_list)==1:
frame_number = 0
full_label = temp_list[0]
else:
start_time = int(temp_list[0])
end_time = int(temp_list[1])
full_label = temp_list[2]
# to do - support different frame shift - currently hardwired to 5msec
# currently under beta testing: support different frame shift
if dur_file_name:
frame_number = manual_dur_data[ph_count]
else:
frame_number = int(end_time/50000) - int(start_time/50000)
if self.subphone_feats == "coarse_coding":
cc_feat_matrix = self.extract_coarse_coding_features_relative(frame_number)
ph_count = ph_count+1
#label_binary_vector = self.pattern_matching(full_label)
label_binary_vector = self.pattern_matching_binary(full_label)
# if there is no CQS question, the label_continuous_vector will become to empty
label_continuous_vector = self.pattern_matching_continous_position(full_label)
label_vector = numpy.concatenate([label_binary_vector, label_continuous_vector], axis = 1)
if self.add_frame_features:
current_block_binary_array = numpy.zeros((frame_number, self.dict_size+self.frame_feature_size))
for i in range(frame_number):
current_block_binary_array[i, 0:self.dict_size] = label_vector
if self.subphone_feats == 'minimal_phoneme':
## features which distinguish frame position in phoneme
current_block_binary_array[i, self.dict_size] = float(i+1)/float(frame_number) # fraction through phone forwards
current_block_binary_array[i, self.dict_size+1] = float(frame_number - i)/float(frame_number) # fraction through phone backwards
current_block_binary_array[i, self.dict_size+2] = float(frame_number) # phone duration
elif self.subphone_feats == 'coarse_coding':
## features which distinguish frame position in phoneme using three continous numerical features
current_block_binary_array[i, self.dict_size+0] = cc_feat_matrix[i, 0]
current_block_binary_array[i, self.dict_size+1] = cc_feat_matrix[i, 1]
current_block_binary_array[i, self.dict_size+2] = cc_feat_matrix[i, 2]
current_block_binary_array[i, self.dict_size+3] = float(frame_number)
elif self.subphone_feats == 'none':
pass
else:
sys.exit('unknown subphone_feats type')
label_feature_matrix[label_feature_index:label_feature_index+frame_number,] = current_block_binary_array
label_feature_index = label_feature_index + frame_number
elif self.subphone_feats == 'none':
current_block_binary_array = label_vector
label_feature_matrix[label_feature_index:label_feature_index+1,] = current_block_binary_array
label_feature_index = label_feature_index + 1
label_feature_matrix = label_feature_matrix[0:label_feature_index,]
# logger.info('loaded %s, %3d labels' % (file_name, ph_count) )
# logger.debug('made label matrix of %d frames x %d labels' % label_feature_matrix.shape )
return label_feature_matrix
def load_labels_with_state_alignment(self, file_name):
## setting add_frame_features to False performs either state/phoneme level normalisation
# logger = logging.getLogger("labels")
if self.add_frame_features:
assert self.dimension == self.dict_size+self.frame_feature_size
elif self.subphone_feats != 'none':
assert self.dimension == self.dict_size+self.frame_feature_size
else:
assert self.dimension == self.dict_size
# label_feature_matrix = numpy.empty((100000, self.dict_size+self.frame_feature_size))
label_feature_matrix = numpy.empty((100000, self.dimension))
label_feature_index = 0
state_number = 5
lab_binary_vector = numpy.zeros((1, self.dict_size))
fid = open(file_name)
utt_labels = fid.readlines()
fid.close()
current_index = 0
label_number = len(utt_labels)
# logger.info('loaded %s, %3d labels' % (file_name, label_number) )
phone_duration = 0
state_duration_base = 0
for line in utt_labels:
line = line.strip()
if len(line) < 1:
continue
temp_list = re.split('\s+', line)
if len(temp_list)==1:
frame_number = 0
state_index = 1
full_label = temp_list[0]
else:
start_time = int(temp_list[0])
end_time = int(temp_list[1])
frame_number = int(end_time/50000) - int(start_time/50000)
full_label = temp_list[2]
full_label_length = len(full_label) - 3 # remove state information [k]
state_index = full_label[full_label_length + 1]
state_index = int(state_index) - 1
state_index_backward = 6 - state_index
full_label = full_label[0:full_label_length]
if state_index == 1:
current_frame_number = 0
phone_duration = frame_number
state_duration_base = 0
# label_binary_vector = self.pattern_matching(full_label)
label_binary_vector = self.pattern_matching_binary(full_label)
# if there is no CQS question, the label_continuous_vector will become to empty
label_continuous_vector = self.pattern_matching_continous_position(full_label)
label_vector = numpy.concatenate([label_binary_vector, label_continuous_vector], axis = 1)
if len(temp_list)==1:
state_index = state_number
else:
for i in range(state_number - 1):
line = utt_labels[current_index + i + 1].strip()
temp_list = re.split('\s+', line)
phone_duration += int((int(temp_list[1]) - int(temp_list[0]))/50000)
if self.subphone_feats == "coarse_coding":
cc_feat_matrix = self.extract_coarse_coding_features_relative(phone_duration)
if self.add_frame_features:
current_block_binary_array = numpy.zeros((frame_number, self.dict_size+self.frame_feature_size))
for i in range(frame_number):
current_block_binary_array[i, 0:self.dict_size] = label_vector
if self.subphone_feats == 'full':
## Zhizheng's original 9 subphone features:
current_block_binary_array[i, self.dict_size] = float(i+1) / float(frame_number) ## fraction through state (forwards)
current_block_binary_array[i, self.dict_size+1] = float(frame_number - i) / float(frame_number) ## fraction through state (backwards)
current_block_binary_array[i, self.dict_size+2] = float(frame_number) ## length of state in frames
current_block_binary_array[i, self.dict_size+3] = float(state_index) ## state index (counting forwards)
current_block_binary_array[i, self.dict_size+4] = float(state_index_backward) ## state index (counting backwards)
current_block_binary_array[i, self.dict_size+5] = float(phone_duration) ## length of phone in frames
current_block_binary_array[i, self.dict_size+6] = float(frame_number) / float(phone_duration) ## fraction of the phone made up by current state
current_block_binary_array[i, self.dict_size+7] = float(phone_duration - i - state_duration_base) / float(phone_duration) ## fraction through phone (backwards)
current_block_binary_array[i, self.dict_size+8] = float(state_duration_base + i + 1) / float(phone_duration) ## fraction through phone (forwards)
elif self.subphone_feats == 'state_only':
## features which only distinguish state:
current_block_binary_array[i, self.dict_size] = float(state_index) ## state index (counting forwards)
elif self.subphone_feats == 'frame_only':
## features which distinguish frame position in phoneme:
current_frame_number += 1
current_block_binary_array[i, self.dict_size] = float(current_frame_number) / float(phone_duration) ## fraction through phone (counting forwards)
elif self.subphone_feats == 'uniform_state':
## features which distinguish frame position in phoneme:
current_frame_number += 1
current_block_binary_array[i, self.dict_size] = float(current_frame_number) / float(phone_duration) ## fraction through phone (counting forwards)
new_state_index = max(1, round(float(current_frame_number)/float(phone_duration)*5))
current_block_binary_array[i, self.dict_size+1] = float(new_state_index) ## state index (counting forwards)
elif self.subphone_feats == "coarse_coding":
## features which distinguish frame position in phoneme using three continous numerical features
current_block_binary_array[i, self.dict_size+0] = cc_feat_matrix[current_frame_number, 0]
current_block_binary_array[i, self.dict_size+1] = cc_feat_matrix[current_frame_number, 1]
current_block_binary_array[i, self.dict_size+2] = cc_feat_matrix[current_frame_number, 2]
current_block_binary_array[i, self.dict_size+3] = float(phone_duration)
current_frame_number += 1
elif self.subphone_feats == 'minimal_frame':
## features which distinguish state and minimally frame position in state:
current_block_binary_array[i, self.dict_size] = float(i+1) / float(frame_number) ## fraction through state (forwards)
current_block_binary_array[i, self.dict_size+1] = float(state_index) ## state index (counting forwards)
elif self.subphone_feats == 'none':
pass
else:
sys.exit('unknown subphone_feats type')
label_feature_matrix[label_feature_index:label_feature_index+frame_number,] = current_block_binary_array
label_feature_index = label_feature_index + frame_number
elif self.subphone_feats == 'state_only' and state_index == state_number:
current_block_binary_array = numpy.zeros((state_number, self.dict_size+self.frame_feature_size))
for i in range(state_number):
current_block_binary_array[i, 0:self.dict_size] = label_vector
current_block_binary_array[i, self.dict_size] = float(i+1) ## state index (counting forwards)
label_feature_matrix[label_feature_index:label_feature_index+state_number,] = current_block_binary_array
label_feature_index = label_feature_index + state_number
elif self.subphone_feats == 'none' and state_index == state_number:
current_block_binary_array = label_vector
label_feature_matrix[label_feature_index:label_feature_index+1,] = current_block_binary_array
label_feature_index = label_feature_index + 1
state_duration_base += frame_number
current_index += 1
label_feature_matrix = label_feature_matrix[0:label_feature_index,]
# logger.debug('made label matrix of %d frames x %d labels' % label_feature_matrix.shape )
return label_feature_matrix
def extract_durational_features(self, dur_file_name=None, dur_data=None):
if dur_file_name:
io_funcs = BinaryIOCollection()
dur_dim = 1 ## hard coded for now
dur_data = io_funcs.load_binary_file(dur_file_name, dur_dim)
ph_count = len(dur_data)
total_num_of_frames = int(sum(dur_data))
duration_feature_array = numpy.zeros((total_num_of_frames, self.frame_feature_size))
frame_index=0
for i in range(ph_count):
frame_number = int(dur_data[i])
if self.subphone_feats == "coarse_coding":
cc_feat_matrix = self.extract_coarse_coding_features_relative(frame_number)
for j in range(frame_number):
duration_feature_array[frame_index, 0] = cc_feat_matrix[j, 0]
duration_feature_array[frame_index, 1] = cc_feat_matrix[j, 1]
duration_feature_array[frame_index, 2] = cc_feat_matrix[j, 2]
duration_feature_array[frame_index, 3] = float(frame_number)
frame_index+=1
elif self.subphone_feats == 'full':
state_number = 5 # hard coded here
phone_duration = sum(dur_data[i, :])
state_duration_base = 0
                for state_index in range(1, state_number+1):
state_index_backward = (state_number - state_index) + 1
frame_number = int(dur_data[i][state_index-1])
                    for j in range(frame_number):
duration_feature_array[frame_index, 0] = float(j+1) / float(frame_number) ## fraction through state (forwards)
duration_feature_array[frame_index, 1] = float(frame_number - j) / float(frame_number) ## fraction through state (backwards)
duration_feature_array[frame_index, 2] = float(frame_number) ## length of state in frames
duration_feature_array[frame_index, 3] = float(state_index) ## state index (counting forwards)
duration_feature_array[frame_index, 4] = float(state_index_backward) ## state index (counting backwards)
duration_feature_array[frame_index, 5] = float(phone_duration) ## length of phone in frames
duration_feature_array[frame_index, 6] = float(frame_number) / float(phone_duration) ## fraction of the phone made up by current state
                        duration_feature_array[frame_index, 7] = float(phone_duration - j - state_duration_base) / float(phone_duration) ## fraction through phone (backwards)
                        duration_feature_array[frame_index, 8] = float(state_duration_base + j + 1) / float(phone_duration) ## fraction through phone (forwards)
frame_index+=1
state_duration_base += frame_number
return duration_feature_array
def compute_coarse_coding_features(self, num_states):
assert num_states == 3
npoints = 600
cc_features = numpy.zeros((num_states, npoints))
x1 = numpy.linspace(-1.5, 1.5, npoints)
x2 = numpy.linspace(-1.0, 2.0, npoints)
x3 = numpy.linspace(-0.5, 2.5, npoints)
mu1 = 0.0
mu2 = 0.5
mu3 = 1.0
sigma = 0.4
cc_features[0, :] = mlab.normpdf(x1, mu1, sigma)
cc_features[1, :] = mlab.normpdf(x2, mu2, sigma)
cc_features[2, :] = mlab.normpdf(x3, mu3, sigma)
return cc_features
def extract_coarse_coding_features_relative(self, phone_duration):
dur = int(phone_duration)
cc_feat_matrix = numpy.zeros((dur, 3))
for i in range(dur):
rel_indx = int((200/float(dur))*i)
cc_feat_matrix[i,0] = self.cc_features[0, 300+rel_indx]
cc_feat_matrix[i,1] = self.cc_features[1, 200+rel_indx]
cc_feat_matrix[i,2] = self.cc_features[2, 100+rel_indx]
return cc_feat_matrix
### this function is not used now
def extract_coarse_coding_features_absolute(self, phone_duration):
dur = int(phone_duration)
cc_feat_matrix = numpy.zeros((dur, 3))
npoints1 = (dur*2)*10+1
npoints2 = (dur-1)*10+1
npoints3 = (2*dur-1)*10+1
x1 = numpy.linspace(-dur, dur, npoints1)
x2 = numpy.linspace(1, dur, npoints2)
x3 = numpy.linspace(1, 2*dur-1, npoints3)
mu1 = 0
mu2 = (1+dur)/2
mu3 = dur
variance = 1
sigma = variance*((dur/10)+2)
sigma1 = sigma
sigma2 = sigma-1
sigma3 = sigma
y1 = mlab.normpdf(x1, mu1, sigma1)
y2 = mlab.normpdf(x2, mu2, sigma2)
y3 = mlab.normpdf(x3, mu3, sigma3)
for i in range(dur):
cc_feat_matrix[i,0] = y1[(dur+1+i)*10]
cc_feat_matrix[i,1] = y2[i*10]
cc_feat_matrix[i,2] = y3[i*10]
for i in range(3):
cc_feat_matrix[:,i] = cc_feat_matrix[:,i]/max(cc_feat_matrix[:,i])
return cc_feat_matrix
### this function is not used now
def pattern_matching(self, label):
# this function is where most time is spent during label preparation
#
# it might be possible to speed it up by using pre-compiled regular expressions?
# (not trying this now, since we may change to to XML tree format for input instead of HTS labels)
#
label_size = len(label)
lab_binary_vector = numpy.zeros((1, self.dict_size))
for i in range(self.dict_size):
current_question_list = self.question_dict[str(i)]
binary_flag = 0
for iq in range(len(current_question_list)):
current_question = current_question_list[iq]
current_size = len(current_question)
if current_question[0] == '*' and current_question[current_size-1] == '*':
temp_question = current_question[1:current_size-1]
for il in range(1, label_size-current_size+2):
if temp_question == label[il:il+current_size-2]:
binary_flag = 1
elif current_question[current_size-1] != '*':
temp_question = current_question[1:current_size]
if temp_question == label[label_size-current_size+1:label_size]:
binary_flag = 1
elif current_question[0] != '*':
temp_question = current_question[0:current_size-1]
if temp_question == label[0:current_size-1]:
binary_flag = 1
if binary_flag == 1:
break
lab_binary_vector[0, i] = binary_flag
return lab_binary_vector
def pattern_matching_binary(self, label):
dict_size = len(self.discrete_dict)
lab_binary_vector = numpy.zeros((1, dict_size))
for i in range(dict_size):
current_question_list = self.discrete_dict[str(i)]
binary_flag = 0
for iq in range(len(current_question_list)):
current_compiled = current_question_list[iq]
ms = current_compiled.search(label)
if ms is not None:
binary_flag = 1
break
lab_binary_vector[0, i] = binary_flag
return lab_binary_vector
def pattern_matching_continous_position(self, label):
dict_size = len(self.continuous_dict)
lab_continuous_vector = numpy.zeros((1, dict_size))
for i in range(dict_size):
continuous_value = -1.0
current_compiled = self.continuous_dict[str(i)]
ms = current_compiled.search(label)
if ms is not None:
# assert len(ms.group()) == 1
continuous_value = ms.group(1)
lab_continuous_vector[0, i] = continuous_value
return lab_continuous_vector
def load_question_set(self, qs_file_name):
fid = open(qs_file_name)
question_index = 0
question_dict = {}
ori_question_dict = {}
for line in fid.readlines():
line = line.replace('\n', '')
if len(line) > 5:
temp_list = line.split('{')
temp_line = temp_list[1]
temp_list = temp_line.split('}')
temp_line = temp_list[0]
question_list = temp_line.split(',')
question_dict[str(question_index)] = question_list
ori_question_dict[str(question_index)] = line
question_index += 1
fid.close()
# logger = logging.getLogger("labels")
# logger.debug('loaded question set with %d questions' % len(question_dict))
return question_dict, ori_question_dict
def load_question_set_continous(self, qs_file_name):
# logger = logging.getLogger("labels")
fid = open(qs_file_name)
binary_qs_index = 0
continuous_qs_index = 0
binary_dict = {}
continuous_dict = {}
LL=re.compile(re.escape('LL-'))
for line in fid.readlines():
line = line.replace('\n', '')
if len(line) > 5:
temp_list = line.split('{')
temp_line = temp_list[1]
temp_list = temp_line.split('}')
temp_line = temp_list[0]
temp_line = temp_line.strip()
question_list = temp_line.split(',')
temp_list = line.split(' ')
question_key = temp_list[1]
# print line
if temp_list[0] == 'CQS':
assert len(question_list) == 1
processed_question = self.wildcards2regex(question_list[0], convert_number_pattern=True)
continuous_dict[str(continuous_qs_index)] = re.compile(processed_question) #save pre-compiled regular expression
continuous_qs_index = continuous_qs_index + 1
elif temp_list[0] == 'QS':
re_list = []
for temp_question in question_list:
processed_question = self.wildcards2regex(temp_question)
if LL.search(question_key):
processed_question = '^'+processed_question
re_list.append(re.compile(processed_question))
binary_dict[str(binary_qs_index)] = re_list
binary_qs_index = binary_qs_index + 1
else:
# logger.critical('The question set is not defined correctly: %s' %(line))
raise Exception
# question_index = question_index + 1
return binary_dict, continuous_dict
def wildcards2regex(self, question, convert_number_pattern=False):
"""
Convert HTK-style question into regular expression for searching labels.
If convert_number_pattern, keep the following sequences unescaped for
        extracting continuous values:
(\d+) -- handles digit without decimal point
([\d\.]+) -- handles digits with and without decimal point
"""
## handle HTK wildcards (and lack of them) at ends of label:
prefix = ""
postfix = ""
if '*' in question:
if not question.startswith('*'):
prefix = "\A"
if not question.endswith('*'):
postfix = "\Z"
question = question.strip('*')
question = re.escape(question)
## convert remaining HTK wildcards * and ? to equivalent regex:
question = question.replace('\\*', '.*')
question = question.replace('\\?', '.')
question = prefix + question + postfix
if convert_number_pattern:
question = question.replace('\\(\\\\d\\+\\)', '(\d+)')
question = question.replace('\\(\\[\\\\d\\\\\\.\\]\\+\\)', '([\d\.]+)')
return question
class HTSDurationLabelNormalisation(HTSLabelNormalisation):
"""
Unlike HTSLabelNormalisation, HTSDurationLabelNormalisation does not accept timings.
One line of labels is converted into 1 datapoint, that is, the label is not 'unpacked'
into frames. HTK state index [\d] is not handled in any special way.
"""
def __init__(self, question_file_name=None, subphone_feats='full', continuous_flag=True):
super(HTSDurationLabelNormalisation, self).__init__(question_file_name=question_file_name, \
subphone_feats=subphone_feats, continuous_flag=continuous_flag)
## don't use extra features beyond those in questions for duration labels:
self.dimension = self.dict_size
def load_labels_with_state_alignment(self, file_name, add_frame_features=False):
## add_frame_features not used in HTSLabelNormalisation -- only in XML version
# logger = logging.getLogger("labels")
assert self.dimension == self.dict_size
label_feature_matrix = numpy.empty((100000, self.dimension))
label_feature_index = 0
lab_binary_vector = numpy.zeros((1, self.dict_size))
fid = open(file_name)
utt_labels = fid.readlines()
fid.close()
current_index = 0
label_number = len(utt_labels)
# logger.info('loaded %s, %3d labels' % (file_name, label_number) )
## remove empty lines
utt_labels = [line for line in utt_labels if line != '']
for (line_number, line) in enumerate(utt_labels):
temp_list = re.split('\s+', line.strip())
full_label = temp_list[-1] ## take last entry -- ignore timings if present
label_binary_vector = self.pattern_matching_binary(full_label)
# if there is no CQS question, the label_continuous_vector will become to empty
label_continuous_vector = self.pattern_matching_continous_position(full_label)
label_vector = numpy.concatenate([label_binary_vector, label_continuous_vector], axis = 1)
label_feature_matrix[line_number, :] = label_vector[:]
label_feature_matrix = label_feature_matrix[:line_number+1,:]
# logger.debug('made label matrix of %d frames x %d labels' % label_feature_matrix.shape )
return label_feature_matrix
# -----------------------------
if __name__ == '__main__':
qs_file_name = '/afs/inf.ed.ac.uk/group/cstr/projects/blizzard_entries/blizzard2016/straight_voice/Hybrid_duration_experiments/dnn_tts_release/lstm_rnn/data/questions.hed'
print(qs_file_name)
ori_file_list = ['/afs/inf.ed.ac.uk/group/cstr/projects/blizzard_entries/blizzard2016/straight_voice/Hybrid_duration_experiments/dnn_tts_release/lstm_rnn/data/label_state_align/AMidsummerNightsDream_000_000.lab']
output_file_list = ['/afs/inf.ed.ac.uk/group/cstr/projects/blizzard_entries/blizzard2016/straight_voice/Hybrid_duration_experiments/dnn_tts_release/lstm_rnn/data/binary_label_601/AMidsummerNightsDream_000_000.lab']
#output_file_list = ['/afs/inf.ed.ac.uk/group/cstr/projects/blizzard_entries/blizzard2016/straight_voice/Hybrid_duration_experiments/dnn_tts_release/lstm_rnn/data/dur/AMidsummerNightsDream_000_000.dur']
label_operater = HTSLabelNormalisation(qs_file_name)
label_operater.perform_normalisation(ori_file_list, output_file_list)
#feature_type="binary"
#unit_size = "phoneme"
#feat_size = "phoneme"
#label_operater.prepare_dur_data(ori_file_list, output_file_list, feature_type, unit_size, feat_size)
#label_operater.prepare_dur_data(ori_file_list, output_file_list, feature_type)
print(label_operater.dimension)
| [
"re.split",
"numpy.fromfile",
"matplotlib.mlab.normpdf",
"re.escape",
"re.compile",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"numpy.concatenate",
"sys.exit"
] | [((2644, 2688), 'numpy.fromfile', 'numpy.fromfile', (['fid_lab'], {'dtype': 'numpy.float32'}), '(fid_lab, dtype=numpy.float32)\n', (2658, 2688), False, 'import numpy\n'), ((3058, 3086), 'numpy.array', 'numpy.array', (['data', '"""float32"""'], {}), "(data, 'float32')\n", (3069, 3086), False, 'import numpy\n'), ((3296, 3340), 'numpy.fromfile', 'numpy.fromfile', (['fid_lab'], {'dtype': 'numpy.float32'}), '(fid_lab, dtype=numpy.float32)\n', (3310, 3340), False, 'import numpy\n'), ((22759, 22796), 'numpy.empty', 'numpy.empty', (['(100000, self.dimension)'], {}), '((100000, self.dimension))\n', (22770, 22796), False, 'import numpy\n'), ((27299, 27336), 'numpy.empty', 'numpy.empty', (['(100000, self.dimension)'], {}), '((100000, self.dimension))\n', (27310, 27336), False, 'import numpy\n'), ((27425, 27457), 'numpy.zeros', 'numpy.zeros', (['(1, self.dict_size)'], {}), '((1, self.dict_size))\n', (27436, 27457), False, 'import numpy\n'), ((35868, 35927), 'numpy.zeros', 'numpy.zeros', (['(total_num_of_frames, self.frame_feature_size)'], {}), '((total_num_of_frames, self.frame_feature_size))\n', (35879, 35927), False, 'import numpy\n'), ((38573, 38607), 'numpy.zeros', 'numpy.zeros', (['(num_states, npoints)'], {}), '((num_states, npoints))\n', (38584, 38607), False, 'import numpy\n'), ((38622, 38656), 'numpy.linspace', 'numpy.linspace', (['(-1.5)', '(1.5)', 'npoints'], {}), '(-1.5, 1.5, npoints)\n', (38636, 38656), False, 'import numpy\n'), ((38670, 38704), 'numpy.linspace', 'numpy.linspace', (['(-1.0)', '(2.0)', 'npoints'], {}), '(-1.0, 2.0, npoints)\n', (38684, 38704), False, 'import numpy\n'), ((38718, 38752), 'numpy.linspace', 'numpy.linspace', (['(-0.5)', '(2.5)', 'npoints'], {}), '(-0.5, 2.5, npoints)\n', (38732, 38752), False, 'import numpy\n'), ((38858, 38886), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x1', 'mu1', 'sigma'], {}), '(x1, mu1, sigma)\n', (38870, 38886), True, 'import matplotlib.mlab as mlab\n'), ((38915, 38943), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x2', 'mu2', 'sigma'], {}), '(x2, mu2, sigma)\n', (38927, 38943), True, 'import matplotlib.mlab as mlab\n'), ((38972, 39000), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x3', 'mu3', 'sigma'], {}), '(x3, mu3, sigma)\n', (38984, 39000), True, 'import matplotlib.mlab as mlab\n'), ((39161, 39182), 'numpy.zeros', 'numpy.zeros', (['(dur, 3)'], {}), '((dur, 3))\n', (39172, 39182), False, 'import numpy\n'), ((39665, 39686), 'numpy.zeros', 'numpy.zeros', (['(dur, 3)'], {}), '((dur, 3))\n', (39676, 39686), False, 'import numpy\n'), ((39800, 39835), 'numpy.linspace', 'numpy.linspace', (['(-dur)', 'dur', 'npoints1'], {}), '(-dur, dur, npoints1)\n', (39814, 39835), False, 'import numpy\n'), ((39849, 39881), 'numpy.linspace', 'numpy.linspace', (['(1)', 'dur', 'npoints2'], {}), '(1, dur, npoints2)\n', (39863, 39881), False, 'import numpy\n'), ((39895, 39935), 'numpy.linspace', 'numpy.linspace', (['(1)', '(2 * dur - 1)', 'npoints3'], {}), '(1, 2 * dur - 1, npoints3)\n', (39909, 39935), False, 'import numpy\n'), ((40135, 40164), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x1', 'mu1', 'sigma1'], {}), '(x1, mu1, sigma1)\n', (40147, 40164), True, 'import matplotlib.mlab as mlab\n'), ((40178, 40207), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x2', 'mu2', 'sigma2'], {}), '(x2, mu2, sigma2)\n', (40190, 40207), True, 'import matplotlib.mlab as mlab\n'), ((40221, 40250), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x3', 'mu3', 'sigma3'], {}), '(x3, mu3, sigma3)\n', (40233, 40250), True, 'import matplotlib.mlab as mlab\n'), ((40989, 
41021), 'numpy.zeros', 'numpy.zeros', (['(1, self.dict_size)'], {}), '((1, self.dict_size))\n', (41000, 41021), False, 'import numpy\n'), ((42420, 42447), 'numpy.zeros', 'numpy.zeros', (['(1, dict_size)'], {}), '((1, dict_size))\n', (42431, 42447), False, 'import numpy\n'), ((43069, 43096), 'numpy.zeros', 'numpy.zeros', (['(1, dict_size)'], {}), '((1, dict_size))\n', (43080, 43096), False, 'import numpy\n'), ((47132, 47151), 're.escape', 're.escape', (['question'], {}), '(question)\n', (47141, 47151), False, 'import numpy, re, sys\n'), ((48635, 48672), 'numpy.empty', 'numpy.empty', (['(100000, self.dimension)'], {}), '((100000, self.dimension))\n', (48646, 48672), False, 'import numpy\n'), ((48736, 48768), 'numpy.zeros', 'numpy.zeros', (['(1, self.dict_size)'], {}), '((1, self.dict_size))\n', (48747, 48768), False, 'import numpy\n'), ((4622, 4633), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4630, 4633), False, 'import numpy, re, sys\n'), ((11233, 11244), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11241, 11244), False, 'import numpy, re, sys\n'), ((13251, 13275), 'numpy.empty', 'numpy.empty', (['(100000, 1)'], {}), '((100000, 1))\n', (13262, 13275), False, 'import numpy\n'), ((14128, 14150), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (14136, 14150), False, 'import numpy, re, sys\n'), ((19892, 19922), 'numpy.empty', 'numpy.empty', (['(100000, dur_dim)'], {}), '((100000, dur_dim))\n', (19903, 19922), False, 'import numpy\n'), ((20473, 20495), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (20481, 20495), False, 'import numpy, re, sys\n'), ((23066, 23088), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (23074, 23088), False, 'import numpy, re, sys\n'), ((24258, 24331), 'numpy.concatenate', 'numpy.concatenate', (['[label_binary_vector, label_continuous_vector]'], {'axis': '(1)'}), '([label_binary_vector, label_continuous_vector], axis=1)\n', (24275, 24331), False, 'import numpy\n'), ((27890, 27912), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (27898, 27912), False, 'import numpy, re, sys\n'), ((44649, 44665), 're.escape', 're.escape', (['"""LL-"""'], {}), "('LL-')\n", (44658, 44665), False, 'import numpy, re, sys\n'), ((49581, 49654), 'numpy.concatenate', 'numpy.concatenate', (['[label_binary_vector, label_continuous_vector]'], {'axis': '(1)'}), '([label_binary_vector, label_continuous_vector], axis=1)\n', (49598, 49654), False, 'import numpy\n'), ((12021, 12032), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12029, 12032), False, 'import numpy, re, sys\n'), ((12800, 12811), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12808, 12811), False, 'import numpy, re, sys\n'), ((15830, 15860), 'numpy.zeros', 'numpy.zeros', (['(frame_number, 1)'], {}), '((frame_number, 1))\n', (15841, 15860), False, 'import numpy\n'), ((20813, 20843), 'numpy.zeros', 'numpy.zeros', (['(frame_number, 1)'], {}), '((frame_number, 1))\n', (20824, 20843), False, 'import numpy\n'), ((24420, 24489), 'numpy.zeros', 'numpy.zeros', (['(frame_number, self.dict_size + self.frame_feature_size)'], {}), '((frame_number, self.dict_size + self.frame_feature_size))\n', (24431, 24489), False, 'import numpy\n'), ((29139, 29212), 'numpy.concatenate', 'numpy.concatenate', (['[label_binary_vector, label_continuous_vector]'], {'axis': '(1)'}), '([label_binary_vector, label_continuous_vector], axis=1)\n', (29156, 29212), False, 'import numpy\n'), ((29853, 29922), 'numpy.zeros', 'numpy.zeros', 
(['(frame_number, self.dict_size + self.frame_feature_size)'], {}), '((frame_number, self.dict_size + self.frame_feature_size))\n', (29864, 29922), False, 'import numpy\n'), ((13388, 13418), 'numpy.empty', 'numpy.empty', (['(100000, dur_dim)'], {}), '((100000, dur_dim))\n', (13399, 13418), False, 'import numpy\n'), ((13455, 13480), 'numpy.zeros', 'numpy.zeros', (['(dur_dim, 1)'], {}), '((dur_dim, 1))\n', (13466, 13480), False, 'import numpy\n'), ((13561, 13585), 'numpy.empty', 'numpy.empty', (['(100000, 1)'], {}), '((100000, 1))\n', (13572, 13585), False, 'import numpy\n'), ((14856, 14878), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (14864, 14878), False, 'import numpy, re, sys\n'), ((19220, 19250), 'numpy.array', 'numpy.array', (['MLU_dur[seg_indx]'], {}), '(MLU_dur[seg_indx])\n', (19231, 19250), False, 'import numpy\n'), ((20037, 20067), 'numpy.empty', 'numpy.empty', (['(100000, dur_dim)'], {}), '((100000, dur_dim))\n', (20048, 20067), False, 'import numpy\n'), ((21099, 21110), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21107, 21110), False, 'import numpy, re, sys\n'), ((34371, 34440), 'numpy.zeros', 'numpy.zeros', (['(state_number, self.dict_size + self.frame_feature_size)'], {}), '((state_number, self.dict_size + self.frame_feature_size))\n', (34382, 34440), False, 'import numpy\n'), ((45437, 45467), 're.compile', 're.compile', (['processed_question'], {}), '(processed_question)\n', (45447, 45467), False, 'import numpy, re, sys\n'), ((16270, 16281), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16278, 16281), False, 'import numpy, re, sys\n'), ((21242, 21271), 'numpy.array', 'numpy.array', (['[phone_duration]'], {}), '([phone_duration])\n', (21253, 21271), False, 'import numpy\n'), ((29486, 29508), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (29494, 29508), False, 'import numpy, re, sys\n'), ((16890, 16919), 'numpy.array', 'numpy.array', (['[phone_duration]'], {}), '([phone_duration])\n', (16901, 16919), False, 'import numpy\n'), ((25867, 25906), 'sys.exit', 'sys.exit', (['"""unknown subphone_feats type"""'], {}), "('unknown subphone_feats type')\n", (25875, 25906), False, 'import numpy, re, sys\n'), ((45948, 45978), 're.compile', 're.compile', (['processed_question'], {}), '(processed_question)\n', (45958, 45978), False, 'import numpy, re, sys\n'), ((17016, 17048), 'numpy.array', 'numpy.array', (['[syllable_duration]'], {}), '([syllable_duration])\n', (17027, 17048), False, 'import numpy\n'), ((10346, 10411), 'sys.exit', 'sys.exit', (["('Unknown value for subphone_feats: %s' % subphone_feats)"], {}), "('Unknown value for subphone_feats: %s' % subphone_feats)\n", (10354, 10411), False, 'import numpy, re, sys\n'), ((17141, 17169), 'numpy.array', 'numpy.array', (['[word_duration]'], {}), '([word_duration])\n', (17152, 17169), False, 'import numpy\n'), ((34005, 34044), 'sys.exit', 'sys.exit', (['"""unknown subphone_feats type"""'], {}), "('unknown subphone_feats type')\n", (34013, 34044), False, 'import numpy, re, sys\n')] |
import os
import argparse
def main(ops):
if ops.train:
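        # Note: the trailing backslashes escape the newlines inside the string literal, so the
        # flags below (together with their leading whitespace) are concatenated into one long
        # shell command that is passed to os.system.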
os.system("python train.py \
--dataroot ./datasets/lol/final_dataset \
--no_dropout \
--name enlightening \
--model single \
--dataset_mode unaligned \
--which_model_netG sid_unet_resize \
--which_model_netD no_norm_4 \
--patchD \
--patch_vgg \
--patchD_3 5 \
--n_layers_D 5 \
--n_layers_patchD 4 \
--fineSize 320 \
--patchSize 32 \
--skip 1 \
--batchSize 32 \
--self_attention \
--use_norm 1 \
--use_wgan 0 \
--use_ragan \
--hybrid_loss \
--times_residual \
--instance_norm 0 \
--vgg 1 \
--vgg_choose relu5_1 \
--gpu_ids 0\
--display_port=" + ops.port)
elif ops.predict:
for i in range(1):
os.system("python predict.py \
--dataroot D:/sangkny/pyTest/MLDL/codes/enlightenGAN/datasets/lol/test_data \
--name enlightening \
--model single \
--which_direction AtoB \
--no_dropout \
--dataset_mode unaligned \
--which_model_netG sid_unet_resize \
--skip 1 \
--use_norm 1 \
--use_wgan 0 \
--self_attention \
--times_residual \
--instance_norm 0 --resize_or_crop='no'\
--which_epoch " + str(200 - i*5))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=str, default="8097")
parser.add_argument("--train", action='store_true')
parser.add_argument("--predict", action='store_true')
opt = parser.parse_args()
# opt.train = True
main(opt) | [
"os.system",
"argparse.ArgumentParser"
] | [((1254, 1279), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1277, 1279), False, 'import argparse\n'), ((59, 730), 'os.system', 'os.system', (["('python train.py \\t\\t\\t--dataroot ./datasets/lol/final_dataset \\t\\t\\t--no_dropout \\t\\t\\t--name enlightening \\t\\t\\t--model single \\t\\t\\t--dataset_mode unaligned \\t\\t\\t--which_model_netG sid_unet_resize \\t\\t\\t--which_model_netD no_norm_4 \\t\\t\\t--patchD \\t\\t\\t--patch_vgg \\t\\t\\t--patchD_3 5 \\t\\t\\t--n_layers_D 5 \\t\\t\\t--n_layers_patchD 4 \\t\\t\\t--fineSize 320 \\t\\t\\t--patchSize 32 \\t\\t\\t--skip 1 \\t\\t\\t--batchSize 32 \\t\\t\\t--self_attention \\t\\t\\t--use_norm 1 \\t\\t\\t--use_wgan 0 \\t\\t\\t--use_ragan \\t\\t\\t--hybrid_loss \\t\\t\\t--times_residual \\t\\t\\t--instance_norm 0 \\t\\t\\t--vgg 1 \\t\\t\\t--vgg_choose relu5_1 \\t\\t\\t--gpu_ids 0\\t\\t\\t--display_port='\n + ops.port)"], {}), "(\n 'python train.py \\t\\t\\t--dataroot ./datasets/lol/final_dataset \\t\\t\\t--no_dropout \\t\\t\\t--name enlightening \\t\\t\\t--model single \\t\\t\\t--dataset_mode unaligned \\t\\t\\t--which_model_netG sid_unet_resize \\t\\t\\t--which_model_netD no_norm_4 \\t\\t\\t--patchD \\t\\t\\t--patch_vgg \\t\\t\\t--patchD_3 5 \\t\\t\\t--n_layers_D 5 \\t\\t\\t--n_layers_patchD 4 \\t\\t\\t--fineSize 320 \\t\\t\\t--patchSize 32 \\t\\t\\t--skip 1 \\t\\t\\t--batchSize 32 \\t\\t\\t--self_attention \\t\\t\\t--use_norm 1 \\t\\t\\t--use_wgan 0 \\t\\t\\t--use_ragan \\t\\t\\t--hybrid_loss \\t\\t\\t--times_residual \\t\\t\\t--instance_norm 0 \\t\\t\\t--vgg 1 \\t\\t\\t--vgg_choose relu5_1 \\t\\t\\t--gpu_ids 0\\t\\t\\t--display_port='\n + ops.port)\n", (68, 730), False, 'import os\n')] |
import os
import sys
import glob
all_files = glob.glob("*.py")
for file in all_files:
print(file)
cmd = "autopep8 --max-line-length=160 --in-place --aggressive --aggressive " + file
os.system(cmd)
all_files = glob.glob("src/*.py")
for file in all_files:
print(file)
cmd = "autopep8 --max-line-length=160 --in-place --aggressive --aggressive " + file
os.system(cmd)
| [
"os.system",
"glob.glob"
] | [((46, 63), 'glob.glob', 'glob.glob', (['"""*.py"""'], {}), "('*.py')\n", (55, 63), False, 'import glob\n'), ((223, 244), 'glob.glob', 'glob.glob', (['"""src/*.py"""'], {}), "('src/*.py')\n", (232, 244), False, 'import glob\n'), ((195, 209), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (204, 209), False, 'import os\n'), ((376, 390), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (385, 390), False, 'import os\n')] |
# Setup Python module
from setuptools import setup, find_packages
modules = ["ALLSorts." + p for p in sorted(find_packages("./ALLSorts"))]
setup(
name="ALLSorts",
version="0.1.3",
description="BALL Subtype Classifier/Investigator.",
url="https://github.com/breons/ALLSorts",
author="<NAME>",
license="MIT",
packages=["ALLSorts", *modules],
zip_safe=False,
include_package_data=True,
install_requires=[
"joblib==0.15.1",
"matplotlib==3.2.1",
"numpy==1.18.1",
"pandas==1.0.3",
"scikit-learn==0.22.1",
"scipy==1.4.1",
"umap-learn==0.4.4",
"plotly==4.14.3",
"kaleido==0.1.0"
],
entry_points={
"console_scripts": ["ALLSorts=ALLSorts.allsorts:run"]
}
) | [
"setuptools.find_packages",
"setuptools.setup"
] | [((141, 674), 'setuptools.setup', 'setup', ([], {'name': '"""ALLSorts"""', 'version': '"""0.1.3"""', 'description': '"""BALL Subtype Classifier/Investigator."""', 'url': '"""https://github.com/breons/ALLSorts"""', 'author': '"""<NAME>"""', 'license': '"""MIT"""', 'packages': "['ALLSorts', *modules]", 'zip_safe': '(False)', 'include_package_data': '(True)', 'install_requires': "['joblib==0.15.1', 'matplotlib==3.2.1', 'numpy==1.18.1', 'pandas==1.0.3',\n 'scikit-learn==0.22.1', 'scipy==1.4.1', 'umap-learn==0.4.4',\n 'plotly==4.14.3', 'kaleido==0.1.0']", 'entry_points': "{'console_scripts': ['ALLSorts=ALLSorts.allsorts:run']}"}), "(name='ALLSorts', version='0.1.3', description=\n 'BALL Subtype Classifier/Investigator.', url=\n 'https://github.com/breons/ALLSorts', author='<NAME>', license='MIT',\n packages=['ALLSorts', *modules], zip_safe=False, include_package_data=\n True, install_requires=['joblib==0.15.1', 'matplotlib==3.2.1',\n 'numpy==1.18.1', 'pandas==1.0.3', 'scikit-learn==0.22.1',\n 'scipy==1.4.1', 'umap-learn==0.4.4', 'plotly==4.14.3', 'kaleido==0.1.0'\n ], entry_points={'console_scripts': ['ALLSorts=ALLSorts.allsorts:run']})\n", (146, 674), False, 'from setuptools import setup, find_packages\n'), ((110, 137), 'setuptools.find_packages', 'find_packages', (['"""./ALLSorts"""'], {}), "('./ALLSorts')\n", (123, 137), False, 'from setuptools import setup, find_packages\n')] |
from src.binary_search import binary_search
def main():
    given_array_length = int(input("Array length: "))
    given_array = list(map(int, input("\nNumbers for the array: ").strip().split()))[:given_array_length]
    wanted_number = int(input("Element to find: "))
given_array.sort()
result_index = binary_search(given_array, wanted_number)
print("Szukam liczby ", wanted_number, "... Jest na indeksie ", result_index)
if __name__ == "__main__":
main() | [
"src.binary_search.binary_search"
] | [((313, 354), 'src.binary_search.binary_search', 'binary_search', (['given_array', 'wanted_number'], {}), '(given_array, wanted_number)\n', (326, 354), False, 'from src.binary_search import binary_search\n')] |
#!/usr/bin/python3 -I
import sys
import os
import os.path
import json
import subprocess
# check system behavior for suid-sudo
original_sys_path = sys.path[0:]
sys.path.insert(0, os.path.dirname(__file__))
import common_lib
from common_lib import *
sys.path[0:] = original_sys_path
def extlang_test(lang, cmd):
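    # `cmd` is expected to print a JSON pair [library_search_path, interpreter_executable_path].
    # The checks below pass only when the interpreter binary and every library directory are
    # affected by root alone; check_affected_by (from common_lib) returns a non-empty result
    # when other users could also affect the given path.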
subtest_start("path_examine")
l = subprocess.check_output(cmd)
if not isinstance(l, str):
l = l.decode('utf-8')
l = json.loads(l)
test_debug(repr(l))
test_ok()
libpath, exepath = l
subtest_start("exe_safety")
ap = check_affected_by(exepath, noexistok=True)
if len(ap):
test_ng ("%s path %s is affected by some users" % (lang, e,))
else:
test_ok ("%s seems to be only affected by root. OK." % (exepath,))
subtest_start("libpath_safety")
warn = False
for e in libpath:
ap = check_affected_by(e, noexistok=True)
if len(ap):
test_debug ("%s library path %s is affected by some users" % (lang, e,))
warn = True
if warn:
test_warn ("%s library is affected by the some users: check it." % (lang,))
else:
test_ok ("%s library path seems to be only affected by root. OK." % (lang,))
def ruby_test():
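    # Drop interpreter-specific environment overrides (e.g. RUBYLIB/RUBYOPT) so that only the
    # interpreter's default library search path is examined; perl_test below does the same
    # for the PERL* variables.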
for e in [x for x in os.environ.keys() if x.startswith("RUBY")]:
del os.environ[e]
extlang_test("ruby", ['ruby', '-e', 'require "json"; print JSON.dump([$:, File.readlink("/proc/self/exe")])'])
def perl_test():
for e in [x for x in os.environ.keys() if x.startswith("PERL")]:
del os.environ[e]
extlang_test("perl", ['perl', '-MJSON::PP', '-MConfig', '-e', 'print encode_json([\@INC, $Config{perlpath}])'])
if __name__ == '__main__':
do_test(ruby_test)
do_test(perl_test)
test_summary()
| [
"subprocess.check_output",
"os.path.dirname",
"json.loads",
"os.environ.keys"
] | [((181, 206), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((358, 386), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (381, 386), False, 'import subprocess\n'), ((456, 469), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (466, 469), False, 'import json\n'), ((1281, 1298), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1296, 1298), False, 'import os\n'), ((1509, 1526), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1524, 1526), False, 'import os\n')] |
from tensorboardX import SummaryWriter
import time
import shutil
import sys
import numpy as np
import json
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from classifier import loader
from classifier import model as M
from classifier import utils
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def cmd_train(context):
"""Main command do train the network.
:param context: this is a dictionary with all data from the
configuration file:
- 'command': run the specified command (e.g. train, test)
- 'gpu': ID of the used GPU
- 'bids_path_train': list of relative paths of the BIDS folders of each training center
- 'bids_path_validation': list of relative paths of the BIDS folders of each validation center
- 'bids_path_test': list of relative paths of the BIDS folders of each test center
- 'batch_size'
- 'dropout_rate'
- 'batch_norm_momentum'
- 'num_epochs'
- 'initial_lr': initial learning rate
- 'log_directory': folder name where log files are saved
"""
# Set the GPU
gpu_number = context["gpu"]
torch.cuda.set_device(gpu_number)
# This code will iterate over the folders and load the data, filtering
# the slices without labels and then concatenating all the datasets together
# Training dataset -------------------------------------------------------
ds_train = loader.BIDSIterator(context["bids_path_train"], "training")
print(f"Loaded {len(ds_train)} axial slices for the training set.")
train_loader = DataLoader(ds_train, batch_size=context["batch_size"],
shuffle=True, pin_memory=True,
num_workers=1)
# Validation dataset ------------------------------------------------------
ds_val = loader.BIDSIterator(context["bids_path_validation"], "validation")
print(f"Loaded {len(ds_val)} axial slices for the validation set.")
val_loader = DataLoader(ds_val, batch_size=context["batch_size"],
shuffle=True, pin_memory=True,
num_workers=1)
# Model definition ---------------------------------------------------------
model = M.Classifier(drop_rate=context["dropout_rate"],
bn_momentum=context["batch_norm_momentum"])
model.cuda()
num_epochs = context["num_epochs"]
initial_lr = context["initial_lr"]
# Using SGD with cosine annealing learning rate
optimizer = optim.SGD(model.parameters(), lr=initial_lr)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
# Write the metrics, images, etc to TensorBoard format
writer = SummaryWriter(logdir=context["log_directory"])
# Binary Cross Entropy Loss
criterion = nn.CrossEntropyLoss()
# Training loop -----------------------------------------------------------
best_validation_loss = float("inf")
for epoch in tqdm(range(1, num_epochs + 1), desc="Training"):
start_time = time.time()
scheduler.step()
lr = scheduler.get_lr()[0]
writer.add_scalar('learning_rate', lr, epoch)
model.train()
train_loss_total = 0.0
num_steps = 0
for i, batch in enumerate(train_loader):
input_samples = batch["data"]
input_labels = batch["label"]
var_input = input_samples.cuda()
var_labels = input_labels.cuda(non_blocking=True)
outputs = model(var_input)
loss = criterion(outputs, var_labels)
train_loss_total += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
num_steps += 1
train_loss_total_avg = train_loss_total / num_steps
tqdm.write(f"Epoch {epoch} training loss: {train_loss_total_avg:.4f}.")
# Validation loop -----------------------------------------------------
model.eval()
val_loss_total = 0.0
num_steps = 0
# setting the lists for confusion matrix
true_labels = []
guessed_labels = []
for i, batch in enumerate(val_loader):
input_samples = batch["data"]
input_labels = batch["label"]
true_labels += [int(x) for x in input_labels]
with torch.no_grad():
var_input = input_samples.cuda()
var_labels = input_labels.cuda(non_blocking=True)
outputs = model(var_input)
_, preds = torch.max(outputs, 1)
lst_labels = [int(x) for x in preds]
guessed_labels += lst_labels
loss = criterion(outputs, var_labels)
val_loss_total += loss.item()
num_steps += 1
accuracy = accuracy_score(true_labels, guessed_labels)
recall = recall_score(true_labels, guessed_labels, average='macro')
precision = precision_score(true_labels, guessed_labels, average='macro')
val_loss_total_avg = val_loss_total / num_steps
tqdm.write(f"Epoch {epoch} validation loss: {val_loss_total_avg:.4f}.")
tqdm.write(f"Epoch {epoch} accuracy : {accuracy :.4f}.")
# add metrics for tensorboard
writer.add_scalars('validation metrics', {
'accuracy': accuracy,
'recall_avg': recall,
'precision_avg': precision,
}, epoch)
writer.add_scalars('losses', {
'train_loss': train_loss_total_avg,
'val_loss': val_loss_total_avg,
}, epoch)
end_time = time.time()
total_time = end_time - start_time
tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))
if val_loss_total_avg < best_validation_loss:
best_validation_loss = val_loss_total_avg
torch.save(model.state_dict(), "./" + context["log_directory"] + "/best_model.pt")
# save final model
torch.save(model.state_dict(), "./" + context["log_directory"] + "/final_model.pt")
return
def cmd_test(context):
"""
Main function to test the accuracy of the model
"""
# Set the GPU
gpu_number = context["gpu"]
torch.cuda.set_device(gpu_number)
# Testing dataset -------------------------------------------------------
ds_test = loader.BIDSIterator(context["bids_path_test"], "testing")
print(f"Loaded {len(ds_test)} axial slices for the testing set.")
test_loader = DataLoader(ds_test, batch_size=context["batch_size"],
shuffle=True, pin_memory=True,
num_workers=1)
model = M.Classifier()
model.load_state_dict(
torch.load("./" + context["log_directory"] + "/best_model.pt", map_location="cuda:" + str(gpu_number)))
model.cuda()
model.eval()
# setting the lists for confusion matrix
true_labels = []
guessed_labels = []
for i, batch in enumerate(test_loader):
input_samples = batch["data"]
input_labels = batch["label"]
true_labels += [int(x) for x in input_labels]
with torch.no_grad():
test_input = input_samples.cuda()
outputs = model(test_input)
_, preds = torch.max(outputs, 1)
lst_labels = [int(x) for x in preds]
guessed_labels += lst_labels
accuracy = accuracy_score(true_labels, guessed_labels)
recall = recall_score(true_labels, guessed_labels, average=None)
precision = precision_score(true_labels, guessed_labels, average=None)
np.set_printoptions(precision=2)
if not (os.path.exists("./temp/")):
os.makedirs("./temp/")
class_names = ["T1w", "T2star", "T2w"]
# Plot normalized confusion matrix
utils.plot_confusion_matrix(true_labels, guessed_labels, classes=class_names, normalize=True,
title='Normalized confusion matrix')
# plt.savefig("./temp/test_cm.png")
utils.plot_metrics(np.array([recall, precision]), accuracy, class_names)
# plt.savefig("./temp/test_accuracy.png")
tqdm.write(f"Accuracy over test slices : {accuracy}")
tqdm.write(f"Recall over test slices : {recall}")
tqdm.write(f"Precision over test slices : {precision}")
return
def run_main():
if len(sys.argv) <= 1:
print("\npython main.py [config.json]\n")
return
with open(sys.argv[1], "r") as fhandle:
context = json.load(fhandle)
command = context["command"]
if command == 'train':
cmd_train(context)
shutil.copyfile(sys.argv[1], "./" + context["log_directory"] + "/config_file.json")
elif command == 'test':
cmd_test(context)
if __name__ == "__main__":
run_main()
| [
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.max",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"classifier.loader.BIDSIterator",
"os.path.exists",
"tensorboardX.SummaryWriter",
"tqdm.tqdm.write",
"classifier.model.Classifier",
"shutil.copyfile",
"time.time",
"torch.cuda.set_device",
"sklearn.metrics.accuracy_score",
"numpy.set_printoptions",
"os.makedirs",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"classifier.utils.plot_confusion_matrix",
"json.load",
"torch.no_grad"
] | [((1571, 1604), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu_number'], {}), '(gpu_number)\n', (1592, 1604), False, 'import torch\n'), ((1857, 1916), 'classifier.loader.BIDSIterator', 'loader.BIDSIterator', (["context['bids_path_train']", '"""training"""'], {}), "(context['bids_path_train'], 'training')\n", (1876, 1916), False, 'from classifier import loader\n'), ((2009, 2113), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_train'], {'batch_size': "context['batch_size']", 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(1)'}), "(ds_train, batch_size=context['batch_size'], shuffle=True,\n pin_memory=True, num_workers=1)\n", (2019, 2113), False, 'from torch.utils.data import DataLoader\n'), ((2264, 2330), 'classifier.loader.BIDSIterator', 'loader.BIDSIterator', (["context['bids_path_validation']", '"""validation"""'], {}), "(context['bids_path_validation'], 'validation')\n", (2283, 2330), False, 'from classifier import loader\n'), ((2421, 2523), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_val'], {'batch_size': "context['batch_size']", 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(1)'}), "(ds_val, batch_size=context['batch_size'], shuffle=True,\n pin_memory=True, num_workers=1)\n", (2431, 2523), False, 'from torch.utils.data import DataLoader\n'), ((2670, 2766), 'classifier.model.Classifier', 'M.Classifier', ([], {'drop_rate': "context['dropout_rate']", 'bn_momentum': "context['batch_norm_momentum']"}), "(drop_rate=context['dropout_rate'], bn_momentum=context[\n 'batch_norm_momentum'])\n", (2682, 2766), True, 'from classifier import model as M\n'), ((3014, 3073), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['optimizer', 'num_epochs'], {}), '(optimizer, num_epochs)\n', (3050, 3073), False, 'from torch import optim\n'), ((3147, 3193), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'logdir': "context['log_directory']"}), "(logdir=context['log_directory'])\n", (3160, 3193), False, 'from tensorboardX import SummaryWriter\n'), ((3243, 3264), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3262, 3264), False, 'from torch import nn\n'), ((6655, 6688), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu_number'], {}), '(gpu_number)\n', (6676, 6688), False, 'import torch\n'), ((6782, 6839), 'classifier.loader.BIDSIterator', 'loader.BIDSIterator', (["context['bids_path_test']", '"""testing"""'], {}), "(context['bids_path_test'], 'testing')\n", (6801, 6839), False, 'from classifier import loader\n'), ((6929, 7032), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_test'], {'batch_size': "context['batch_size']", 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(1)'}), "(ds_test, batch_size=context['batch_size'], shuffle=True,\n pin_memory=True, num_workers=1)\n", (6939, 7032), False, 'from torch.utils.data import DataLoader\n'), ((7100, 7114), 'classifier.model.Classifier', 'M.Classifier', ([], {}), '()\n', (7112, 7114), True, 'from classifier import model as M\n'), ((7825, 7868), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'guessed_labels'], {}), '(true_labels, guessed_labels)\n', (7839, 7868), False, 'from sklearn.metrics import accuracy_score\n'), ((7882, 7937), 'sklearn.metrics.recall_score', 'recall_score', (['true_labels', 'guessed_labels'], {'average': 'None'}), '(true_labels, guessed_labels, average=None)\n', (7894, 7937), False, 'from sklearn.metrics import recall_score\n'), ((7954, 8012), 'sklearn.metrics.precision_score', 
'precision_score', (['true_labels', 'guessed_labels'], {'average': 'None'}), '(true_labels, guessed_labels, average=None)\n', (7969, 8012), False, 'from sklearn.metrics import precision_score\n'), ((8018, 8050), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (8037, 8050), True, 'import numpy as np\n'), ((8210, 8345), 'classifier.utils.plot_confusion_matrix', 'utils.plot_confusion_matrix', (['true_labels', 'guessed_labels'], {'classes': 'class_names', 'normalize': '(True)', 'title': '"""Normalized confusion matrix"""'}), "(true_labels, guessed_labels, classes=\n class_names, normalize=True, title='Normalized confusion matrix')\n", (8237, 8345), False, 'from classifier import utils\n'), ((8543, 8596), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Accuracy over test slices : {accuracy}"""'], {}), "(f'Accuracy over test slices : {accuracy}')\n", (8553, 8596), False, 'from tqdm import tqdm\n'), ((8601, 8650), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Recall over test slices : {recall}"""'], {}), "(f'Recall over test slices : {recall}')\n", (8611, 8650), False, 'from tqdm import tqdm\n'), ((8655, 8710), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Precision over test slices : {precision}"""'], {}), "(f'Precision over test slices : {precision}')\n", (8665, 8710), False, 'from tqdm import tqdm\n'), ((3474, 3485), 'time.time', 'time.time', ([], {}), '()\n', (3483, 3485), False, 'import time\n'), ((4246, 4317), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Epoch {epoch} training loss: {train_loss_total_avg:.4f}."""'], {}), "(f'Epoch {epoch} training loss: {train_loss_total_avg:.4f}.')\n", (4256, 4317), False, 'from tqdm import tqdm\n'), ((5256, 5299), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'guessed_labels'], {}), '(true_labels, guessed_labels)\n', (5270, 5299), False, 'from sklearn.metrics import accuracy_score\n'), ((5317, 5375), 'sklearn.metrics.recall_score', 'recall_score', (['true_labels', 'guessed_labels'], {'average': '"""macro"""'}), "(true_labels, guessed_labels, average='macro')\n", (5329, 5375), False, 'from sklearn.metrics import recall_score\n'), ((5396, 5457), 'sklearn.metrics.precision_score', 'precision_score', (['true_labels', 'guessed_labels'], {'average': '"""macro"""'}), "(true_labels, guessed_labels, average='macro')\n", (5411, 5457), False, 'from sklearn.metrics import precision_score\n'), ((5524, 5595), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Epoch {epoch} validation loss: {val_loss_total_avg:.4f}."""'], {}), "(f'Epoch {epoch} validation loss: {val_loss_total_avg:.4f}.')\n", (5534, 5595), False, 'from tqdm import tqdm\n'), ((5604, 5659), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Epoch {epoch} accuracy : {accuracy:.4f}."""'], {}), "(f'Epoch {epoch} accuracy : {accuracy:.4f}.')\n", (5614, 5659), False, 'from tqdm import tqdm\n'), ((6047, 6058), 'time.time', 'time.time', ([], {}), '()\n', (6056, 6058), False, 'import time\n'), ((8064, 8089), 'os.path.exists', 'os.path.exists', (['"""./temp/"""'], {}), "('./temp/')\n", (8078, 8089), False, 'import os\n'), ((8100, 8122), 'os.makedirs', 'os.makedirs', (['"""./temp/"""'], {}), "('./temp/')\n", (8111, 8122), False, 'import os\n'), ((8437, 8466), 'numpy.array', 'np.array', (['[recall, precision]'], {}), '([recall, precision])\n', (8445, 8466), True, 'import numpy as np\n'), ((8896, 8914), 'json.load', 'json.load', (['fhandle'], {}), '(fhandle)\n', (8905, 8914), False, 'import json\n'), ((9012, 9099), 'shutil.copyfile', 'shutil.copyfile', (['sys.argv[1]', "('./' + 
context['log_directory'] + '/config_file.json')"], {}), "(sys.argv[1], './' + context['log_directory'] +\n '/config_file.json')\n", (9027, 9099), False, 'import shutil\n'), ((7569, 7584), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7582, 7584), False, 'import torch\n'), ((7696, 7717), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (7705, 7717), False, 'import torch\n'), ((4783, 4798), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4796, 4798), False, 'import torch\n'), ((4986, 5007), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (4995, 5007), False, 'import torch\n')] |
from ADLmainloop import ADLmain, ADLmainId
from ADLbasic import ADL
from utilsADL import dataLoader, plotPerformance
import random
import torch
import numpy as np
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/Users/ng98/Desktop/datasets/NEW/mat/',
help='Save Directory')
parser.add_argument('--save_dir', type=str, default='/Users/ng98/Desktop/datasets/NEW/mat/',
help='Save Directory')
parser.add_argument('--random_seed', type=int, default=1, help='Random seed')
parser.add_argument('--batch_size', type=int, default=1, help='Batch size for test and train')
args = parser.parse_args()
# random seed control
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
random.seed(args.random_seed)
batch_size = args.batch_size
trainingBatchSize = args.batch_size
datasets = [
"elecNormNew",
"nomao",
"WISDM_ar_v1.1_transformed",
"covtypeNorm",
#
"airlines",
"RBF_f",
"RBF_m",
"AGR_a",
"AGR_g",
"LED_a",
"LED_g",
# #
"kdd99",
# #
"gisette_scale_class_Nominal",
"epsilon_normalized.t_class_Nominal",
"SVHN.scale.t.libsvm.sparse_class_Nominal",
"spam_corpus",
"sector.scale.libsvm.class_Nominal_sparse"
]
# for f in AGR_a.arff AGR_g.arff WISDM_ar_v1.1_transformed.arff airlines.arff elecNormNew.arff kdd99.arff nomao.arff; do printf "'%s': [" $f; grep '@attribute' $f |grep -n '{'|awk -F ':' '{printf "%d, ", $1-1}'; printf "],\n"; done
# 'AGR_a.arff': [3, 4, 5, 9, ],
# 'AGR_g.arff': [3, 4, 5, 9, ],
# 'WISDM_ar_v1.1_transformed.arff': [1, 45, ],
# 'airlines.arff': [0, 2, 3, 4, 7, ],
# 'elecNormNew.arff': [1, 8, ],
# 'kdd99.arff': [1, 2, 3, 6, 11, 20, 21, 41, ],
# 'nomao.arff': [6, 7, 14, 15, 22, 23, 30, 31, 38, 39, 46, 47, 54, 55, 62, 63, 70, 71, 78, 79, 86, 87, 91, 95, 99, 103, 107, 111, 115, 118, ],
onehot_columns = {
'AGR_a': [3, 4, 5],
'AGR_g': [3, 4, 5],
'WISDM_ar_v1.1_transformed': [1],
'airlines': [0, 2, 3, 4],
'elecNormNew': [1],
'kdd99': [1, 2, 3, 6, 11, 20, 21],
'nomao': [6, 7, 14, 15, 22, 23, 30, 31, 38, 39, 46, 47, 54, 55, 62, 63, 70, 71, 78, 79, 86, 87, 91, 95, 99, 103, 107, 111, 115],
}
for d in datasets:
f = os.path.join(args.data_dir, d + '.mat')
p_dump_f = os.path.join(args.save_dir, d + '_predictions.csv')
# load data
dataStreams = dataLoader(f,
batchSize=batch_size,
onehot_columns = onehot_columns[d] if d in onehot_columns.keys() else None)
print('All labeled')
# initialization
ADLnet = ADL(dataStreams.nInput,dataStreams.nOutput, predictions_dump_file=p_dump_f)
ADLnet0, performanceHistory0, allPerformance0 = ADLmain(ADLnet,dataStreams, trainingBatchSize=trainingBatchSize, normalize=True)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4],performanceHistory0[5])
print("Dataset,{},{}".format(d, allPerformance0[0]))
| [
"torch.manual_seed",
"utilsADL.plotPerformance",
"argparse.ArgumentParser",
"os.path.join",
"ADLbasic.ADL",
"random.seed",
"numpy.random.seed",
"ADLmainloop.ADLmain"
] | [((199, 224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (222, 224), False, 'import argparse\n'), ((720, 752), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (734, 752), True, 'import numpy as np\n'), ((753, 788), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (770, 788), False, 'import torch\n'), ((789, 818), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (800, 818), False, 'import random\n'), ((2444, 2483), 'os.path.join', 'os.path.join', (['args.data_dir', "(d + '.mat')"], {}), "(args.data_dir, d + '.mat')\n", (2456, 2483), False, 'import os\n'), ((2499, 2550), 'os.path.join', 'os.path.join', (['args.save_dir', "(d + '_predictions.csv')"], {}), "(args.save_dir, d + '_predictions.csv')\n", (2511, 2550), False, 'import os\n'), ((2816, 2892), 'ADLbasic.ADL', 'ADL', (['dataStreams.nInput', 'dataStreams.nOutput'], {'predictions_dump_file': 'p_dump_f'}), '(dataStreams.nInput, dataStreams.nOutput, predictions_dump_file=p_dump_f)\n', (2819, 2892), False, 'from ADLbasic import ADL\n'), ((2945, 3031), 'ADLmainloop.ADLmain', 'ADLmain', (['ADLnet', 'dataStreams'], {'trainingBatchSize': 'trainingBatchSize', 'normalize': '(True)'}), '(ADLnet, dataStreams, trainingBatchSize=trainingBatchSize, normalize\n =True)\n', (2952, 3031), False, 'from ADLmainloop import ADLmain, ADLmainId\n'), ((3031, 3198), 'utilsADL.plotPerformance', 'plotPerformance', (['performanceHistory0[0]', 'performanceHistory0[1]', 'performanceHistory0[2]', 'performanceHistory0[3]', 'performanceHistory0[4]', 'performanceHistory0[5]'], {}), '(performanceHistory0[0], performanceHistory0[1],\n performanceHistory0[2], performanceHistory0[3], performanceHistory0[4],\n performanceHistory0[5])\n', (3046, 3198), False, 'from utilsADL import dataLoader, plotPerformance\n')] |
from django.db import models
from django.utils import timezone
from taggit.managers import TaggableManager
class Game(models.Model):
id = models.BigAutoField(primary_key=True)
created_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
deleted_at = models.DateTimeField(blank=True, null=True)
name = models.CharField(unique=True, max_length=128)
img_path = models.CharField(max_length=256, blank=True, null=True)
publish_time = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'monitor_game'
verbose_name = '游戏'
verbose_name_plural = verbose_name
class Article(models.Model):
"""文章"""
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
id = models.AutoField(primary_key=True, verbose_name='ID')
website_name = models.CharField(max_length=50, verbose_name='来源网站的名称')
url = models.CharField(max_length=500, unique=True, verbose_name='文章链接')
title = models.CharField(max_length=250, verbose_name='文章标题')
content = models.TextField(verbose_name='文章内容')
click = models.BigIntegerField(default=0, verbose_name='点击次数')
creator = models.CharField(default='niracler', max_length=64, verbose_name='创建者用户名')
publish_time = models.DateTimeField(default=timezone.now, verbose_name='发布时间')
created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
updated = models.DateTimeField(auto_now=True, verbose_name='更新时间')
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft', verbose_name='状态')
tags = TaggableManager()
class Meta:
ordering = ('-publish_time',)
verbose_name = '游戏新闻'
verbose_name_plural = verbose_name
def __str__(self):
return self.title
class Event(models.Model):
id = models.BigAutoField(primary_key=True, verbose_name='ID')
name = models.CharField(max_length=128,unique=True, verbose_name='事件名称')
articles = models.ManyToManyField(Article, related_name='articles', verbose_name='新闻列表')
creator = models.CharField(default='niracler', max_length=64, verbose_name='创建者用户名')
click = models.BigIntegerField(default=0, verbose_name='点击次数')
created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
updated = models.DateTimeField(default=timezone.now, verbose_name='更新时间')
class Meta:
ordering = ('-updated',)
verbose_name = '游戏新闻事件'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
| [
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"taggit.managers.TaggableManager",
"django.db.models.AutoField",
"django.db.models.BigIntegerField",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((143, 180), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (162, 180), False, 'from django.db import models\n'), ((198, 241), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (218, 241), False, 'from django.db import models\n'), ((259, 302), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (279, 302), False, 'from django.db import models\n'), ((320, 363), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (340, 363), False, 'from django.db import models\n'), ((375, 420), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(128)'}), '(unique=True, max_length=128)\n', (391, 420), False, 'from django.db import models\n'), ((436, 491), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)'}), '(max_length=256, blank=True, null=True)\n', (452, 491), False, 'from django.db import models\n'), ((511, 554), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (531, 554), False, 'from django.db import models\n'), ((847, 900), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'verbose_name': '"""ID"""'}), "(primary_key=True, verbose_name='ID')\n", (863, 900), False, 'from django.db import models\n'), ((920, 975), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""来源网站的名称"""'}), "(max_length=50, verbose_name='来源网站的名称')\n", (936, 975), False, 'from django.db import models\n'), ((986, 1052), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'unique': '(True)', 'verbose_name': '"""文章链接"""'}), "(max_length=500, unique=True, verbose_name='文章链接')\n", (1002, 1052), False, 'from django.db import models\n'), ((1065, 1118), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'verbose_name': '"""文章标题"""'}), "(max_length=250, verbose_name='文章标题')\n", (1081, 1118), False, 'from django.db import models\n'), ((1133, 1170), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""文章内容"""'}), "(verbose_name='文章内容')\n", (1149, 1170), False, 'from django.db import models\n'), ((1183, 1237), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)', 'verbose_name': '"""点击次数"""'}), "(default=0, verbose_name='点击次数')\n", (1205, 1237), False, 'from django.db import models\n'), ((1252, 1326), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""niracler"""', 'max_length': '(64)', 'verbose_name': '"""创建者用户名"""'}), "(default='niracler', max_length=64, verbose_name='创建者用户名')\n", (1268, 1326), False, 'from django.db import models\n'), ((1346, 1409), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now', 'verbose_name': '"""发布时间"""'}), "(default=timezone.now, verbose_name='发布时间')\n", (1366, 1409), False, 'from django.db import models\n'), ((1424, 1484), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""创建时间"""'}), "(auto_now_add=True, verbose_name='创建时间')\n", (1444, 1484), False, 'from django.db import models\n'), ((1499, 1555), 
'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""更新时间"""'}), "(auto_now=True, verbose_name='更新时间')\n", (1519, 1555), False, 'from django.db import models\n'), ((1569, 1664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'STATUS_CHOICES', 'default': '"""draft"""', 'verbose_name': '"""状态"""'}), "(max_length=10, choices=STATUS_CHOICES, default='draft',\n verbose_name='状态')\n", (1585, 1664), False, 'from django.db import models\n'), ((1672, 1689), 'taggit.managers.TaggableManager', 'TaggableManager', ([], {}), '()\n', (1687, 1689), False, 'from taggit.managers import TaggableManager\n'), ((1905, 1961), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'primary_key': '(True)', 'verbose_name': '"""ID"""'}), "(primary_key=True, verbose_name='ID')\n", (1924, 1961), False, 'from django.db import models\n'), ((1973, 2039), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'unique': '(True)', 'verbose_name': '"""事件名称"""'}), "(max_length=128, unique=True, verbose_name='事件名称')\n", (1989, 2039), False, 'from django.db import models\n'), ((2054, 2131), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Article'], {'related_name': '"""articles"""', 'verbose_name': '"""新闻列表"""'}), "(Article, related_name='articles', verbose_name='新闻列表')\n", (2076, 2131), False, 'from django.db import models\n'), ((2146, 2220), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""niracler"""', 'max_length': '(64)', 'verbose_name': '"""创建者用户名"""'}), "(default='niracler', max_length=64, verbose_name='创建者用户名')\n", (2162, 2220), False, 'from django.db import models\n'), ((2233, 2287), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)', 'verbose_name': '"""点击次数"""'}), "(default=0, verbose_name='点击次数')\n", (2255, 2287), False, 'from django.db import models\n'), ((2302, 2362), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""创建时间"""'}), "(auto_now_add=True, verbose_name='创建时间')\n", (2322, 2362), False, 'from django.db import models\n'), ((2377, 2440), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now', 'verbose_name': '"""更新时间"""'}), "(default=timezone.now, verbose_name='更新时间')\n", (2397, 2440), False, 'from django.db import models\n')] |
from django.contrib.auth import models
import factory
from committee_admissions.admissions.models import (
Admission,
Committee,
CommitteeApplication,
UserApplication,
)
class RandomUserFactory(factory.DjangoModelFactory):
class Meta:
model = models.User
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
email = factory.Faker("safe_email")
username = factory.Sequence(lambda n: "user_%d" % n)
password = factory.PostGenerationMethodCall("set_password", "<PASSWORD>")
is_active = True
class RandomAdmissionFactory(factory.DjangoModelFactory):
class Meta:
model = Admission
title = factory.Sequence(lambda n: "Opptak %d" % n)
open_from = factory.Faker("past_datetime", start_date="-10d", tzinfo=None)
public_deadline = factory.Faker("future_datetime", end_date="+15d", tzinfo=None)
application_deadline = factory.Faker(
"future_datetime", end_date="+15d", tzinfo=None
)
class RandomCommitteeFactory(factory.DjangoModelFactory):
class Meta:
model = Committee
django_get_or_create = ("name",)
name = factory.Iterator(
["Webkom", "Arrkom", "Bedkom", "Fagkom", "Koskom", "LaBamba", "readme", "PR"]
)
description = factory.Faker("text", max_nb_chars=200)
response_label = factory.Faker("text", max_nb_chars=50)
logo = factory.django.FileField(filename="committee.png")
class RandomUserApplicationFactory(factory.DjangoModelFactory):
class Meta:
model = UserApplication
admission = RandomAdmissionFactory()
user = factory.SubFactory(RandomUserFactory)
text = factory.Faker("text", max_nb_chars=50)
time_sent = factory.Faker("past_datetime", start_date="-0d", tzinfo=None)
class RandomCommitteeApplicationFactory(factory.DjangoModelFactory):
class Meta:
model = CommitteeApplication
application = factory.SubFactory(RandomUserApplicationFactory)
committee = factory.SubFactory(RandomCommitteeFactory)
text = factory.Faker("text", max_nb_chars=200)
| [
"factory.Iterator",
"factory.SubFactory",
"factory.Faker",
"factory.django.FileField",
"factory.PostGenerationMethodCall",
"factory.Sequence"
] | [((305, 332), 'factory.Faker', 'factory.Faker', (['"""first_name"""'], {}), "('first_name')\n", (318, 332), False, 'import factory\n'), ((349, 375), 'factory.Faker', 'factory.Faker', (['"""last_name"""'], {}), "('last_name')\n", (362, 375), False, 'import factory\n'), ((388, 415), 'factory.Faker', 'factory.Faker', (['"""safe_email"""'], {}), "('safe_email')\n", (401, 415), False, 'import factory\n'), ((431, 472), 'factory.Sequence', 'factory.Sequence', (["(lambda n: 'user_%d' % n)"], {}), "(lambda n: 'user_%d' % n)\n", (447, 472), False, 'import factory\n'), ((488, 550), 'factory.PostGenerationMethodCall', 'factory.PostGenerationMethodCall', (['"""set_password"""', '"""<PASSWORD>"""'], {}), "('set_password', '<PASSWORD>')\n", (520, 550), False, 'import factory\n'), ((687, 730), 'factory.Sequence', 'factory.Sequence', (["(lambda n: 'Opptak %d' % n)"], {}), "(lambda n: 'Opptak %d' % n)\n", (703, 730), False, 'import factory\n'), ((747, 809), 'factory.Faker', 'factory.Faker', (['"""past_datetime"""'], {'start_date': '"""-10d"""', 'tzinfo': 'None'}), "('past_datetime', start_date='-10d', tzinfo=None)\n", (760, 809), False, 'import factory\n'), ((832, 894), 'factory.Faker', 'factory.Faker', (['"""future_datetime"""'], {'end_date': '"""+15d"""', 'tzinfo': 'None'}), "('future_datetime', end_date='+15d', tzinfo=None)\n", (845, 894), False, 'import factory\n'), ((922, 984), 'factory.Faker', 'factory.Faker', (['"""future_datetime"""'], {'end_date': '"""+15d"""', 'tzinfo': 'None'}), "('future_datetime', end_date='+15d', tzinfo=None)\n", (935, 984), False, 'import factory\n'), ((1154, 1253), 'factory.Iterator', 'factory.Iterator', (["['Webkom', 'Arrkom', 'Bedkom', 'Fagkom', 'Koskom', 'LaBamba', 'readme', 'PR']"], {}), "(['Webkom', 'Arrkom', 'Bedkom', 'Fagkom', 'Koskom',\n 'LaBamba', 'readme', 'PR'])\n", (1170, 1253), False, 'import factory\n'), ((1282, 1321), 'factory.Faker', 'factory.Faker', (['"""text"""'], {'max_nb_chars': '(200)'}), "('text', max_nb_chars=200)\n", (1295, 1321), False, 'import factory\n'), ((1343, 1381), 'factory.Faker', 'factory.Faker', (['"""text"""'], {'max_nb_chars': '(50)'}), "('text', max_nb_chars=50)\n", (1356, 1381), False, 'import factory\n'), ((1393, 1443), 'factory.django.FileField', 'factory.django.FileField', ([], {'filename': '"""committee.png"""'}), "(filename='committee.png')\n", (1417, 1443), False, 'import factory\n'), ((1611, 1648), 'factory.SubFactory', 'factory.SubFactory', (['RandomUserFactory'], {}), '(RandomUserFactory)\n', (1629, 1648), False, 'import factory\n'), ((1660, 1698), 'factory.Faker', 'factory.Faker', (['"""text"""'], {'max_nb_chars': '(50)'}), "('text', max_nb_chars=50)\n", (1673, 1698), False, 'import factory\n'), ((1715, 1776), 'factory.Faker', 'factory.Faker', (['"""past_datetime"""'], {'start_date': '"""-0d"""', 'tzinfo': 'None'}), "('past_datetime', start_date='-0d', tzinfo=None)\n", (1728, 1776), False, 'import factory\n'), ((1920, 1968), 'factory.SubFactory', 'factory.SubFactory', (['RandomUserApplicationFactory'], {}), '(RandomUserApplicationFactory)\n', (1938, 1968), False, 'import factory\n'), ((1985, 2027), 'factory.SubFactory', 'factory.SubFactory', (['RandomCommitteeFactory'], {}), '(RandomCommitteeFactory)\n', (2003, 2027), False, 'import factory\n'), ((2039, 2078), 'factory.Faker', 'factory.Faker', (['"""text"""'], {'max_nb_chars': '(200)'}), "('text', max_nb_chars=200)\n", (2052, 2078), False, 'import factory\n')] |
"""
Finds the number of pythagorean triangles where the perimeter is less than 100 million and central small square dividing the bigger square
Author: <NAME>
"""
import math
from time import time
from utils import decompose_primes, prime_factors
from decimal import *
"""
Finds the number of Pythagorean triangles whose perimeter is less than 100 million and whose central small square divides the bigger square
"""
def pythagorean_triplets(limit_n):
primes = prime_factors(1000)
total = 0
t0 = time()
for m in range(2,limit_n+1):
if 2*m**2+2*m>=limit_n:
break
factors = decompose_primes(m,primes,True)
inc = 1
if m%2==0:
inc = 2
complete = False
for n in range(1,m,inc):
if inc==1 and n%2==1:
continue
are_coprime=True
for p in factors:
if n%p==0:
are_coprime = False
break
if are_coprime:
a = m**2-n**2
b = 2*m*n
c = m**2+n**2
if (a+b+c)>=limit_n:
break
if c%(b-a)==0:
print(a,b,c)
total += limit_n//(a+b+c)
if complete:
break
t1= time()
print('Time to reach solution: {0} sec'.format(t1-t0))
return total
if __name__ == "__main__":
limit_n = 10**8
print('The number of pythagorean triangles where the perimeter is less than {0} and match condition is {1}'.format(limit_n,pythagorean_triplets(limit_n))) | [
"utils.decompose_primes",
"time.time",
"utils.prime_factors"
] | [((465, 484), 'utils.prime_factors', 'prime_factors', (['(1000)'], {}), '(1000)\n', (478, 484), False, 'from utils import decompose_primes, prime_factors\n'), ((508, 514), 'time.time', 'time', ([], {}), '()\n', (512, 514), False, 'from time import time\n'), ((1306, 1312), 'time.time', 'time', ([], {}), '()\n', (1310, 1312), False, 'from time import time\n'), ((616, 649), 'utils.decompose_primes', 'decompose_primes', (['m', 'primes', '(True)'], {}), '(m, primes, True)\n', (632, 649), False, 'from utils import decompose_primes, prime_factors\n')] |
import autoarray as aa
import autoarray.plot as aplt
from autoarray.plot import mat_objs
from os import path
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pytest
import os, shutil
import numpy as np
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(autouse=True)
def set_config_path():
aa.conf.instance = aa.conf.Config(
path.join(directory, "../test_files/plot"), path.join(directory, "output")
)
class TestFigure:
def test__aspect_from_shape_2d(self):
figure = aplt.Figure(aspect="auto")
aspect = figure.aspect_from_shape_2d(shape_2d=(2, 2))
assert aspect == "auto"
figure = aplt.Figure(aspect="square")
aspect = figure.aspect_from_shape_2d(shape_2d=(2, 2))
assert aspect == 1.0
aspect = figure.aspect_from_shape_2d(shape_2d=(4, 2))
assert aspect == 0.5
def test__open_and_close__open_and_close_figures_correct(self):
figure = aplt.Figure()
figure.open()
assert plt.fignum_exists(num=1) == True
figure.close()
assert plt.fignum_exists(num=1) == False
class TestColorMap:
def test__norm_from_array__uses_input_norm_min_and_max_if_input(self):
cmap = aplt.ColorMap(norm_min=0.0, norm_max=1.0, norm="linear")
norm = cmap.norm_from_array(array=None)
assert isinstance(norm, colors.Normalize)
assert norm.vmin == 0.0
assert norm.vmax == 1.0
cmap = aplt.ColorMap(norm_min=0.0, norm_max=1.0, norm="log")
norm = cmap.norm_from_array(array=None)
assert isinstance(norm, colors.LogNorm)
assert norm.vmin == 1.0e-4 # Increased from 0.0 to ensure min isn't inf
assert norm.vmax == 1.0
cmap = aplt.ColorMap(
norm_min=0.0,
norm_max=1.0,
linthresh=2.0,
linscale=3.0,
norm="symmetric_log",
)
norm = cmap.norm_from_array(array=None)
assert isinstance(norm, colors.SymLogNorm)
assert norm.vmin == 0.0
assert norm.vmax == 1.0
assert norm.linthresh == 2.0
def test__norm_from_array__uses_array_to_get_norm_min_and_max_if_no_manual_input(
self
):
array = aa.array.ones(shape_2d=(2, 2))
array[0] = 0.0
cmap = aplt.ColorMap(norm_min=None, norm_max=None, norm="linear")
norm = cmap.norm_from_array(array=array)
assert isinstance(norm, colors.Normalize)
assert norm.vmin == 0.0
assert norm.vmax == 1.0
cmap = aplt.ColorMap(norm_min=None, norm_max=None, norm="log")
norm = cmap.norm_from_array(array=array)
assert isinstance(norm, colors.LogNorm)
assert norm.vmin == 1.0e-4 # Increased from 0.0 to ensure min isn't inf
assert norm.vmax == 1.0
cmap = aplt.ColorMap(
norm_min=None,
norm_max=None,
linthresh=2.0,
linscale=3.0,
norm="symmetric_log",
)
norm = cmap.norm_from_array(array=array)
assert isinstance(norm, colors.SymLogNorm)
assert norm.vmin == 0.0
assert norm.vmax == 1.0
assert norm.linthresh == 2.0
class TestColorBar:
def test__plot__works_for_reasonable_range_of_values(self):
figure = aplt.Figure()
figure.open()
plt.imshow(np.ones((2, 2)))
cb = aplt.ColorBar(ticksize=1, fraction=1.0, pad=2.0)
cb.set()
figure.close()
figure.open()
plt.imshow(np.ones((2, 2)))
cb = aplt.ColorBar(
ticksize=1,
fraction=0.1,
pad=0.5,
tick_values=[0.25, 0.5, 0.75],
tick_labels=[1.0, 2.0, 3.0],
)
cb.set()
figure.close()
figure.open()
plt.imshow(np.ones((2, 2)))
cb = aplt.ColorBar(ticksize=1, fraction=0.1, pad=0.5)
cb.set_with_values(cmap=aplt.ColorMap().cmap, color_values=[1.0, 2.0, 3.0])
figure.close()
class TestTicks:
def test__set_yx_ticks__works_for_good_values(self):
array = aa.array.ones(shape_2d=(2, 2), pixel_scales=1.0)
units = aplt.Units(use_scaled=True, conversion_factor=None)
ticks = aplt.Ticks(ysize=34, xsize=35)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks = aplt.Ticks(ysize=34, xsize=35)
units = aplt.Units(use_scaled=False, conversion_factor=None)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks = aplt.Ticks(ysize=34, xsize=35)
units = aplt.Units(use_scaled=True, conversion_factor=2.0)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks = aplt.Ticks(ysize=34, xsize=35)
units = aplt.Units(use_scaled=False, conversion_factor=2.0)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=False,
)
ticks.set_yticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
ticks.set_xticks(
array=array,
extent=array.extent_of_zoomed_array(buffer=1),
units=units,
symmetric_around_centre=True,
)
class TestLabels:
def test__yx_units_use_plot_in_kpc_if_it_is_passed(self):
labels = aplt.Labels()
units = aplt.Units(in_kpc=True)
assert labels._yunits == None
assert labels._xunits == None
assert labels.yunits_from_units(units=units) == "kpc"
assert labels.xunits_from_units(units=units) == "kpc"
labels = aplt.Labels()
units = aplt.Units(in_kpc=False)
assert labels._yunits == None
assert labels._xunits == None
assert labels.yunits_from_units(units=units) == "arcsec"
assert labels.xunits_from_units(units=units) == "arcsec"
labels = aplt.Labels(yunits="hi", xunits="hi2")
units = aplt.Units(in_kpc=True)
assert labels._yunits == "hi"
assert labels._xunits == "hi2"
assert labels.yunits_from_units(units=units) == "hi"
assert labels.xunits_from_units(units=units) == "hi2"
labels = aplt.Labels(yunits="hi", xunits="hi2")
units = aplt.Units(in_kpc=False)
assert labels._yunits == "hi"
assert labels._xunits == "hi2"
assert labels.yunits_from_units(units=units) == "hi"
assert labels.xunits_from_units(units=units) == "hi2"
def test__title_from_func__uses_func_name_if_title_is_none(self):
def toy_func():
pass
labels = aplt.Labels(title=None)
title_from_func = labels.title_from_func(func=toy_func)
assert title_from_func == "Toy_func"
labels = aplt.Labels(title="Hi")
title_from_func = labels.title_from_func(func=toy_func)
assert title_from_func == "Hi"
def test__yx_units_from_func__uses_function_inputs_if_available(self):
def toy_func():
pass
labels = aplt.Labels(yunits=None, xunits=None)
yunits_from_func = labels.yunits_from_func(func=toy_func)
xunits_from_func = labels.xunits_from_func(func=toy_func)
assert yunits_from_func == None
assert xunits_from_func == None
def toy_func(label_yunits="Hi", label_xunits="Hi0"):
pass
labels = aplt.Labels()
yunits_from_func = labels.yunits_from_func(func=toy_func)
xunits_from_func = labels.xunits_from_func(func=toy_func)
assert yunits_from_func == "Hi"
assert xunits_from_func == "Hi0"
labels = aplt.Labels(yunits="Hi1", xunits="Hi2")
yunits_from_func = labels.yunits_from_func(func=toy_func)
xunits_from_func = labels.xunits_from_func(func=toy_func)
assert yunits_from_func == "Hi1"
assert xunits_from_func == "Hi2"
def toy_func(argument, label_yunits="Hi", label_xunits="Hi0"):
pass
labels = aplt.Labels()
yunits_from_func = labels.yunits_from_func(func=toy_func)
xunits_from_func = labels.xunits_from_func(func=toy_func)
assert yunits_from_func == "Hi"
assert xunits_from_func == "Hi0"
labels = aplt.Labels(yunits="Hi1", xunits="Hi2")
yunits_from_func = labels.yunits_from_func(func=toy_func)
xunits_from_func = labels.xunits_from_func(func=toy_func)
assert yunits_from_func == "Hi1"
assert xunits_from_func == "Hi2"
class TestLegend:
def test__set_legend_works_for_plot(self):
figure = aplt.Figure(aspect="auto")
figure.open()
liner = aplt.Liner(width=2, style="-", colors="k", pointsize=2)
liner.draw_y_vs_x(
y=[1.0, 2.0, 3.0], x=[1.0, 2.0, 3.0], plot_axis_type="linear", label="hi"
)
legend = aplt.Legend(include=True, fontsize=1)
legend.set()
figure.close()
class TestOutput:
def test__input_path_is_created(self):
test_path = path.join(directory, "../test_files/output_path")
if os.path.exists(test_path):
shutil.rmtree(test_path)
assert not os.path.exists(test_path)
output = aplt.Output(path=test_path)
assert os.path.exists(test_path)
def test__filename_from_func__returns_function_name_if_no_filename(self):
def toy_func():
pass
output = aplt.Output(filename=None)
filename_from_func = output.filename_from_func(func=toy_func)
assert filename_from_func == "toy_func"
output = aplt.Output(filename="Hi")
filename_from_func = output.filename_from_func(func=toy_func)
assert filename_from_func == "Hi"
class TestScatterer:
def test__scatter_grid__lists_of_coordinates_or_equivalent_2d_grids(self):
scatterer = mat_objs.Scatterer(size=2, marker="x", colors="k")
scatterer.scatter_grids(grids=[(1.0, 1.0), (2.0, 2.0)])
scatterer.scatter_grids(
grids=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0)
)
def test__scatter_grid__lists_of_lists_of_coordinates_or_equivalent_2d_grids(self):
scatterer = mat_objs.Scatterer(size=2, marker="x", colors="k")
scatterer.scatter_grids(grids=[[(1.0, 1.0), (2.0, 2.0)]])
scatterer.scatter_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], [(3.0, 3.0)]])
scatterer.scatter_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], []])
scatterer.scatter_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], [None]])
scatterer.scatter_grids(
grids=[
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
]
)
def test__scatter_colored_grid__lists_of_coordinates_or_equivalent_2d_grids__with_color_array(
self
):
scatterer = mat_objs.Scatterer(size=2, marker="x", colors="k")
cmap = plt.get_cmap("jet")
scatterer.scatter_colored_grid(
grid=[(1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0), (5.0, 5.0)],
color_array=np.array([2.0, 2.0, 2.0, 2.0, 2.0]),
cmap=cmap,
)
scatterer.scatter_colored_grid(
grid=aa.grid.uniform(shape_2d=(3, 2), pixel_scales=1.0),
color_array=np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0]),
cmap=cmap,
)
def test__scatter_grid_indexes_1d__input_grid_is_ndarray_and_indexes_are_valid(
self
):
scatterer = mat_objs.Scatterer(size=2, marker="x", colors="k")
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0), indexes=[0, 1, 2]
)
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0), indexes=[[0, 1, 2]]
)
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
indexes=[[0, 1], [2]],
)
def test__scatter_grid_indexes_2d__input_grid_is_ndarray_and_indexes_are_valid(
self
):
scatterer = mat_objs.Scatterer(size=2, marker="x", colors="k")
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
indexes=[(0, 0), (0, 1), (0, 2)],
)
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
indexes=[[(0, 0), (0, 1), (0, 2)]],
)
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
indexes=[[(0, 0), (0, 1)], [(0, 2)]],
)
scatterer.scatter_grid_indexes(
grid=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
indexes=[[[0, 0], [0, 1]], [[0, 2]]],
)
class TestLiner:
def test__draw_y_vs_x__works_for_reasonable_values(self):
liner = aplt.Liner(width=2, style="-", colors="k", pointsize=2)
liner.draw_y_vs_x(y=[1.0, 2.0, 3.0], x=[1.0, 2.0, 3.0], plot_axis_type="linear")
liner.draw_y_vs_x(
y=[1.0, 2.0, 3.0], x=[1.0, 2.0, 3.0], plot_axis_type="semilogy"
)
liner.draw_y_vs_x(y=[1.0, 2.0, 3.0], x=[1.0, 2.0, 3.0], plot_axis_type="loglog")
liner.draw_y_vs_x(
y=[1.0, 2.0, 3.0], x=[1.0, 2.0, 3.0], plot_axis_type="scatter"
)
def test__draw_vertical_lines__works_for_reasonable_values(self):
liner = aplt.Liner(width=2, style="-", colors="k", pointsize=2)
liner.draw_vertical_lines(vertical_lines=[[0.0]])
liner.draw_vertical_lines(vertical_lines=[[1.0], [2.0]])
liner.draw_vertical_lines(vertical_lines=[[0.0]], vertical_line_labels=["hi"])
liner.draw_vertical_lines(
vertical_lines=[[1.0], [2.0]], vertical_line_labels=["hi1", "hi2"]
)
def test__draw_grid__lists_of_coordinates_or_equivalent_2d_grids(self):
liner = aplt.Liner(width=2, style="-", colors="k")
liner.draw_grids(grids=[(1.0, 1.0), (2.0, 2.0)])
liner.draw_grids(grids=aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0))
liner.draw_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], [(3.0, 3.0), (4.0, 4.0)]])
liner.draw_grids(
grids=[
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
]
)
def test__draw_grid__lists_of_lists_of_coordinates_or_equivalent_2d_grids(self):
liner = aplt.Liner(width=2, style="--", colors="k")
liner.draw_grids(grids=[[(1.0, 1.0), (2.0, 2.0)]])
liner.draw_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], [(3.0, 3.0)]])
liner.draw_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], []])
liner.draw_grids(grids=[[(1.0, 1.0), (2.0, 2.0)], [None]])
liner.draw_grids(
grids=[
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0),
]
)
def test__draw_rectangular_grid_lines__draws_for_valid_extent_and_shape(self):
liner = aplt.Liner(width=2, style="--", colors="k")
liner.draw_rectangular_grid_lines(extent=[0.0, 1.0, 0.0, 1.0], shape_2d=(3, 2))
liner.draw_rectangular_grid_lines(
extent=[-4.0, 8.0, -3.0, 10.0], shape_2d=(8, 3)
)
class TestVoronoiDrawer:
def test__draws_voronoi_pixels_for_sensible_input(self, voronoi_mapper_9_3x3):
voronoi_drawer = aplt.VoronoiDrawer(edgewidth=0.5, edgecolor="r", alpha=1.0)
voronoi_drawer.draw_voronoi_pixels(
mapper=voronoi_mapper_9_3x3, values=None, cmap=None, cb=None
)
voronoi_drawer.draw_voronoi_pixels(
mapper=voronoi_mapper_9_3x3,
values=np.ones(9),
cmap="jet",
cb=aplt.ColorBar(ticksize=1, fraction=0.1, pad=0.05),
)
| [
"autoarray.grid.uniform",
"numpy.array",
"pytest.fixture",
"autoarray.array.ones",
"os.path.exists",
"autoarray.plot.Labels",
"autoarray.plot.mat_objs.Scatterer",
"autoarray.plot.ColorMap",
"autoarray.plot.Figure",
"numpy.ones",
"autoarray.plot.VoronoiDrawer",
"autoarray.plot.Liner",
"autoarray.plot.ColorBar",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.fignum_exists",
"autoarray.plot.Ticks",
"os.path.join",
"os.path.realpath",
"autoarray.plot.Legend",
"autoarray.plot.Output",
"shutil.rmtree",
"autoarray.plot.Units"
] | [((284, 312), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (298, 312), False, 'import pytest\n'), ((256, 279), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (269, 279), False, 'from os import path\n'), ((383, 425), 'os.path.join', 'path.join', (['directory', '"""../test_files/plot"""'], {}), "(directory, '../test_files/plot')\n", (392, 425), False, 'from os import path\n'), ((427, 457), 'os.path.join', 'path.join', (['directory', '"""output"""'], {}), "(directory, 'output')\n", (436, 457), False, 'from os import path\n'), ((544, 570), 'autoarray.plot.Figure', 'aplt.Figure', ([], {'aspect': '"""auto"""'}), "(aspect='auto')\n", (555, 570), True, 'import autoarray.plot as aplt\n'), ((685, 713), 'autoarray.plot.Figure', 'aplt.Figure', ([], {'aspect': '"""square"""'}), "(aspect='square')\n", (696, 713), True, 'import autoarray.plot as aplt\n'), ((987, 1000), 'autoarray.plot.Figure', 'aplt.Figure', ([], {}), '()\n', (998, 1000), True, 'import autoarray.plot as aplt\n'), ((1260, 1316), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': '(0.0)', 'norm_max': '(1.0)', 'norm': '"""linear"""'}), "(norm_min=0.0, norm_max=1.0, norm='linear')\n", (1273, 1316), True, 'import autoarray.plot as aplt\n'), ((1497, 1550), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': '(0.0)', 'norm_max': '(1.0)', 'norm': '"""log"""'}), "(norm_min=0.0, norm_max=1.0, norm='log')\n", (1510, 1550), True, 'import autoarray.plot as aplt\n'), ((1778, 1875), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': '(0.0)', 'norm_max': '(1.0)', 'linthresh': '(2.0)', 'linscale': '(3.0)', 'norm': '"""symmetric_log"""'}), "(norm_min=0.0, norm_max=1.0, linthresh=2.0, linscale=3.0, norm\n ='symmetric_log')\n", (1791, 1875), True, 'import autoarray.plot as aplt\n'), ((2268, 2298), 'autoarray.array.ones', 'aa.array.ones', ([], {'shape_2d': '(2, 2)'}), '(shape_2d=(2, 2))\n', (2281, 2298), True, 'import autoarray as aa\n'), ((2338, 2396), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': 'None', 'norm_max': 'None', 'norm': '"""linear"""'}), "(norm_min=None, norm_max=None, norm='linear')\n", (2351, 2396), True, 'import autoarray.plot as aplt\n'), ((2578, 2633), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': 'None', 'norm_max': 'None', 'norm': '"""log"""'}), "(norm_min=None, norm_max=None, norm='log')\n", (2591, 2633), True, 'import autoarray.plot as aplt\n'), ((2862, 2960), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {'norm_min': 'None', 'norm_max': 'None', 'linthresh': '(2.0)', 'linscale': '(3.0)', 'norm': '"""symmetric_log"""'}), "(norm_min=None, norm_max=None, linthresh=2.0, linscale=3.0,\n norm='symmetric_log')\n", (2875, 2960), True, 'import autoarray.plot as aplt\n'), ((3335, 3348), 'autoarray.plot.Figure', 'aplt.Figure', ([], {}), '()\n', (3346, 3348), True, 'import autoarray.plot as aplt\n'), ((3421, 3469), 'autoarray.plot.ColorBar', 'aplt.ColorBar', ([], {'ticksize': '(1)', 'fraction': '(1.0)', 'pad': '(2.0)'}), '(ticksize=1, fraction=1.0, pad=2.0)\n', (3434, 3469), True, 'import autoarray.plot as aplt\n'), ((3582, 3695), 'autoarray.plot.ColorBar', 'aplt.ColorBar', ([], {'ticksize': '(1)', 'fraction': '(0.1)', 'pad': '(0.5)', 'tick_values': '[0.25, 0.5, 0.75]', 'tick_labels': '[1.0, 2.0, 3.0]'}), '(ticksize=1, fraction=0.1, pad=0.5, tick_values=[0.25, 0.5, \n 0.75], tick_labels=[1.0, 2.0, 3.0])\n', (3595, 3695), True, 'import autoarray.plot as aplt\n'), ((3874, 3922), 'autoarray.plot.ColorBar', 
'aplt.ColorBar', ([], {'ticksize': '(1)', 'fraction': '(0.1)', 'pad': '(0.5)'}), '(ticksize=1, fraction=0.1, pad=0.5)\n', (3887, 3922), True, 'import autoarray.plot as aplt\n'), ((4123, 4171), 'autoarray.array.ones', 'aa.array.ones', ([], {'shape_2d': '(2, 2)', 'pixel_scales': '(1.0)'}), '(shape_2d=(2, 2), pixel_scales=1.0)\n', (4136, 4171), True, 'import autoarray as aa\n'), ((4189, 4240), 'autoarray.plot.Units', 'aplt.Units', ([], {'use_scaled': '(True)', 'conversion_factor': 'None'}), '(use_scaled=True, conversion_factor=None)\n', (4199, 4240), True, 'import autoarray.plot as aplt\n'), ((4258, 4288), 'autoarray.plot.Ticks', 'aplt.Ticks', ([], {'ysize': '(34)', 'xsize': '(35)'}), '(ysize=34, xsize=35)\n', (4268, 4288), True, 'import autoarray.plot as aplt\n'), ((5057, 5087), 'autoarray.plot.Ticks', 'aplt.Ticks', ([], {'ysize': '(34)', 'xsize': '(35)'}), '(ysize=34, xsize=35)\n', (5067, 5087), True, 'import autoarray.plot as aplt\n'), ((5105, 5157), 'autoarray.plot.Units', 'aplt.Units', ([], {'use_scaled': '(False)', 'conversion_factor': 'None'}), '(use_scaled=False, conversion_factor=None)\n', (5115, 5157), True, 'import autoarray.plot as aplt\n'), ((5926, 5956), 'autoarray.plot.Ticks', 'aplt.Ticks', ([], {'ysize': '(34)', 'xsize': '(35)'}), '(ysize=34, xsize=35)\n', (5936, 5956), True, 'import autoarray.plot as aplt\n'), ((5974, 6024), 'autoarray.plot.Units', 'aplt.Units', ([], {'use_scaled': '(True)', 'conversion_factor': '(2.0)'}), '(use_scaled=True, conversion_factor=2.0)\n', (5984, 6024), True, 'import autoarray.plot as aplt\n'), ((6793, 6823), 'autoarray.plot.Ticks', 'aplt.Ticks', ([], {'ysize': '(34)', 'xsize': '(35)'}), '(ysize=34, xsize=35)\n', (6803, 6823), True, 'import autoarray.plot as aplt\n'), ((6841, 6892), 'autoarray.plot.Units', 'aplt.Units', ([], {'use_scaled': '(False)', 'conversion_factor': '(2.0)'}), '(use_scaled=False, conversion_factor=2.0)\n', (6851, 6892), True, 'import autoarray.plot as aplt\n'), ((7744, 7757), 'autoarray.plot.Labels', 'aplt.Labels', ([], {}), '()\n', (7755, 7757), True, 'import autoarray.plot as aplt\n'), ((7775, 7798), 'autoarray.plot.Units', 'aplt.Units', ([], {'in_kpc': '(True)'}), '(in_kpc=True)\n', (7785, 7798), True, 'import autoarray.plot as aplt\n'), ((8018, 8031), 'autoarray.plot.Labels', 'aplt.Labels', ([], {}), '()\n', (8029, 8031), True, 'import autoarray.plot as aplt\n'), ((8049, 8073), 'autoarray.plot.Units', 'aplt.Units', ([], {'in_kpc': '(False)'}), '(in_kpc=False)\n', (8059, 8073), True, 'import autoarray.plot as aplt\n'), ((8299, 8337), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'yunits': '"""hi"""', 'xunits': '"""hi2"""'}), "(yunits='hi', xunits='hi2')\n", (8310, 8337), True, 'import autoarray.plot as aplt\n'), ((8355, 8378), 'autoarray.plot.Units', 'aplt.Units', ([], {'in_kpc': '(True)'}), '(in_kpc=True)\n', (8365, 8378), True, 'import autoarray.plot as aplt\n'), ((8598, 8636), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'yunits': '"""hi"""', 'xunits': '"""hi2"""'}), "(yunits='hi', xunits='hi2')\n", (8609, 8636), True, 'import autoarray.plot as aplt\n'), ((8654, 8678), 'autoarray.plot.Units', 'aplt.Units', ([], {'in_kpc': '(False)'}), '(in_kpc=False)\n', (8664, 8678), True, 'import autoarray.plot as aplt\n'), ((9010, 9033), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'title': 'None'}), '(title=None)\n', (9021, 9033), True, 'import autoarray.plot as aplt\n'), ((9163, 9186), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'title': '"""Hi"""'}), "(title='Hi')\n", (9174, 9186), True, 'import autoarray.plot as aplt\n'), 
((9427, 9464), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'yunits': 'None', 'xunits': 'None'}), '(yunits=None, xunits=None)\n', (9438, 9464), True, 'import autoarray.plot as aplt\n'), ((9776, 9789), 'autoarray.plot.Labels', 'aplt.Labels', ([], {}), '()\n', (9787, 9789), True, 'import autoarray.plot as aplt\n'), ((10023, 10062), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'yunits': '"""Hi1"""', 'xunits': '"""Hi2"""'}), "(yunits='Hi1', xunits='Hi2')\n", (10034, 10062), True, 'import autoarray.plot as aplt\n'), ((10386, 10399), 'autoarray.plot.Labels', 'aplt.Labels', ([], {}), '()\n', (10397, 10399), True, 'import autoarray.plot as aplt\n'), ((10633, 10672), 'autoarray.plot.Labels', 'aplt.Labels', ([], {'yunits': '"""Hi1"""', 'xunits': '"""Hi2"""'}), "(yunits='Hi1', xunits='Hi2')\n", (10644, 10672), True, 'import autoarray.plot as aplt\n'), ((10974, 11000), 'autoarray.plot.Figure', 'aplt.Figure', ([], {'aspect': '"""auto"""'}), "(aspect='auto')\n", (10985, 11000), True, 'import autoarray.plot as aplt\n'), ((11041, 11096), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""-"""', 'colors': '"""k"""', 'pointsize': '(2)'}), "(width=2, style='-', colors='k', pointsize=2)\n", (11051, 11096), True, 'import autoarray.plot as aplt\n'), ((11239, 11276), 'autoarray.plot.Legend', 'aplt.Legend', ([], {'include': '(True)', 'fontsize': '(1)'}), '(include=True, fontsize=1)\n', (11250, 11276), True, 'import autoarray.plot as aplt\n'), ((11407, 11456), 'os.path.join', 'path.join', (['directory', '"""../test_files/output_path"""'], {}), "(directory, '../test_files/output_path')\n", (11416, 11456), False, 'from os import path\n'), ((11469, 11494), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (11483, 11494), False, 'import os, shutil\n'), ((11597, 11624), 'autoarray.plot.Output', 'aplt.Output', ([], {'path': 'test_path'}), '(path=test_path)\n', (11608, 11624), True, 'import autoarray.plot as aplt\n'), ((11641, 11666), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (11655, 11666), False, 'import os, shutil\n'), ((11805, 11831), 'autoarray.plot.Output', 'aplt.Output', ([], {'filename': 'None'}), '(filename=None)\n', (11816, 11831), True, 'import autoarray.plot as aplt\n'), ((11970, 11996), 'autoarray.plot.Output', 'aplt.Output', ([], {'filename': '"""Hi"""'}), "(filename='Hi')\n", (11981, 11996), True, 'import autoarray.plot as aplt\n'), ((12234, 12284), 'autoarray.plot.mat_objs.Scatterer', 'mat_objs.Scatterer', ([], {'size': '(2)', 'marker': '"""x"""', 'colors': '"""k"""'}), "(size=2, marker='x', colors='k')\n", (12252, 12284), False, 'from autoarray.plot import mat_objs\n'), ((12572, 12622), 'autoarray.plot.mat_objs.Scatterer', 'mat_objs.Scatterer', ([], {'size': '(2)', 'marker': '"""x"""', 'colors': '"""k"""'}), "(size=2, marker='x', colors='k')\n", (12590, 12622), False, 'from autoarray.plot import mat_objs\n'), ((13268, 13318), 'autoarray.plot.mat_objs.Scatterer', 'mat_objs.Scatterer', ([], {'size': '(2)', 'marker': '"""x"""', 'colors': '"""k"""'}), "(size=2, marker='x', colors='k')\n", (13286, 13318), False, 'from autoarray.plot import mat_objs\n'), ((13335, 13354), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (13347, 13354), True, 'import matplotlib.pyplot as plt\n'), ((13903, 13953), 'autoarray.plot.mat_objs.Scatterer', 'mat_objs.Scatterer', ([], {'size': '(2)', 'marker': '"""x"""', 'colors': '"""k"""'}), "(size=2, marker='x', colors='k')\n", (13921, 13953), False, 'from autoarray.plot 
import mat_objs\n'), ((14513, 14563), 'autoarray.plot.mat_objs.Scatterer', 'mat_objs.Scatterer', ([], {'size': '(2)', 'marker': '"""x"""', 'colors': '"""k"""'}), "(size=2, marker='x', colors='k')\n", (14531, 14563), False, 'from autoarray.plot import mat_objs\n'), ((15336, 15391), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""-"""', 'colors': '"""k"""', 'pointsize': '(2)'}), "(width=2, style='-', colors='k', pointsize=2)\n", (15346, 15391), True, 'import autoarray.plot as aplt\n'), ((15884, 15939), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""-"""', 'colors': '"""k"""', 'pointsize': '(2)'}), "(width=2, style='-', colors='k', pointsize=2)\n", (15894, 15939), True, 'import autoarray.plot as aplt\n'), ((16369, 16411), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""-"""', 'colors': '"""k"""'}), "(width=2, style='-', colors='k')\n", (16379, 16411), True, 'import autoarray.plot as aplt\n'), ((16947, 16990), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""--"""', 'colors': '"""k"""'}), "(width=2, style='--', colors='k')\n", (16957, 16990), True, 'import autoarray.plot as aplt\n'), ((17561, 17604), 'autoarray.plot.Liner', 'aplt.Liner', ([], {'width': '(2)', 'style': '"""--"""', 'colors': '"""k"""'}), "(width=2, style='--', colors='k')\n", (17571, 17604), True, 'import autoarray.plot as aplt\n'), ((17943, 18002), 'autoarray.plot.VoronoiDrawer', 'aplt.VoronoiDrawer', ([], {'edgewidth': '(0.5)', 'edgecolor': '"""r"""', 'alpha': '(1.0)'}), "(edgewidth=0.5, edgecolor='r', alpha=1.0)\n", (17961, 18002), True, 'import autoarray.plot as aplt\n'), ((1040, 1064), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', ([], {'num': '(1)'}), '(num=1)\n', (1057, 1064), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1137), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', ([], {'num': '(1)'}), '(num=1)\n', (1130, 1137), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3406), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3398, 3406), True, 'import numpy as np\n'), ((3552, 3567), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3559, 3567), True, 'import numpy as np\n'), ((3844, 3859), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3851, 3859), True, 'import numpy as np\n'), ((11508, 11532), 'shutil.rmtree', 'shutil.rmtree', (['test_path'], {}), '(test_path)\n', (11521, 11532), False, 'import os, shutil\n'), ((11553, 11578), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (11567, 11578), False, 'import os, shutil\n'), ((12401, 12451), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (12416, 12451), True, 'import autoarray as aa\n'), ((13499, 13534), 'numpy.array', 'np.array', (['[2.0, 2.0, 2.0, 2.0, 2.0]'], {}), '([2.0, 2.0, 2.0, 2.0, 2.0])\n', (13507, 13534), True, 'import numpy as np\n'), ((13626, 13676), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 2)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 2), pixel_scales=1.0)\n', (13641, 13676), True, 'import autoarray as aa\n'), ((13702, 13742), 'numpy.array', 'np.array', (['[2.0, 2.0, 2.0, 2.0, 2.0, 2.0]'], {}), '([2.0, 2.0, 2.0, 2.0, 2.0, 2.0])\n', (13710, 13742), True, 'import numpy as np\n'), ((14012, 14062), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14027, 14062), True, 'import 
autoarray as aa\n'), ((14150, 14200), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14165, 14200), True, 'import autoarray as aa\n'), ((14290, 14340), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14305, 14340), True, 'import autoarray as aa\n'), ((14622, 14672), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14637, 14672), True, 'import autoarray as aa\n'), ((14788, 14838), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14803, 14838), True, 'import autoarray as aa\n'), ((14956, 15006), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (14971, 15006), True, 'import autoarray as aa\n'), ((15126, 15176), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (15141, 15176), True, 'import autoarray as aa\n'), ((16501, 16551), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (16516, 16551), True, 'import autoarray as aa\n'), ((18236, 18246), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (18243, 18246), True, 'import numpy as np\n'), ((18287, 18336), 'autoarray.plot.ColorBar', 'aplt.ColorBar', ([], {'ticksize': '(1)', 'fraction': '(0.1)', 'pad': '(0.05)'}), '(ticksize=1, fraction=0.1, pad=0.05)\n', (18300, 18336), True, 'import autoarray.plot as aplt\n'), ((3955, 3970), 'autoarray.plot.ColorMap', 'aplt.ColorMap', ([], {}), '()\n', (3968, 3970), True, 'import autoarray.plot as aplt\n'), ((12983, 13033), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (12998, 13033), True, 'import autoarray as aa\n'), ((13051, 13101), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (13066, 13101), True, 'import autoarray as aa\n'), ((16700, 16750), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (16715, 16750), True, 'import autoarray as aa\n'), ((16768, 16818), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (16783, 16818), True, 'import autoarray as aa\n'), ((17316, 17366), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (17331, 17366), True, 'import autoarray as aa\n'), ((17384, 17434), 'autoarray.grid.uniform', 'aa.grid.uniform', ([], {'shape_2d': '(3, 3)', 'pixel_scales': '(1.0)'}), '(shape_2d=(3, 3), pixel_scales=1.0)\n', (17399, 17434), True, 'import autoarray as aa\n')] |
"""The tests for the Laica Smart Scale ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestLaica:
"""Tests for the Mikrotik parser"""
def test_mikrotik_tg_bt5_in(self):
"""Test Mikrotik TG-BT5-IN parser."""
data_string = "043E2202010300DD7B146E2CDC1615FF4F09010010A90000FDFF010000806BE866000062D5"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Mikrotik"
assert sensor_msg["type"] == "TG-BT5-IN"
assert sensor_msg["mac"] == "DC2C6E147BDD"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["version"] == 1
assert sensor_msg["acceleration x"] == 0.0
assert sensor_msg["acceleration y"] == 255.98828125
assert sensor_msg["acceleration z"] == 0.00390625
assert sensor_msg["acceleration"] == 255.9882812798037
assert sensor_msg["uptime"] == 6744171
assert sensor_msg["battery"] == 98
assert sensor_msg["switch"] == 0
assert sensor_msg["tilt"] == 0
assert sensor_msg["dropping"] == 0
assert sensor_msg["impact"] == 0
assert sensor_msg["impact x"] == 0
assert sensor_msg["impact y"] == 0
assert sensor_msg["impact z"] == 0
assert sensor_msg["rssi"] == -43
def test_mikrotik_tg_bt5_out(self):
"""Test Mikrotik TG-BT5-OUT parser."""
data_string = "043E2202010300DD7B146E2CDC1615FF4F09010010A90000FDFF0100A1196BE866000062D5"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Mikrotik"
assert sensor_msg["type"] == "TG-BT5-OUT"
assert sensor_msg["mac"] == "DC2C6E147BDD"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg['temperature'] == 25.62890625
assert sensor_msg["version"] == 1
assert sensor_msg["acceleration x"] == 0.0
assert sensor_msg["acceleration y"] == 255.98828125
assert sensor_msg["acceleration z"] == 0.00390625
assert sensor_msg["acceleration"] == 255.9882812798037
assert sensor_msg["uptime"] == 6744171
assert sensor_msg["battery"] == 98
assert sensor_msg["switch"] == 0
assert sensor_msg["tilt"] == 0
assert sensor_msg["dropping"] == 0
assert sensor_msg["impact"] == 0
assert sensor_msg["impact x"] == 0
assert sensor_msg["impact y"] == 0
assert sensor_msg["impact z"] == 0
assert sensor_msg["rssi"] == -43
| [
"ble_monitor.ble_parser.BleParser"
] | [((459, 470), 'ble_monitor.ble_parser.BleParser', 'BleParser', ([], {}), '()\n', (468, 470), False, 'from ble_monitor.ble_parser import BleParser\n'), ((1776, 1787), 'ble_monitor.ble_parser.BleParser', 'BleParser', ([], {}), '()\n', (1785, 1787), False, 'from ble_monitor.ble_parser import BleParser\n')] |
# Coordinates the actor, the critic, and the environment.
from actor import Actor
from critic import Critic
class Confront():
def __init__(self, state_dim, actions_dim):
self.actor = Actor(state_dim, actions_dim)
self.critic = Critic(state_dim + actions_dim)
def act(self, state):
        # Generate noise-injected candidate actions
        noisy_actions = self.actor.act(state)
        # Choose the action with the best expected reward
        choosed_action = self.critic.choose(noisy_actions, state)
        # Train the actor to prefer the chosen action
self.actor.fit(state, choosed_action)
return choosed_action
def observe(self, action, state, reward, done):
# train the critic to improve expected reward
self.critic.observe(action, state, reward, done)
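# Illustrative usage sketch (comments only; assumes a Gym-style `env` object and
# dimensions that are not part of the original module):
#   agent = Confront(state_dim=4, actions_dim=2)
#   state = env.reset()
#   action = agent.act(state)
#   state, reward, done, _ = env.step(action)
#   agent.observe(action, state, reward, done)
# Each step lets the critic refine its reward estimate while the actor learns to
# propose the actions the critic prefers.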
| [
"actor.Actor",
"critic.Critic"
] | [((196, 225), 'actor.Actor', 'Actor', (['state_dim', 'actions_dim'], {}), '(state_dim, actions_dim)\n', (201, 225), False, 'from actor import Actor\n'), ((248, 279), 'critic.Critic', 'Critic', (['(state_dim + actions_dim)'], {}), '(state_dim + actions_dim)\n', (254, 279), False, 'from critic import Critic\n')] |
from grtoolkit.Math import solveEqs
def Resistance(find, printEq=False, **kwargs):
"""variables:
R=resistance
p=density
l=length
A=cross sectional area"""
eq = list()
eq.append("Eq(R,p*l/A)")
return solveEqs(eq, find, printEq=printEq, **kwargs)
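# Illustrative call (hypothetical values; assumes solveEqs solves the listed
# equation for the requested symbol): resistance of 2 m of copper wire
# (resistivity ~1.68e-8 ohm*m) with a 1 mm^2 cross section.
#   Resistance("R", p=1.68e-8, l=2, A=1e-6, printEq=True)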
def InSeries(r_list):
"""Resistors connected in series is the sum of the individual resistances"""
return sum(r_list)
def InParallel(r_list):
    """The reciprocal of the total parallel resistance is the sum of the reciprocals of the individual resistances."""
    sumOfInverse = sum([1/r for r in r_list])
return 1/sumOfInverse
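# Worked example (illustrative): two 10-ohm resistors.
#   InSeries([10, 10])   -> 20
#   InParallel([10, 10]) -> 5.0   (1 / (1/10 + 1/10))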
def voltageDivision(v_in, r_list_ordered, showWork=False):
"""
Voltage is divided among the resistors in direct proportion to their resistances;
the larger the resistance, the larger the voltage drop.
"""
r_total = sum(r_list_ordered)
voltages = [r/r_total*v_in for r in r_list_ordered]
if showWork:
print("Resistor ordered voltage division: ", voltages)
print("Adjust directions as necessary after getting result.")
return voltages
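# Worked example (illustrative): a 12 V source across 1, 2 and 3 ohm in series.
#   voltageDivision(12, [1, 2, 3]) -> [2.0, 4.0, 6.0]
# The largest resistance drops the largest share of the input voltage.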
def Conductance(R):
    """Conductance is the reciprocal of resistance, G = 1/R (helper assumed here; it was not defined or imported in the original file)."""
    return 1/R
def currentDivision(i_in, r_branch_list_ordered, showWork=False):
    """Current divides among parallel branches in direct proportion to their conductances; the smaller the resistance, the larger the current."""
    conductances = [Conductance(r) for r in r_branch_list_ordered]
g_total = sum(conductances)
currents = [g/g_total*i_in for g in conductances]
if showWork:
print("Branch ordered current division: ", currents)
print("Adjust directions as necessary after getting result.")
return currents
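# Worked example (illustrative): 6 A entering two parallel branches of 2 and 4 ohm
# splits in inverse proportion to resistance (direct proportion to conductance).
#   currentDivision(6, [2, 4]) -> [4.0, 2.0]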
def delta2wye(Ra, Rb, Rc):
"""
''------RA-------''''R2'''''''''R3''''
'''dd''''''''dd'''''''' y'''''y'''''''
'''''RC''''RB''''''''''''''y''''''''''
'''''''d''d''''''''''''''''y''''''''''
''''''''dd'''''''''''''''''R1'''''''''
Returns R1, R2, R3
"""
Rt = Ra+Rb+Rc
R1 = Rb*Rc/Rt
R2 = Rc*Ra/Rt
R3 = Ra*Rb/Rt
return R1, R2, R3
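# Worked example (illustrative): a balanced delta of three 30-ohm resistors maps to
# a balanced wye of three 10-ohm resistors, and wye2delta below inverts the mapping.
#   delta2wye(30, 30, 30) -> (10.0, 10.0, 10.0)
#   wye2delta(10, 10, 10) -> (30.0, 30.0, 30.0)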
def wye2delta(R1, R2, R3):
"""
''------RA-------''''R2'''''''''R3''''
'''dd''''''''dd'''''''' y'''''y'''''''
'''''RC''''RB''''''''''''''y''''''''''
'''''''d''d''''''''''''''''y''''''''''
''''''''dd'''''''''''''''''R1'''''''''
Returns Ra, Rb, Rc
"""
Rx = R1*R2 + R2*R3 + R3*R1
Ra = Rx/R1
Rb = Rx/R2
Rc = Rx/R3
return Ra, Rb, Rc | [
"grtoolkit.Math.solveEqs"
] | [((285, 330), 'grtoolkit.Math.solveEqs', 'solveEqs', (['eq', 'find'], {'printEq': 'printEq'}), '(eq, find, printEq=printEq, **kwargs)\n', (293, 330), False, 'from grtoolkit.Math import solveEqs\n')] |
import torch.nn as nn
import torch.optim as optim
import gym
import torch
from dqn import ReplayBuffer
from torch.distributions import Categorical, Normal
from torch.nn.functional import mse_loss
import numpy as np
from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import ReduceLROnPlateau
# In[]:
# TODO :
# 2. Fixing target
class Critic(nn.Module):
def __init__(self, input_size, output_size=1, hidden_size=12):
super(Critic, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.output_layer(out)
return out
class QCritic(nn.Module):
def __init__(self, input_size, output_size, hidden_size=12):
super(QCritic, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
self.output_layer = nn.Linear(hidden_size, output_size)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.output_layer(out)
return out
class Actor(nn.Module):
def __init__(self, input_size, output_size, hidden_size=12, continuous=False):
super(Actor, self).__init__()
self.continuous = continuous
self.layer1 = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU()
)
if continuous:
# if its continuous action space then done use a softmax at the last layer
self.output_layer = nn.Sequential(
nn.Linear(hidden_size, output_size),
                nn.Sigmoid()  # sigmoid brings the output between 0 and 1; the forward
                # function later rescales this to the range -1 to +1
)
else:
# else use a softmax
self.output_layer = nn.Sequential(
nn.Linear(hidden_size, output_size),
# TODO : Try out log here if any numerical instability occurs
nn.Softmax(dim=-1)
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.output_layer(out)
# transform the output between -1 to 1 for continuous action spaces
if self.continuous:
out = 2*out - 1
return out
def select_action(self, current_state):
"""
selects an action as per some decided exploration
:param current_state: the current state
:return: the chosen action and the log probility to act as the gradient
"""
if not self.continuous:
# if its not continuous action space then use epsilon greedy selection
probs = self(current_state) # probs is the probability of each of the discrete actions possible
# No gaussian exploration can be performed since the actions are discrete and not continuous
# gaussian would make sense and feasibility only when actions are continuous
m = Categorical(probs)
action = m.sample()
return action, m.log_prob(action)
else:
# use gaussian or other form of exploration in continuous action space
action = self(current_state) # action is the action predicted for this current_state
# now time to explore, so sample from a gaussian distribution centered at action
# TODO : This scale can be controlled, its the variance around the mean action
m = Normal(loc=action, scale=torch.Tensor([0.1]))
explored_action = m.sample()
# keep sampling new actions until it is within -1 to +1.
while not (explored_action <= +1 and explored_action >= -1):
explored_action = m.sample()
# Note that the log prob should be at the original action, not at the exploration since the gradient used
# will be the gradient of actor's prediction, not of actor's exploration
return explored_action, m.log_prob(action)
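# Illustrative usage sketch (hypothetical dimensions, not part of the original file):
# a continuous-control actor producing one action in [-1, 1], explored by sampling
# from a Gaussian centred on the network's prediction.
#   actor = Actor(input_size=3, output_size=1, continuous=True)
#   state = torch.randn(3)
#   action, log_prob = actor.select_action(state)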
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.distributions.Categorical",
"torch.nn.Softmax",
"torch.Tensor",
"torch.nn.Linear"
] | [((867, 902), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (876, 902), True, 'import torch.nn as nn\n'), ((1591, 1626), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1600, 1626), True, 'import torch.nn as nn\n'), ((535, 569), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (544, 569), True, 'import torch.nn as nn\n'), ((583, 592), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (590, 592), True, 'import torch.nn as nn\n'), ((652, 687), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (661, 687), True, 'import torch.nn as nn\n'), ((701, 710), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (708, 710), True, 'import torch.nn as nn\n'), ((770, 805), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (779, 805), True, 'import torch.nn as nn\n'), ((819, 828), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (826, 828), True, 'import torch.nn as nn\n'), ((1259, 1293), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1268, 1293), True, 'import torch.nn as nn\n'), ((1307, 1316), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1314, 1316), True, 'import torch.nn as nn\n'), ((1376, 1411), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1385, 1411), True, 'import torch.nn as nn\n'), ((1425, 1434), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1432, 1434), True, 'import torch.nn as nn\n'), ((1494, 1529), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1503, 1529), True, 'import torch.nn as nn\n'), ((1543, 1552), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1550, 1552), True, 'import torch.nn as nn\n'), ((2034, 2068), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (2043, 2068), True, 'import torch.nn as nn\n'), ((2082, 2091), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2089, 2091), True, 'import torch.nn as nn\n'), ((2151, 2186), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2160, 2186), True, 'import torch.nn as nn\n'), ((2200, 2209), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2207, 2209), True, 'import torch.nn as nn\n'), ((2269, 2304), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2278, 2304), True, 'import torch.nn as nn\n'), ((2318, 2327), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2325, 2327), True, 'import torch.nn as nn\n'), ((3992, 4010), 'torch.distributions.Categorical', 'Categorical', (['probs'], {}), '(probs)\n', (4003, 4010), False, 'from torch.distributions import Categorical, Normal\n'), ((2511, 2546), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (2520, 2546), True, 'import torch.nn as nn\n'), ((2564, 2576), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2574, 2576), True, 'import torch.nn as nn\n'), ((2833, 2868), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (2842, 2868), True, 'import torch.nn as nn\n'), ((2964, 2982), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2974, 2982), True, 'import torch.nn as nn\n'), ((4508, 4527), 'torch.Tensor', 
'torch.Tensor', (['[0.1]'], {}), '([0.1])\n', (4520, 4527), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-12 10:55
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
import tuiuiu.contrib.routablepage.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('tuiuiucore', '0024_alter_page_content_type_on_delete_behaviour'),
]
operations = [
migrations.CreateModel(
name='RoutablePageTest',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tuiuiucore.Page')),
],
options={
'abstract': False,
},
bases=(tuiuiu.contrib.routablepage.models.RoutablePageMixin, 'tuiuiucore.page'),
),
]
| [
"django.db.models.OneToOneField"
] | [((534, 703), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""tuiuiucore.Page"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'tuiuiucore.Page')\n", (554, 703), False, 'from django.db import migrations, models\n')] |
from cuda import cuda, nvrtc
import numpy as np
import util.RotRep as Rot
import util.Simulation as Gsim
from config import Config
import h5py
import time
import gpuarray
from collections import Counter
from scipy.sparse import coo_matrix
from scipy.ndimage import gaussian_filter
from run_cuda import run_cuda_function
from cuda_python_compile import create_module
class Initializer:
def __init__(self, Cfg):
module = create_module('strain_device_mjw_3.cu')
self.sim_strain_func = cuda.cuModuleGetFunction(module, b'Simulate_for_Strain')[1]
self.sim_pos_func = cuda.cuModuleGetFunction(module, b'Simulate_for_Pos')[1]
self.KL_total_func = cuda.cuModuleGetFunction(module, b'KL_total')[1]
self.KL_diff_func = cuda.cuModuleGetFunction(module, b'KL_diff')[1]
self.One_func = cuda.cuModuleGetFunction(module, b'ChangeOne')[1]
self.hit_func = cuda.cuModuleGetFunction(module, b'Hit_Score')[1]
self.Cfg = Cfg
self.mode = Cfg.mode
self.ImLoaded = False
self.GsLoaded = False
self.GsGenerated = False
self.windowD = gpuarray.to_gpu(np.array(self.Cfg.window).astype(np.int32))
# Det parameters
self.Det = Gsim.Detector(psizeJ=Cfg.pixelSize / 1000.0,
psizeK=Cfg.pixelSize / 1000.0,
J=Cfg.JCenter,
K=Cfg.KCenter,
trans=np.array([Cfg.Ldistance, 0, 0]),
tilt=Rot.EulerZXZ2Mat(np.array(Cfg.tilt) / 180.0 * np.pi))
afDetInfoH = np.concatenate(
[[Cfg.JPixelNum, Cfg.KPixelNum, Cfg.pixelSize / 1000.0, Cfg.pixelSize / 1000.0],
self.Det.CoordOrigin,
self.Det.Norm,
self.Det.Jvector,
self.Det.Kvector]).astype(np.float32)
self.afDetInfoD = gpuarray.to_gpu(afDetInfoH)
# sample parameters
# hack!! only for Hexagonal
self.sample = Gsim.CrystalStr()
self.sample.PrimA = Cfg.lattice[0] * np.array([1, 0, 0])
self.sample.PrimB = Cfg.lattice[1] * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.sample.PrimC = Cfg.lattice[2] * np.array([0, 0, 1])
Atoms = Cfg.atoms
for ii in range(len(Atoms)):
self.sample.addAtom(list(map(eval, Atoms[ii][0:3])), Atoms[ii][3])
self.sample.getRecipVec()
self.sample.getGs(Cfg.maxQ)
if self.mode == 'rec':
f = h5py.File(Cfg.peakFile, 'r')
# Lim for window position
self.LimH = np.array(f['limits']).astype(np.int32)
self.LimD = gpuarray.to_gpu(self.LimH)
# whichOmega for choosing between omega1 and omega2
self.whichOmega = np.array(f['whichOmega']).astype(np.int32)
self.whichOmegaD = gpuarray.to_gpu(self.whichOmega)
# MaxInt for normalize the weight of each spot
# (because different spots have very different intensity but we want them equal weighted)
self.MaxInt = np.array(f['MaxInt'], dtype=np.float32)
self.MaxIntD = gpuarray.to_gpu(self.MaxInt)
self.Gs = np.array(f['Gs'], dtype=np.float32)
self.NumG = len(self.Gs)
print(self.NumG)
self.orienM = np.array(f['OrienM'])
self.avg_distortion = np.array(f['avg_distortion'])
self.GsGenerated = True
# transfer the ExpImgs and all Gs to texture memory
def loadIm(self):
f = h5py.File(self.Cfg.peakFile, 'r')
AllIm = np.zeros(shape=(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.Cfg.window[2]), dtype=np.uint32, order='F')
for ii in range(self.NumG):
tmp = np.array(f['Imgs']['Im{0:d}'.format(ii)])
AllIm[:tmp.shape[0], :tmp.shape[1], ii * self.Cfg.window[2]:(ii + 1) * self.Cfg.window[2]] = tmp
self.ImLoaded = True
Im = np.array(AllIm).astype(np.uint32)
self.tcExp = gpuarray.to_gpu(Im.ravel())
def loadGs(self):
if not self.GsGenerated:
raise RuntimeError('Gs are not generated yet')
self.tG = gpuarray.to_gpu(np.array(np.transpose(self.Gs).astype(np.float32),order='F'))
self.GsLoaded = True
def generateGs(self, pos, orien, avg_distortion):
self.pos = np.array(pos)
self.orien = np.array(orien)
self.orienM = Rot.EulerZXZ2Mat(self.orien / 180.0 * np.pi)
self.avg_distortion = avg_distortion
Ps, self.Gs, Info = Gsim.GetProjectedVertex(self.Det,
self.sample, self.avg_distortion.dot(self.orienM),
self.Cfg.etalimit / 180.0 * np.pi,
self.pos, getPeaksInfo=True,
omegaL=self.Cfg.omgRange[0],
omegaU=self.Cfg.omgRange[1], energy=self.Cfg.energy)
self.NumG = len(self.Gs)
Lims = []
dx = 150
dy = 80
for ii in range(self.NumG):
            omegid = int((self.Cfg.omgRange[2] - Ps[ii, 2]) / self.Cfg.omgInterval) - 22  # because we store 45 frames
if omegid < 0:
omegid += int(self.Cfg.omgRange[2] / self.Cfg.omgInterval)
elif omegid >= int(self.Cfg.omgRange[2] / self.Cfg.omgInterval):
omegid -= int(self.Cfg.omgRange[2] / self.Cfg.omgInterval)
x1 = int(2047 - Ps[ii, 0] - dx)
y1 = int(Ps[ii, 1] - dy)
x2 = x1 + 2 * dx
y2 = y1 + 2 * dy
# ignore camera boundary limit, I'm just lazy, will correct it later
Lims.append((x1, x2, y1, y2, omegid))
self.LimH = np.array(Lims, dtype=np.int32)
self.LimD = gpuarray.to_gpu(self.LimH)
# whichOmega for choosing between omega1 and omega2
self.whichOmega = np.zeros(len(Lims), dtype=np.int32)
for ii in range(len(Lims)):
if Info[ii]['WhichOmega'] == 'b':
self.whichOmega[ii] = 2
else:
self.whichOmega[ii] = 1
self.whichOmegaD = gpuarray.to_gpu(self.whichOmega)
self.GsGenerated = True
def MoveDet(self, dJ=0, dK=0, dD=0, dT=np.eye(3)):
self.Det.Move(dJ, dK, np.array([dD, 0, 0]), dT)
afDetInfoH = np.concatenate(
[[self.Cfg.JPixelNum, self.Cfg.KPixelNum,
self.Cfg.pixelSize / 1000.0, self.Cfg.pixelSize / 1000.0],
self.Det.CoordOrigin,
self.Det.Norm,
self.Det.Jvector,
self.Det.Kvector]).astype(np.float32)
self.afDetInfoD = gpuarray.to_gpu(afDetInfoH)
def ResetDet(self):
self.Det.Reset()
afDetInfoH = np.concatenate(
[[self.Cfg.JPixelNum, self.Cfg.KPixelNum,
self.Cfg.pixelSize / 1000.0, self.Cfg.pixelSize / 1000.0],
self.Det.CoordOrigin,
self.Det.Norm,
self.Det.Jvector,
self.Det.Kvector]).astype(np.float32)
self.afDetInfoD = gpuarray.to_gpu(afDetInfoH)
def sim_pos_wrapper(self, xs, ys, ss):
NumD = len(xs)
if self.GsLoaded == False:
self.loadGs()
XD = gpuarray.empty(self.NumG * NumD, dtype=np.int32)
YD = gpuarray.empty(self.NumG * NumD, dtype=np.int32)
OffsetD = gpuarray.empty(self.NumG * NumD, dtype=np.int32)
MaskD = gpuarray.empty(self.NumG * NumD, dtype=np.bool_)
TrueMaskD = gpuarray.empty(self.NumG * NumD, dtype=np.bool_)
xsD = gpuarray.to_gpu(xs.astype(np.float32))
ysD = gpuarray.to_gpu(ys.astype(np.float32))
ssD = gpuarray.to_gpu(ss.ravel(order='C').astype(np.float32))
args = [XD, YD, OffsetD, MaskD, TrueMaskD,
xsD, ysD, self.afDetInfoD, ssD,
self.whichOmegaD, np.array(NumD).astype(np.int32), np.array(self.NumG).astype(np.int32),
np.array(self.Cfg.energy).astype(np.float32), np.array(self.Cfg.window[2]).astype(np.int32), self.LimD,
np.array(5).astype(np.int32), np.array(self.Cfg.omgInterval).astype(np.float32),
self.tG]
err = run_cuda_function(self.sim_pos_func,args,(NumD,1,1),(self.NumG,1,1))
xtmp = XD.get().reshape((-1, self.NumG))
ytmp = YD.get().reshape((-1, self.NumG))
otmp = OffsetD.get().reshape((-1, self.NumG))
maskH = MaskD.get().reshape(-1, self.NumG)
#, ytmp, otmp, maskH
return xtmp, ytmp, otmp, maskH
def simMap(self, tmpxx, tmpyy, AllMaxS, blur=False, dtype=np.uint32):
if self.GsLoaded == False:
self.loadGs()
xtmp, ytmp, otmp, maskH = self.sim_pos_wrapper(tmpxx, tmpyy, AllMaxS)
print(xtmp.shape)
res = np.zeros(shape=(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.Cfg.window[2]), dtype=dtype)
for ii in range(self.NumG):
tmpMask = maskH[:, ii]
tmpX = xtmp[tmpMask, ii]
tmpY = ytmp[tmpMask, ii]
tmpO = otmp[tmpMask, ii]
myMaps = np.zeros((self.Cfg.window[2], self.LimH[ii][3] - self.LimH[ii][2], self.LimH[ii][1] - self.LimH[ii][0]),
dtype=dtype)
for jj in range(self.Cfg.window[2]):
idx = np.where(tmpO == jj)[0]
if len(idx) == 0:
myMaps[jj] = 0
continue
myCounter = Counter(zip(tmpX[idx], tmpY[idx]))
val = list(myCounter.values())
xx, yy = zip(*(myCounter.keys()))
tmp = coo_matrix((val, (yy, xx)),
shape=(
self.LimH[ii][3] - self.LimH[ii][2], self.LimH[ii][1] - self.LimH[ii][0])).toarray()
if blur:
myMaps[jj] = gaussian_filter(tmp, sigma=1, mode='nearest', truncate=4)
else:
myMaps[jj] = tmp
myMaps = np.moveaxis(myMaps, 0, 2)
res[:myMaps.shape[0], :myMaps.shape[1], ii * self.Cfg.window[2]:(ii + 1) * self.Cfg.window[2]] = myMaps
return res,[xtmp,ytmp,otmp,maskH]
# def pair_pix_to_vox(self,xx,yy,ss):
# if not self.ImLoaded:
# self.loadIm()
# if not self.GsLoaded:
# self.loadGs()
# for x,y,s in zip(xx,yy,ss):
# XD = gpuarray.empty(self.NumG , dtype=np.int32)
# YD = gpuarray.empty(self.NumG , dtype=np.int32)
# OffsetD = gpuarray.empty(self.NumG, dtype=np.int32)
# MaskD = gpuarray.empty(self.NumG, dtype=np.bool_)
# TrueMaskD = gpuarray.empty(self.NumG, dtype=np.bool_)
# S_gpu = gpuarray.to_gpu(s.ravel(order='C').astype(np.float32))
# x = np.array(x).astype(np.float32)
# y = np.array(y).astype(np.float32)
# args = [XD, YD, OffsetD, MaskD, TrueMaskD,
# x, y, self.afDetInfoD, S_gpu,
# self.whichOmegaD, np.array(1).astype(np.int32), np.array(self.NumG).astype(np.int32),
# np.array(self.Cfg.energy).astype(np.float32), np.array(self.Cfg.window[2]).astype(np.int32), self.LimD,
# np.array(5).astype(np.int32), np.array(self.Cfg.omgInterval).astype(np.float32),
# self.tG]
# err = run_cuda_function(self.sim_strain_func,args,(1,1,1),(self.NumG,1,1))
# print(err)
# return XD.get(),YD.get() | [
"util.RotRep.EulerZXZ2Mat",
"gpuarray.to_gpu",
"numpy.array",
"scipy.ndimage.gaussian_filter",
"numpy.sin",
"numpy.moveaxis",
"cuda.cuda.cuModuleGetFunction",
"numpy.where",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"util.Simulation.CrystalStr",
"numpy.eye",
"h5py.File",
"gpuarray.empty",
"numpy.cos",
"numpy.transpose",
"run_cuda.run_cuda_function",
"cuda_python_compile.create_module",
"numpy.zeros"
] | [((444, 483), 'cuda_python_compile.create_module', 'create_module', (['"""strain_device_mjw_3.cu"""'], {}), "('strain_device_mjw_3.cu')\n", (457, 483), False, 'from cuda_python_compile import create_module\n'), ((1929, 1956), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['afDetInfoH'], {}), '(afDetInfoH)\n', (1944, 1956), False, 'import gpuarray\n'), ((2044, 2061), 'util.Simulation.CrystalStr', 'Gsim.CrystalStr', ([], {}), '()\n', (2059, 2061), True, 'import util.Simulation as Gsim\n'), ((3586, 3619), 'h5py.File', 'h5py.File', (['self.Cfg.peakFile', '"""r"""'], {}), "(self.Cfg.peakFile, 'r')\n", (3595, 3619), False, 'import h5py\n'), ((3636, 3757), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.Cfg.window[2])', 'dtype': 'np.uint32', 'order': '"""F"""'}), "(shape=(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.\n Cfg.window[2]), dtype=np.uint32, order='F')\n", (3644, 3757), True, 'import numpy as np\n'), ((4437, 4450), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (4445, 4450), True, 'import numpy as np\n'), ((4472, 4487), 'numpy.array', 'np.array', (['orien'], {}), '(orien)\n', (4480, 4487), True, 'import numpy as np\n'), ((4510, 4554), 'util.RotRep.EulerZXZ2Mat', 'Rot.EulerZXZ2Mat', (['(self.orien / 180.0 * np.pi)'], {}), '(self.orien / 180.0 * np.pi)\n', (4526, 4554), True, 'import util.RotRep as Rot\n'), ((5900, 5930), 'numpy.array', 'np.array', (['Lims'], {'dtype': 'np.int32'}), '(Lims, dtype=np.int32)\n', (5908, 5930), True, 'import numpy as np\n'), ((5951, 5977), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['self.LimH'], {}), '(self.LimH)\n', (5966, 5977), False, 'import gpuarray\n'), ((6307, 6339), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['self.whichOmega'], {}), '(self.whichOmega)\n', (6322, 6339), False, 'import gpuarray\n'), ((6415, 6424), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6421, 6424), True, 'import numpy as np\n'), ((6818, 6845), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['afDetInfoH'], {}), '(afDetInfoH)\n', (6833, 6845), False, 'import gpuarray\n'), ((7231, 7258), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['afDetInfoH'], {}), '(afDetInfoH)\n', (7246, 7258), False, 'import gpuarray\n'), ((7399, 7447), 'gpuarray.empty', 'gpuarray.empty', (['(self.NumG * NumD)'], {'dtype': 'np.int32'}), '(self.NumG * NumD, dtype=np.int32)\n', (7413, 7447), False, 'import gpuarray\n'), ((7461, 7509), 'gpuarray.empty', 'gpuarray.empty', (['(self.NumG * NumD)'], {'dtype': 'np.int32'}), '(self.NumG * NumD, dtype=np.int32)\n', (7475, 7509), False, 'import gpuarray\n'), ((7528, 7576), 'gpuarray.empty', 'gpuarray.empty', (['(self.NumG * NumD)'], {'dtype': 'np.int32'}), '(self.NumG * NumD, dtype=np.int32)\n', (7542, 7576), False, 'import gpuarray\n'), ((7593, 7641), 'gpuarray.empty', 'gpuarray.empty', (['(self.NumG * NumD)'], {'dtype': 'np.bool_'}), '(self.NumG * NumD, dtype=np.bool_)\n', (7607, 7641), False, 'import gpuarray\n'), ((7662, 7710), 'gpuarray.empty', 'gpuarray.empty', (['(self.NumG * NumD)'], {'dtype': 'np.bool_'}), '(self.NumG * NumD, dtype=np.bool_)\n', (7676, 7710), False, 'import gpuarray\n'), ((8437, 8512), 'run_cuda.run_cuda_function', 'run_cuda_function', (['self.sim_pos_func', 'args', '(NumD, 1, 1)', '(self.NumG, 1, 1)'], {}), '(self.sim_pos_func, args, (NumD, 1, 1), (self.NumG, 1, 1))\n', (8454, 8512), False, 'from run_cuda import run_cuda_function\n'), ((9056, 9162), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.Cfg.window[2])', 'dtype': 'dtype'}), 
'(shape=(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.\n Cfg.window[2]), dtype=dtype)\n', (9064, 9162), True, 'import numpy as np\n'), ((540, 596), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'Simulate_for_Strain'"], {}), "(module, b'Simulate_for_Strain')\n", (564, 596), False, 'from cuda import cuda, nvrtc\n'), ((628, 681), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'Simulate_for_Pos'"], {}), "(module, b'Simulate_for_Pos')\n", (652, 681), False, 'from cuda import cuda, nvrtc\n'), ((714, 759), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'KL_total'"], {}), "(module, b'KL_total')\n", (738, 759), False, 'from cuda import cuda, nvrtc\n'), ((791, 835), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'KL_diff'"], {}), "(module, b'KL_diff')\n", (815, 835), False, 'from cuda import cuda, nvrtc\n'), ((863, 909), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'ChangeOne'"], {}), "(module, b'ChangeOne')\n", (887, 909), False, 'from cuda import cuda, nvrtc\n'), ((937, 983), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['module', "b'Hit_Score'"], {}), "(module, b'Hit_Score')\n", (961, 983), False, 'from cuda import cuda, nvrtc\n'), ((2107, 2126), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2115, 2126), True, 'import numpy as np\n'), ((2277, 2296), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2285, 2296), True, 'import numpy as np\n'), ((2557, 2585), 'h5py.File', 'h5py.File', (['Cfg.peakFile', '"""r"""'], {}), "(Cfg.peakFile, 'r')\n", (2566, 2585), False, 'import h5py\n'), ((2711, 2737), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['self.LimH'], {}), '(self.LimH)\n', (2726, 2737), False, 'import gpuarray\n'), ((2906, 2938), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['self.whichOmega'], {}), '(self.whichOmega)\n', (2921, 2938), False, 'import gpuarray\n'), ((3127, 3166), 'numpy.array', 'np.array', (["f['MaxInt']"], {'dtype': 'np.float32'}), "(f['MaxInt'], dtype=np.float32)\n", (3135, 3166), True, 'import numpy as np\n'), ((3194, 3222), 'gpuarray.to_gpu', 'gpuarray.to_gpu', (['self.MaxInt'], {}), '(self.MaxInt)\n', (3209, 3222), False, 'import gpuarray\n'), ((3245, 3280), 'numpy.array', 'np.array', (["f['Gs']"], {'dtype': 'np.float32'}), "(f['Gs'], dtype=np.float32)\n", (3253, 3280), True, 'import numpy as np\n'), ((3373, 3394), 'numpy.array', 'np.array', (["f['OrienM']"], {}), "(f['OrienM'])\n", (3381, 3394), True, 'import numpy as np\n'), ((3429, 3458), 'numpy.array', 'np.array', (["f['avg_distortion']"], {}), "(f['avg_distortion'])\n", (3437, 3458), True, 'import numpy as np\n'), ((6457, 6477), 'numpy.array', 'np.array', (['[dD, 0, 0]'], {}), '([dD, 0, 0])\n', (6465, 6477), True, 'import numpy as np\n'), ((9379, 9501), 'numpy.zeros', 'np.zeros', (['(self.Cfg.window[2], self.LimH[ii][3] - self.LimH[ii][2], self.LimH[ii][1] -\n self.LimH[ii][0])'], {'dtype': 'dtype'}), '((self.Cfg.window[2], self.LimH[ii][3] - self.LimH[ii][2], self.\n LimH[ii][1] - self.LimH[ii][0]), dtype=dtype)\n', (9387, 9501), True, 'import numpy as np\n'), ((10333, 10358), 'numpy.moveaxis', 'np.moveaxis', (['myMaps', '(0)', '(2)'], {}), '(myMaps, 0, 2)\n', (10344, 10358), True, 'import numpy as np\n'), ((1503, 1534), 'numpy.array', 'np.array', (['[Cfg.Ldistance, 0, 0]'], {}), '([Cfg.Ldistance, 0, 0])\n', (1511, 1534), True, 'import numpy as np\n'), ((1649, 1828), 'numpy.concatenate', 'np.concatenate', (['[[Cfg.JPixelNum, 
Cfg.KPixelNum, Cfg.pixelSize / 1000.0, Cfg.pixelSize / \n 1000.0], self.Det.CoordOrigin, self.Det.Norm, self.Det.Jvector, self.\n Det.Kvector]'], {}), '([[Cfg.JPixelNum, Cfg.KPixelNum, Cfg.pixelSize / 1000.0, Cfg.\n pixelSize / 1000.0], self.Det.CoordOrigin, self.Det.Norm, self.Det.\n Jvector, self.Det.Kvector])\n', (1663, 1828), True, 'import numpy as np\n'), ((4001, 4016), 'numpy.array', 'np.array', (['AllIm'], {}), '(AllIm)\n', (4009, 4016), True, 'import numpy as np\n'), ((6504, 6702), 'numpy.concatenate', 'np.concatenate', (['[[self.Cfg.JPixelNum, self.Cfg.KPixelNum, self.Cfg.pixelSize / 1000.0, self\n .Cfg.pixelSize / 1000.0], self.Det.CoordOrigin, self.Det.Norm, self.Det\n .Jvector, self.Det.Kvector]'], {}), '([[self.Cfg.JPixelNum, self.Cfg.KPixelNum, self.Cfg.pixelSize /\n 1000.0, self.Cfg.pixelSize / 1000.0], self.Det.CoordOrigin, self.Det.\n Norm, self.Det.Jvector, self.Det.Kvector])\n', (6518, 6702), True, 'import numpy as np\n'), ((6917, 7115), 'numpy.concatenate', 'np.concatenate', (['[[self.Cfg.JPixelNum, self.Cfg.KPixelNum, self.Cfg.pixelSize / 1000.0, self\n .Cfg.pixelSize / 1000.0], self.Det.CoordOrigin, self.Det.Norm, self.Det\n .Jvector, self.Det.Kvector]'], {}), '([[self.Cfg.JPixelNum, self.Cfg.KPixelNum, self.Cfg.pixelSize /\n 1000.0, self.Cfg.pixelSize / 1000.0], self.Det.CoordOrigin, self.Det.\n Norm, self.Det.Jvector, self.Det.Kvector])\n', (6931, 7115), True, 'import numpy as np\n'), ((1171, 1196), 'numpy.array', 'np.array', (['self.Cfg.window'], {}), '(self.Cfg.window)\n', (1179, 1196), True, 'import numpy as np\n'), ((2182, 2203), 'numpy.cos', 'np.cos', (['(np.pi * 2 / 3)'], {}), '(np.pi * 2 / 3)\n', (2188, 2203), True, 'import numpy as np\n'), ((2205, 2226), 'numpy.sin', 'np.sin', (['(np.pi * 2 / 3)'], {}), '(np.pi * 2 / 3)\n', (2211, 2226), True, 'import numpy as np\n'), ((2648, 2669), 'numpy.array', 'np.array', (["f['limits']"], {}), "(f['limits'])\n", (2656, 2669), True, 'import numpy as np\n'), ((2832, 2857), 'numpy.array', 'np.array', (["f['whichOmega']"], {}), "(f['whichOmega'])\n", (2840, 2857), True, 'import numpy as np\n'), ((8058, 8072), 'numpy.array', 'np.array', (['NumD'], {}), '(NumD)\n', (8066, 8072), True, 'import numpy as np\n'), ((8091, 8110), 'numpy.array', 'np.array', (['self.NumG'], {}), '(self.NumG)\n', (8099, 8110), True, 'import numpy as np\n'), ((8159, 8184), 'numpy.array', 'np.array', (['self.Cfg.energy'], {}), '(self.Cfg.energy)\n', (8167, 8184), True, 'import numpy as np\n'), ((8205, 8233), 'numpy.array', 'np.array', (['self.Cfg.window[2]'], {}), '(self.Cfg.window[2])\n', (8213, 8233), True, 'import numpy as np\n'), ((8294, 8305), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (8302, 8305), True, 'import numpy as np\n'), ((8324, 8354), 'numpy.array', 'np.array', (['self.Cfg.omgInterval'], {}), '(self.Cfg.omgInterval)\n', (8332, 8354), True, 'import numpy as np\n'), ((9611, 9631), 'numpy.where', 'np.where', (['(tmpO == jj)'], {}), '(tmpO == jj)\n', (9619, 9631), True, 'import numpy as np\n'), ((10178, 10235), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['tmp'], {'sigma': '(1)', 'mode': '"""nearest"""', 'truncate': '(4)'}), "(tmp, sigma=1, mode='nearest', truncate=4)\n", (10193, 10235), False, 'from scipy.ndimage import gaussian_filter\n'), ((4243, 4264), 'numpy.transpose', 'np.transpose', (['self.Gs'], {}), '(self.Gs)\n', (4255, 4264), True, 'import numpy as np\n'), ((9916, 10030), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(val, (yy, xx))'], {'shape': '(self.LimH[ii][3] - self.LimH[ii][2], self.LimH[ii][1] - 
self.LimH[ii][0])'}), '((val, (yy, xx)), shape=(self.LimH[ii][3] - self.LimH[ii][2], \n self.LimH[ii][1] - self.LimH[ii][0]))\n', (9926, 10030), False, 'from scipy.sparse import coo_matrix\n'), ((1591, 1609), 'numpy.array', 'np.array', (['Cfg.tilt'], {}), '(Cfg.tilt)\n', (1599, 1609), True, 'import numpy as np\n')] |
import glob
import distutils.dir_util
import logging
import os
import shutil
import re
import magic
logger = logging.getLogger( "chibi.file.chibi_path" )
class Chibi_path( str ):
def __new__( cls, *args, chibi_file_class=None, **kw ):
args_2 = []
for a in args:
if '~' in a:
a = os.path.expanduser( a )
args_2.append( a )
result = str.__new__( cls, *args_2, **kw )
result._chibi_file_class = chibi_file_class
return result
def __add__( self, other ):
"""
        joins the path with another path or a string
Parameters
==========
other: str or Chibi_path
Returns
=======
Chibi_path
"""
if isinstance( other, self.__class__ ):
if self.is_a_file:
return self.dir_name + other
return type( self )( os.path.join( str( self ), str( other ) ) )
if isinstance( other, str ):
return self + self.__class__( other )
def __eq__( self, other ):
if isinstance( other, Chibi_path ):
return str( self ) == str( other )
if isinstance( other, str ):
return str( self ) == other
return False
def __hash__( self ):
return hash( str( self ) )
def __contains__( self, other ):
if isinstance( other, Chibi_path ):
return other.startswith( self )
else:
return super().__contains__( other )
@property
def is_a_folder( self ):
"""
        True if the path is a folder
"""
return os.path.isdir( self )
@property
def is_a_file( self ):
"""
        True if the path is a file
"""
from chibi.file.snippets import is_a_file
return is_a_file( self )
@property
def is_glob( self ):
return glob.has_magic( self )
@property
def dir_name( self ):
"""
        returns the parent folder
"""
from chibi.file.snippets import file_dir
return type( self )( os.path.dirname( str( self ) ) )
return self.__class__( file_dir( self ) )
@property
def base_name( self ):
"""
        returns the name of the file or folder
"""
return Chibi_path( os.path.basename( self ) )
@property
def file_name( self ):
"""
        returns the file name without the extension
"""
file_name, ext = os.path.splitext( self.base_name )
return file_name
def open( self, chibi_file_class=None, encoding=None, newline=None ):
"""
        opens the file using a chibi file
"""
if self.is_a_folder:
raise NotImplementedError
if chibi_file_class is None:
if self._chibi_file_class is None:
from .file import Chibi_file
chibi_file_class = Chibi_file
else:
chibi_file_class = self._chibi_file_class
return chibi_file_class( self, encoding=encoding, newline=newline )
def relative_to( self, root ):
from .snippets import get_relative_path
return type( self )( get_relative_path( self, root=root ) )
def mkdir( self, **kw ):
"""
        creates a folder at the location of the chibi path
"""
try:
os.makedirs( self )
logger.info( "se creo el directorio '{}'".format( self ) )
except OSError:
pass
if kw:
logger.warning(
"mkdir de chibi path recibio parametros {}".format( kw ) )
def move( self, dest ):
"""
        moves the chibi path to the destination
"""
dest = Chibi_path( dest )
if self.is_a_file:
if dest.is_a_folder:
dest += self.base_name
shutil.move( str( self ), str( dest ) )
logger.info( "{} -> {}".format( self, dest ) )
elif self.is_a_folder:
shutil.move( str( self ), str( dest ) )
logger.info( "{} -> {}".format( self, dest ) )
elif self.is_glob:
if not dest.exists:
dest.mkdir()
if dest.is_a_folder:
return [
f.move( dest + f.base_name ) for f in self.expand ]
else:
raise NotImplementedError(
"el destino no es un folder y la src "
"es un glob '{self}'" )
def copy( self, dest, **kw ):
"""
        copies the file or folder to the destination
"""
from.snippets import copy
dest = Chibi_path( dest )
if self.is_a_file:
if dest.is_a_folder:
dest += self.base_name
copy( self, dest, **kw )
return Chibi_path( dest )
elif self.is_a_folder:
if dest.is_a_file:
raise NotImplementedError(
"no se puede copiar un folder dentro de un archivo" )
distutils.dir_util.copy_tree( str( self ), str( dest ) )
return Chibi_path( dest )
elif self.is_glob:
if not dest.exists:
dest.mkdir()
if dest.is_a_folder:
return [ f.copy( dest + f.base_name ) for f in self.expand ]
else:
raise NotImplementedError(
"el destino no es un folder y la src "
"es un glob '{self}'" )
if not self.exists:
raise OSError( f"the file '{self}' not exists" )
else:
raise NotImplementedError(
f"no esta implementado el copy si "
f"no es un archivo o folder '{self}'" )
def delete( self ):
"""
        deletes the file or folder
"""
from.snippets import delete
delete( str( self ) )
logger.info( 'delete "{}"'.format( self ) )
def chown(
self, verbose=True, user_name=None, group_name=None,
recursive=False ):
"""
        changes the owner of the file or folder
"""
from chibi.file.snippets import chown
chown(
self, user_name=user_name, group_name=group_name,
recursive=recursive, verbose=verbose )
def chmod( self, mod ):
"""
        changes the mode (permissions) of the file or folder
"""
os.chmod( str( self ), mod )
@property
def properties( self ):
from chibi.file.snippets import stat
prop = stat( self )
prop.mime = magic.Magic( mime=True ).from_file( self )
prop.extension = os.path.splitext( self )[1][1:]
return prop
@property
def extension( self ):
"""
        returns the file extension
"""
if self.is_a_file:
return self.properties.extension
else:
raise NotImplementedError
def replace_extensions( self, *extensions ):
"""
        replaces the file extension
"""
file_name, ext = os.path.splitext( self )
extensions = ".".join( extensions )
file_name = ".".join( ( file_name, extensions ) )
return type( self )( file_name )
def add_extensions( self, *extensions ):
"""
        appends additional extensions
"""
file_name, ext = os.path.splitext( self )
extensions = ".".join( extensions )
file_name = ".".join( ( file_name, ext + extensions ) )
return type( self )( file_name )
def ls( self, dirs=True, files=True ):
"""
        returns a generator listing files and folders
"""
from .snippets import ls, ls_only_dir, ls_only_files
if dirs and files:
return ls( self )
elif dirs and not files:
return ls_only_dir( self )
elif not dirs and files:
return ls_only_files( self )
else:
raise NotImplementedError
def find( self, search_term=".*", dirs=True, files=True ):
"""
        searches for files and folders using a regular expression
"""
if self.is_a_file:
raise NotImplementedError(
"no esta implementa buscar en un archivo" )
from .snippets import find, find_only_files, find_only_folders
if dirs and files:
return find( self, search_term )
elif dirs and not files:
return find_only_folders( self, search_term )
elif not dirs and files:
return find_only_files( self, search_term )
else:
raise NotImplementedError
@property
def exists( self ):
"""
        checks whether the file or directory exists
Returns
=======
bool
"""
from .snippets import exists
return exists( str( self ) )
def replace( self, *args, **kw ):
return Chibi_path( super().replace( *args, **kw ) )
def made_safe( self ):
return Chibi_path( re.sub( r'[<>:"|?*]', '', str( self ) ) )
@classmethod
def current_dir( cls ):
"""
        returns the current working directory
Returns
=======
py:class:`chibi.file.Chibi_path`
"""
return Chibi_path( os.getcwd() )
@property
def expand( self ):
if self.is_glob:
return ( type( self )( f ) for f in glob.iglob( self ) )
else:
raise NotImplementedError( "no se que deberia de hacer" )
def __enter__( self ):
return self.open()
def __exit__( self, exc_type, exc_value, traceback ):
pass
def touch( self ):
if not self.exists or self.is_a_file:
self.open().touch()
elif self.is_a_folder:
raise NotADirectoryError(
f"no implementado touch a un folder '{self}'" )
else:
raise NotADirectoryError(
f"no implementado touch cuando no es un "
f"archivo o folder'{self}'" )
@property
def inflate( self ):
if '~' in self:
return os.path.expanduser( self )
else:
return os.path.abspath( self )
| [
"logging.getLogger",
"glob.has_magic",
"os.makedirs",
"glob.iglob",
"chibi.file.snippets.file_dir",
"os.path.splitext",
"os.getcwd",
"os.path.isdir",
"chibi.file.snippets.chown",
"os.path.basename",
"magic.Magic",
"os.path.abspath",
"chibi.file.snippets.is_a_file",
"os.path.expanduser",
"chibi.file.snippets.stat"
] | [((112, 154), 'logging.getLogger', 'logging.getLogger', (['"""chibi.file.chibi_path"""'], {}), "('chibi.file.chibi_path')\n", (129, 154), False, 'import logging\n'), ((1607, 1626), 'os.path.isdir', 'os.path.isdir', (['self'], {}), '(self)\n', (1620, 1626), False, 'import os\n'), ((1782, 1797), 'chibi.file.snippets.is_a_file', 'is_a_file', (['self'], {}), '(self)\n', (1791, 1797), False, 'from chibi.file.snippets import is_a_file\n'), ((1855, 1875), 'glob.has_magic', 'glob.has_magic', (['self'], {}), '(self)\n', (1869, 1875), False, 'import glob\n'), ((2457, 2489), 'os.path.splitext', 'os.path.splitext', (['self.base_name'], {}), '(self.base_name)\n', (2473, 2489), False, 'import os\n'), ((6139, 6236), 'chibi.file.snippets.chown', 'chown', (['self'], {'user_name': 'user_name', 'group_name': 'group_name', 'recursive': 'recursive', 'verbose': 'verbose'}), '(self, user_name=user_name, group_name=group_name, recursive=recursive,\n verbose=verbose)\n', (6144, 6236), False, 'from chibi.file.snippets import chown\n'), ((6505, 6515), 'chibi.file.snippets.stat', 'stat', (['self'], {}), '(self)\n', (6509, 6515), False, 'from chibi.file.snippets import stat\n'), ((7028, 7050), 'os.path.splitext', 'os.path.splitext', (['self'], {}), '(self)\n', (7044, 7050), False, 'import os\n'), ((7322, 7344), 'os.path.splitext', 'os.path.splitext', (['self'], {}), '(self)\n', (7338, 7344), False, 'import os\n'), ((2118, 2132), 'chibi.file.snippets.file_dir', 'file_dir', (['self'], {}), '(self)\n', (2126, 2132), False, 'from chibi.file.snippets import file_dir\n'), ((2284, 2306), 'os.path.basename', 'os.path.basename', (['self'], {}), '(self)\n', (2300, 2306), False, 'import os\n'), ((3341, 3358), 'os.makedirs', 'os.makedirs', (['self'], {}), '(self)\n', (3352, 3358), False, 'import os\n'), ((9246, 9257), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9255, 9257), False, 'import os\n'), ((10078, 10102), 'os.path.expanduser', 'os.path.expanduser', (['self'], {}), '(self)\n', (10096, 10102), False, 'import os\n'), ((10138, 10159), 'os.path.abspath', 'os.path.abspath', (['self'], {}), '(self)\n', (10153, 10159), False, 'import os\n'), ((332, 353), 'os.path.expanduser', 'os.path.expanduser', (['a'], {}), '(a)\n', (350, 353), False, 'import os\n'), ((6538, 6560), 'magic.Magic', 'magic.Magic', ([], {'mime': '(True)'}), '(mime=True)\n', (6549, 6560), False, 'import magic\n'), ((6606, 6628), 'os.path.splitext', 'os.path.splitext', (['self'], {}), '(self)\n', (6622, 6628), False, 'import os\n'), ((9372, 9388), 'glob.iglob', 'glob.iglob', (['self'], {}), '(self)\n', (9382, 9388), False, 'import glob\n')] |
# -*- coding: utf-8 -*-
"""Console script for audio_korpora_pipeline."""
import argparse
import os
import sys
from audio_korpora_pipeline.audio_korpora_pipeline import ExistingOutputAdapter, ExistingInputAdapter
from audio_korpora_pipeline.inputadapter.adapters import CommonVoiceAdapter, UntranscribedVideoAdapter, \
ChJugendspracheAdapter, ArchimobAdapter
from audio_korpora_pipeline.outputadapter.adapters import LjSpeechAdapter, MailabsAdapter, FairseqWav2VecAdapter, \
OpenSeq2SeqAdapter
from audio_korpora_pipeline.utils import load_config, config_logging
def _createInputAdapters(config, inputs):
adapters = []
# parse input
inputs = inputs.split(",")
accepted_input_corpora = [l.value for l in ExistingInputAdapter]
# Create Adapters
for input in inputs:
if input not in accepted_input_corpora:
raise ValueError('please enter valid input corpora type(s): {}'.format(accepted_input_corpora))
if (ExistingInputAdapter.COMMON_VOICE.value == input):
adapters.append(CommonVoiceAdapter(config))
if (ExistingInputAdapter.UNTRANSCRIBED_VIDEO.value == input):
adapters.append(UntranscribedVideoAdapter(config))
if (ExistingInputAdapter.CH_JUGENDSPRACHE.value == input):
adapters.append(ChJugendspracheAdapter(config))
if (ExistingInputAdapter.ARCHIMOB.value == input):
adapters.append(ArchimobAdapter(config))
return adapters
def _createOutputAdapters(config, outputs):
adapters = []
# parse input
outputs = outputs.split(",")
accepted_output_corpora = [l.value for l in ExistingOutputAdapter]
print(accepted_output_corpora)
# Create Adapters
for output in outputs:
if output not in accepted_output_corpora:
raise ValueError('please enter valid output corpora type(s): {}'.format(accepted_output_corpora))
if (ExistingOutputAdapter.MAILABS.value == output):
adapters.append(MailabsAdapter(config))
if (ExistingOutputAdapter.LJ_SPEECH.value == output):
adapters.append(LjSpeechAdapter(config))
if (ExistingOutputAdapter.FAIRSEQ_WAV2VEC.value == output):
adapters.append(FairseqWav2VecAdapter(config))
if (ExistingOutputAdapter.OPENSEQ2SEQ.value == output):
adapters.append(OpenSeq2SeqAdapter(config))
return adapters
def _transformMetamodelsToOutputs(metamodels, output_adapters):
for index, output_adapter in enumerate(output_adapters):
if (index == 0):
output_adapter.cleanOutputFolder()
for metamodel in metamodels:
output_adapter.fromMetamodel(metamodel)
pass
def _transformInputsToMetamodel(input_adapters):
  metamodels = []
  for input_adapter in input_adapters:
    metamodels.append(input_adapter.toMetamodel())
  return metamodels
def main():
"""
Console script for audio_korpora_pipeline.
Implement here CLI args parsing
"""
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", dest="config",
help="path to config file", required=True)
parser.add_argument("-i", "--input_corpora", dest="input",
help="comma separated list of which corpora to transform", required=True)
parser.add_argument("-o", "--output_corpora", dest="output",
help="comma separated list of which corpora to produce", required=True)
args = parser.parse_args()
config_path = args.config
if not os.path.isfile(config_path):
    parser.print_help()
    sys.exit(1)
config = load_config(config_path)
config_logging(load_config(config_path))
# Creating Adapters
input_adapters = _createInputAdapters(config, args.input)
output_adapters = _createOutputAdapters(config, args.output)
print("Started with {} input corpora to transform".format(len(input_adapters)))
print("Started with {} output corpora as target format".format(len(output_adapters)))
# Creating metamodels
metamodels = _transformInputsToMetamodel(input_adapters)
# Doing output work
_transformMetamodelsToOutputs(metamodels, output_adapters)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| [
"audio_korpora_pipeline.outputadapter.adapters.MailabsAdapter",
"audio_korpora_pipeline.inputadapter.adapters.ArchimobAdapter",
"argparse.ArgumentParser",
"audio_korpora_pipeline.outputadapter.adapters.OpenSeq2SeqAdapter",
"audio_korpora_pipeline.inputadapter.adapters.UntranscribedVideoAdapter",
"audio_korpora_pipeline.inputadapter.adapters.ChJugendspracheAdapter",
"os.path.isfile",
"audio_korpora_pipeline.outputadapter.adapters.LjSpeechAdapter",
"audio_korpora_pipeline.outputadapter.adapters.FairseqWav2VecAdapter",
"audio_korpora_pipeline.inputadapter.adapters.CommonVoiceAdapter",
"audio_korpora_pipeline.utils.load_config"
] | [((2829, 2854), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2852, 2854), False, 'import argparse\n'), ((3422, 3446), 'audio_korpora_pipeline.utils.load_config', 'load_config', (['config_path'], {}), '(config_path)\n', (3433, 3446), False, 'from audio_korpora_pipeline.utils import load_config, config_logging\n'), ((3357, 3384), 'os.path.isfile', 'os.path.isfile', (['config_path'], {}), '(config_path)\n', (3371, 3384), False, 'import os\n'), ((3464, 3488), 'audio_korpora_pipeline.utils.load_config', 'load_config', (['config_path'], {}), '(config_path)\n', (3475, 3488), False, 'from audio_korpora_pipeline.utils import load_config, config_logging\n'), ((1012, 1038), 'audio_korpora_pipeline.inputadapter.adapters.CommonVoiceAdapter', 'CommonVoiceAdapter', (['config'], {}), '(config)\n', (1030, 1038), False, 'from audio_korpora_pipeline.inputadapter.adapters import CommonVoiceAdapter, UntranscribedVideoAdapter, ChJugendspracheAdapter, ArchimobAdapter\n'), ((1128, 1161), 'audio_korpora_pipeline.inputadapter.adapters.UntranscribedVideoAdapter', 'UntranscribedVideoAdapter', (['config'], {}), '(config)\n', (1153, 1161), False, 'from audio_korpora_pipeline.inputadapter.adapters import CommonVoiceAdapter, UntranscribedVideoAdapter, ChJugendspracheAdapter, ArchimobAdapter\n'), ((1248, 1278), 'audio_korpora_pipeline.inputadapter.adapters.ChJugendspracheAdapter', 'ChJugendspracheAdapter', (['config'], {}), '(config)\n', (1270, 1278), False, 'from audio_korpora_pipeline.inputadapter.adapters import CommonVoiceAdapter, UntranscribedVideoAdapter, ChJugendspracheAdapter, ArchimobAdapter\n'), ((1357, 1380), 'audio_korpora_pipeline.inputadapter.adapters.ArchimobAdapter', 'ArchimobAdapter', (['config'], {}), '(config)\n', (1372, 1380), False, 'from audio_korpora_pipeline.inputadapter.adapters import CommonVoiceAdapter, UntranscribedVideoAdapter, ChJugendspracheAdapter, ArchimobAdapter\n'), ((1884, 1906), 'audio_korpora_pipeline.outputadapter.adapters.MailabsAdapter', 'MailabsAdapter', (['config'], {}), '(config)\n', (1898, 1906), False, 'from audio_korpora_pipeline.outputadapter.adapters import LjSpeechAdapter, MailabsAdapter, FairseqWav2VecAdapter, OpenSeq2SeqAdapter\n'), ((1988, 2011), 'audio_korpora_pipeline.outputadapter.adapters.LjSpeechAdapter', 'LjSpeechAdapter', (['config'], {}), '(config)\n', (2003, 2011), False, 'from audio_korpora_pipeline.outputadapter.adapters import LjSpeechAdapter, MailabsAdapter, FairseqWav2VecAdapter, OpenSeq2SeqAdapter\n'), ((2099, 2128), 'audio_korpora_pipeline.outputadapter.adapters.FairseqWav2VecAdapter', 'FairseqWav2VecAdapter', (['config'], {}), '(config)\n', (2120, 2128), False, 'from audio_korpora_pipeline.outputadapter.adapters import LjSpeechAdapter, MailabsAdapter, FairseqWav2VecAdapter, OpenSeq2SeqAdapter\n'), ((2212, 2238), 'audio_korpora_pipeline.outputadapter.adapters.OpenSeq2SeqAdapter', 'OpenSeq2SeqAdapter', (['config'], {}), '(config)\n', (2230, 2238), False, 'from audio_korpora_pipeline.outputadapter.adapters import LjSpeechAdapter, MailabsAdapter, FairseqWav2VecAdapter, OpenSeq2SeqAdapter\n')] |
## Copyright 2021 <NAME>
## OpInMod is released under the open source MIT License, see
## https://github.com/hnnngt/OpInMod/blob/main/LICENSE
"""Modules for providing a convenient data structure for OpInMod results.
Check the oemof.solph documentation for further details
https://github.com/oemof/oemof-solph
"""
import oemof.solph.processing as osp
import pandas as pd
def results(om):
result_dict = osp.results(om)
for (ores,ires) in result_dict:
for (oin, iin) in om.sources_inertia:
if om.sources_inertia[oin,iin].provision_type == 'synchronous_generator' and oin == ores and ires == iin:
result_dict[ores, ires]['sequences']['inertia_constant'] = om.sources_inertia[oin,iin].inertia_constant
result_dict[ores, ires]['sequences']['apparent_power'] = om.sources_inertia[oin,iin].apparent_power
elif om.sources_inertia[oin,iin].provision_type == 'synchronous_storage' and oin == ores and ires == iin:
result_dict[ores, ires]['sequences']['inertia_constant'] = om.sources_inertia[oin,iin].inertia_constant
result_dict[ores, ires]['sequences']['apparent_power'] = om.sources_inertia[oin,iin].apparent_power
elif om.sources_inertia[oin,iin].provision_type == 'none' and oin == ores and ires == iin:
result_dict[ores, ires]['sequences']['inertia_constant'] = om.sources_inertia[oin,iin].inertia_constant
result_dict[ores, ires]['sequences']['apparent_power'] = om.sources_inertia[oin,iin].apparent_power
elif om.sources_inertia[oin,iin].provision_type == 'synthetic_wind' and oin == ores and ires == iin:
for t in om.TIMESTEPS:
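                    # the inertia columns are created on the first timestep, afterwards only the current row is filled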
if t == 0:
result_dict[ores, ires]['sequences']['inertia_constant'] = None
result_dict[ores, ires]['sequences']['apparent_power'] = None
result_dict[ores, ires]['sequences'].iloc[t, 1] = om.sources_inertia[oin,iin].inertia_constant[t]
result_dict[ores, ires]['sequences'].iloc[t, 2] = om.sources_inertia[oin,iin].apparent_power
else:
result_dict[ores, ires]['sequences'].iloc[t, 1] = om.sources_inertia[oin,iin].inertia_constant[t]
result_dict[ores, ires]['sequences'].iloc[t, 2] = om.sources_inertia[oin,iin].apparent_power
result_dict[ores, ires]['sequences']['inertia_constant'] = pd.to_numeric(result_dict[ores, ires]['sequences']['inertia_constant'])
result_dict[ores, ires]['sequences']['apparent_power'] = pd.to_numeric(result_dict[ores, ires]['sequences']['apparent_power'])
elif om.sources_inertia[oin,iin].provision_type == 'synthetic_storage' and oin == ores and ires == iin:
result_dict[ores, ires]['sequences']['inertia_constant'] = om.sources_inertia[oin,iin].inertia_constant
result_dict[ores, ires]['sequences']['apparent_power'] = om.sources_inertia[oin,iin].apparent_power
else:
None
return result_dict
| [
"oemof.solph.processing.results",
"pandas.to_numeric"
] | [((412, 427), 'oemof.solph.processing.results', 'osp.results', (['om'], {}), '(om)\n', (423, 427), True, 'import oemof.solph.processing as osp\n'), ((2493, 2564), 'pandas.to_numeric', 'pd.to_numeric', (["result_dict[ores, ires]['sequences']['inertia_constant']"], {}), "(result_dict[ores, ires]['sequences']['inertia_constant'])\n", (2506, 2564), True, 'import pandas as pd\n'), ((2638, 2707), 'pandas.to_numeric', 'pd.to_numeric', (["result_dict[ores, ires]['sequences']['apparent_power']"], {}), "(result_dict[ores, ires]['sequences']['apparent_power'])\n", (2651, 2707), True, 'import pandas as pd\n')] |
import pytest
from pymc3.distributions.transforms import Transform
import pymc3 as pm
class TestTransformName(object):
cases = [
('var', 'var_test__'),
('var_test_', 'var_test__test__')
]
transform_name = 'test'
def test_get_transformed_name(self):
test_transform = Transform()
test_transform.name = self.transform_name
for name, transformed in self.cases:
assert pm.util.get_transformed_name(name, test_transform) == transformed
def test_is_transformed_name(self):
for name, transformed in self.cases:
assert pm.util.is_transformed_name(transformed)
assert not pm.util.is_transformed_name(name)
def test_get_untransformed_name(self):
for name, transformed in self.cases:
assert pm.util.get_untransformed_name(transformed) == name
with pytest.raises(ValueError):
pm.util.get_untransformed_name(name)
| [
"pymc3.util.get_untransformed_name",
"pymc3.util.get_transformed_name",
"pymc3.distributions.transforms.Transform",
"pymc3.util.is_transformed_name",
"pytest.raises"
] | [((310, 321), 'pymc3.distributions.transforms.Transform', 'Transform', ([], {}), '()\n', (319, 321), False, 'from pymc3.distributions.transforms import Transform\n'), ((607, 647), 'pymc3.util.is_transformed_name', 'pm.util.is_transformed_name', (['transformed'], {}), '(transformed)\n', (634, 647), True, 'import pymc3 as pm\n'), ((436, 486), 'pymc3.util.get_transformed_name', 'pm.util.get_transformed_name', (['name', 'test_transform'], {}), '(name, test_transform)\n', (464, 486), True, 'import pymc3 as pm\n'), ((671, 704), 'pymc3.util.is_transformed_name', 'pm.util.is_transformed_name', (['name'], {}), '(name)\n', (698, 704), True, 'import pymc3 as pm\n'), ((813, 856), 'pymc3.util.get_untransformed_name', 'pm.util.get_untransformed_name', (['transformed'], {}), '(transformed)\n', (843, 856), True, 'import pymc3 as pm\n'), ((882, 907), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (895, 907), False, 'import pytest\n'), ((925, 961), 'pymc3.util.get_untransformed_name', 'pm.util.get_untransformed_name', (['name'], {}), '(name)\n', (955, 961), True, 'import pymc3 as pm\n')] |
from HR_Processor import HRProcessor
from queue import *
from tkinter import *
# must initialize a tkinter root for the HRProcessor to be able to initialize StringVar
root = Tk()
sampling_rate_easy = 2/60 # makes be 2 inst hr per 1 min, 10 inst per 5 min (30 second updates)
# the following are default values from Main
tachycardia = 200.0
bradycardia = 40.0
multi_minute_mean_1 = 1
multi_minute_mean_2 = 5
def test_queue_avg():
queue_avg_5 = Queue()
queue_avg_5.put(0)
queue_avg_5.put(10)
my_processor = HRProcessor(sampling_rate_easy, tachycardia, bradycardia, multi_minute_mean_1, multi_minute_mean_2)
assert 5 == my_processor.queue_avg(queue_avg_5)
assert not queue_avg_5.empty()
assert 0 == queue_avg_5.get()
assert 10 == queue_avg_5.get()
assert queue_avg_5.empty()
def test_update_queue():
putter = 4
queue_simple = Queue()
assert queue_simple.empty()
my_processor = HRProcessor(sampling_rate_easy, tachycardia, bradycardia, multi_minute_mean_1, multi_minute_mean_2)
my_processor.update_queue(queue_simple, putter)
assert queue_simple.get() == putter
assert queue_simple.empty()
| [
"HR_Processor.HRProcessor"
] | [((527, 630), 'HR_Processor.HRProcessor', 'HRProcessor', (['sampling_rate_easy', 'tachycardia', 'bradycardia', 'multi_minute_mean_1', 'multi_minute_mean_2'], {}), '(sampling_rate_easy, tachycardia, bradycardia,\n multi_minute_mean_1, multi_minute_mean_2)\n', (538, 630), False, 'from HR_Processor import HRProcessor\n'), ((934, 1037), 'HR_Processor.HRProcessor', 'HRProcessor', (['sampling_rate_easy', 'tachycardia', 'bradycardia', 'multi_minute_mean_1', 'multi_minute_mean_2'], {}), '(sampling_rate_easy, tachycardia, bradycardia,\n multi_minute_mean_1, multi_minute_mean_2)\n', (945, 1037), False, 'from HR_Processor import HRProcessor\n')] |
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>,
# <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
""" Tools to launch snpEff."""
import re
import sys
import os
import shutil
import subprocess as sp
from sequana.resources import snpeff
from sequana import FastA
import colorlog
logger = colorlog.getLogger(__name__)
class SnpEff(object):
""" SnpEff is a tool dedicated to annotate detected variants in a VCF file.
This wrapper eases the annotation with a genbank file. It create
automatically the custom database. Then, run snpEff with a subprocess.
Caution, the locus name (or chromosome name) in genbank file
and the sequence name in VCF file must be the same. Otherwise, snpEff is
not able to bind informations.
Example:
::
snpeff = SnpEff('file.gbk')
snpeff.launch_snpeff('variants.vcf', 'variant.ann.vcf')
If your input is in GFF format, you must also provide the fasta reference file.
Will save relevant snpeff data into ./data directory
"""
def __init__(self, annotation, log=None, snpeff_datadir="data",
fastafile=None):
""".. rubric:: Constructor
        :param annotation: annotation reference (genbank or GFF3 file).
:param log: log file
:param snpeff_datadir:
:param fastafile: if a GFF is used, you must provide the FASTA input
file as well
"""
# Check if the input file exist
if os.path.isfile(annotation):
self.annotation = annotation
self.fastafile = fastafile
self.ref_name = os.path.basename(annotation).split('.')[0]
if self.annotation.endswith(".genbank") or self.annotation.endswith(".gbk"):
self.format = "gbk"
elif self.annotation.endswith(".gff3") or self.annotation.endswith(".gff"):
self.format = "gff3"
else:
logger.error("Format must be genbank or gff3")
sys.exit(1)
else:
logger.error("FileNotFoundError: The file " + annotation +
" does not exist")
sys.exit(1)
# Keep data directory where everything will be saved
self.snpeff_datadir = snpeff_datadir
# Set the log file
self.log_file = log
if log is not None:
if os.path.isfile(log):
os.remove(log)
# Check if snpEff.config is present
if not os.path.exists("snpEff.config"):
logger.info("snpEff.config file not found, creating one")
self._get_snpeff_config()
else:
logger.info("snpEff.config file exists already. Using it.")
# Create custom database
if not os.path.exists(self.snpeff_datadir + os.sep + self.ref_name
+ os.sep + "snpEffectPredictor.bin"):
self._add_custom_db()
elif not self._check_database(self.ref_name):
self._add_db_in_config()
else:
logger.info("DB already added in your config and database")
def _check_database(self, reference):
""" Check if your genbank/GFF is already added."""
proc_db = sp.Popen(["snpEff", "databases"], stdout=sp.PIPE)
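        # the first whitespace-separated column of each output line is the genome id (as bytes)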
snpeff_db = {line.split()[0] for line in proc_db.stdout}
if reference.encode("utf-8") in snpeff_db:
return True
return False
def _get_snpeff_config(self):
""" Copy and unzip the snpEff.config file.
"""
from sequana import sequana_data
CONFIG = sequana_data("snpEff.config", "snpeff")
shutil.copyfile(CONFIG, "./snpEff.config")
def _add_custom_db(self):
""" Add your custom file in the local snpEff database.
"""
# create directory and copy annotation file
logger.info("adding custom DB using your input file(s)")
logger.info(" - {}".format(self.annotation))
if self.fastafile:
logger.info(" - {}".format(self.fastafile))
genome_dir = "data" + os.sep + self.ref_name + os.sep
try:
os.makedirs(genome_dir)
except FileExistsError:
pass
# add new annotation file in config file
self._add_db_in_config()
if self.format == "gbk":
shutil.copyfile(self.annotation, genome_dir + "genes.gbk")
snpeff_build_line = ["snpEff", "build", "-genbank", '-v']
snpeff_build_line += [self.ref_name]
elif self.format == "gff3":
shutil.copyfile(self.annotation, genome_dir + "genes.gff")
if self.fastafile is None or not os.path.exists(self.fastafile):
logger.error("Input file {} does not exist".format(self.fastafile))
sys.exit(1)
shutil.copyfile(self.fastafile, genome_dir + "sequences.fa")
snpeff_build_line = ["snpEff", "build", "-gff3", '-v']
snpeff_build_line += [self.ref_name]
if self.log_file:
with open(self.log_file, "ab") as fl:
snp_build = sp.Popen(snpeff_build_line, stderr=fl, stdout=fl)
else:
snp_build = sp.Popen(snpeff_build_line)
snp_build.wait()
rc = snp_build.returncode
if rc != 0:
logger.error("snpEff build return a non-zero code")
sys.exit(rc)
def _add_db_in_config(self):
""" Add new annotation at the end of snpEff.config file.
"""
logger.info("Updating configuration file")
if not self._check_database(self.ref_name):
with open("snpEff.config", "a") as fp:
print(self.ref_name + ".genome : " + self.ref_name, file=fp)
def launch_snpeff(self, vcf_filename, output, html_output=None,
options=""):
""" Launch snpEff with the custom genbank file.
:param str vcf_filename: input VCF filename.
:param str output: output VCF filename.
:param str html_output: filename of the HTML creates by snpEff.
:param str options: any options recognised by snpEff.
"""
# Create command line for Popen
args_ann = ["snpEff", "-formatEff"]
if html_output is not None:
args_ann += ["-s", html_output]
args_ann += options.split()
args_ann += [self.ref_name, '-v', vcf_filename]
# Launch snpEff
if self.log_file:
with open(self.log_file, "ab") as fl, open(output, "wb") as fp:
proc_ann = sp.Popen(args_ann, stdout=fp, stderr=fl)
proc_ann.wait()
else:
with open(output, "wb") as fp:
proc_ann = sp.Popen(args_ann, stdout=fp)
proc_ann.wait()
def _get_seq_ids(self):
if self.format == "gbk":
regex = re.compile(r'^LOCUS\s+([\w\.\-]+)')
chrom_regex = re.compile(r'\\chromosome="([\w\.\-]+)"')
with open(self.annotation, "r") as fp:
line = fp.readline()
seq = regex.findall(line)
for line in fp:
if line.strip().startswith(('gene', 'CDS',)):
break
chrom = chrom_regex.search(line)
if chrom:
seq = [chrom.group(1)]
regex = chrom_regex
seq += [regex.search(line).group(1) for line in fp
if regex.search(line)]
return seq
else:
regex = re.compile(r'^##sequence-region\s+([\w\.\-]+)')
with open(self.annotation, "r") as fp:
line = fp.readline()
seq = regex.findall(line)
for line in fp:
chrom = regex.findall(line)
if chrom:
seq += chrom
return seq
def add_locus_in_fasta(self, fasta, output_file):
""" Add locus of annotation file in description line of fasta file. If
fasta file and genbank file do not have the same names.
:param str fasta: input fasta file where you want to add locus.
:param str output_file: output file.
FIXME: fasta is already known if provided in the init
"""
fasta_record = FastA(fasta)
ids_list = self._get_seq_ids()
# check if both files have same number of contigs
if len(fasta_record) != len(ids_list):
print("fasta and annotation files don't have the same number of "
"contigs. Found {} and {}".format(len(fasta_record), len(ids_list)))
sys.exit(1)
# check if directory exist
output_dir = os.path.dirname(output_file)
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except FileNotFoundError:
pass
if sorted(fasta_record.names) == sorted(ids_list):
logger.info("Files have same sequence id.")
if os.path.isfile(output_file):
os.remove(output_file)
os.symlink(os.path.realpath(fasta), output_file)
return
else:
logger.info("fasta and GFF seem to have different IDs. Creating a"
"new coherent fasta file assuming the chromsome names appear "
"in the same order in the fasta and gff")
with open(output_file, "w") as fp:
# write fasta with seqid of annotation file
for n in range(len(fasta_record)):
seq_id = ">{0} {1}\n".format(ids_list[n], fasta_record.names[n])
seq = fasta_record.sequences[n]
sequence = "\n".join([seq[i:min(i+80, len(seq))]
for i in range(0, len(seq), 80)]) + "\n"
contigs = seq_id + sequence
fp.write(contigs)
def download_fasta_and_genbank(identifier, tag, genbank=True, fasta=True):
"""
:param identifier: valid identifier to retrieve from NCBI (genbank) and
ENA (fasta)
:param tag: name of the filename for the genbank and fasta files.
"""
if genbank:
from bioservices import EUtils
eu = EUtils()
data = eu.EFetch(db="nuccore",id=identifier, rettype="gbwithparts",
retmode="text")
if isinstance(data, int) and data == 400:
raise ValueError("{} not found on NCBI".format(identifier))
else:
with open("%s.gbk" % tag, "w") as fout:
fout.write(data.decode())
if fasta:
from bioservices import ENA
ena = ENA()
data = ena.get_data(identifier, 'fasta')
if isinstance(data, int) and data == 400:
raise ValueError("{} not found on ENA".format(identifier))
else:
with open("%s.fa" % tag, "w") as fout:
try:
# change in API in v1.7.8
fout.write(data)
except:
fout.write(data.decode())
| [
"os.path.exists",
"bioservices.EUtils",
"sequana.FastA",
"os.makedirs",
"re.compile",
"bioservices.ENA",
"subprocess.Popen",
"colorlog.getLogger",
"os.path.isfile",
"os.path.dirname",
"shutil.copyfile",
"sequana.sequana_data",
"os.path.realpath",
"os.path.basename",
"sys.exit",
"os.remove"
] | [((714, 742), 'colorlog.getLogger', 'colorlog.getLogger', (['__name__'], {}), '(__name__)\n', (732, 742), False, 'import colorlog\n'), ((1933, 1959), 'os.path.isfile', 'os.path.isfile', (['annotation'], {}), '(annotation)\n', (1947, 1959), False, 'import os\n'), ((3679, 3728), 'subprocess.Popen', 'sp.Popen', (["['snpEff', 'databases']"], {'stdout': 'sp.PIPE'}), "(['snpEff', 'databases'], stdout=sp.PIPE)\n", (3687, 3728), True, 'import subprocess as sp\n'), ((4046, 4085), 'sequana.sequana_data', 'sequana_data', (['"""snpEff.config"""', '"""snpeff"""'], {}), "('snpEff.config', 'snpeff')\n", (4058, 4085), False, 'from sequana import sequana_data\n'), ((4094, 4136), 'shutil.copyfile', 'shutil.copyfile', (['CONFIG', '"""./snpEff.config"""'], {}), "(CONFIG, './snpEff.config')\n", (4109, 4136), False, 'import shutil\n'), ((8770, 8782), 'sequana.FastA', 'FastA', (['fasta'], {}), '(fasta)\n', (8775, 8782), False, 'from sequana import FastA\n'), ((9173, 9201), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (9188, 9201), False, 'import os\n'), ((10671, 10679), 'bioservices.EUtils', 'EUtils', ([], {}), '()\n', (10677, 10679), False, 'from bioservices import EUtils\n'), ((11080, 11085), 'bioservices.ENA', 'ENA', ([], {}), '()\n', (11083, 11085), False, 'from bioservices import ENA\n'), ((2613, 2624), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2621, 2624), False, 'import sys\n'), ((2831, 2850), 'os.path.isfile', 'os.path.isfile', (['log'], {}), '(log)\n', (2845, 2850), False, 'import os\n'), ((2943, 2974), 'os.path.exists', 'os.path.exists', (['"""snpEff.config"""'], {}), "('snpEff.config')\n", (2957, 2974), False, 'import os\n'), ((3219, 3319), 'os.path.exists', 'os.path.exists', (["(self.snpeff_datadir + os.sep + self.ref_name + os.sep +\n 'snpEffectPredictor.bin')"], {}), "(self.snpeff_datadir + os.sep + self.ref_name + os.sep +\n 'snpEffectPredictor.bin')\n", (3233, 3319), False, 'import os\n'), ((4584, 4607), 'os.makedirs', 'os.makedirs', (['genome_dir'], {}), '(genome_dir)\n', (4595, 4607), False, 'import os\n'), ((4786, 4844), 'shutil.copyfile', 'shutil.copyfile', (['self.annotation', "(genome_dir + 'genes.gbk')"], {}), "(self.annotation, genome_dir + 'genes.gbk')\n", (4801, 4844), False, 'import shutil\n'), ((5642, 5669), 'subprocess.Popen', 'sp.Popen', (['snpeff_build_line'], {}), '(snpeff_build_line)\n', (5650, 5669), True, 'import subprocess as sp\n'), ((5825, 5837), 'sys.exit', 'sys.exit', (['rc'], {}), '(rc)\n', (5833, 5837), False, 'import sys\n'), ((7299, 7337), 're.compile', 're.compile', (['"""^LOCUS\\\\s+([\\\\w\\\\.\\\\-]+)"""'], {}), "('^LOCUS\\\\s+([\\\\w\\\\.\\\\-]+)')\n", (7309, 7337), False, 'import re\n'), ((7361, 7406), 're.compile', 're.compile', (['"""\\\\\\\\chromosome="([\\\\w\\\\.\\\\-]+)\\""""'], {}), '(\'\\\\\\\\chromosome="([\\\\w\\\\.\\\\-]+)"\')\n', (7371, 7406), False, 'import re\n'), ((8008, 8058), 're.compile', 're.compile', (['"""^##sequence-region\\\\s+([\\\\w\\\\.\\\\-]+)"""'], {}), "('^##sequence-region\\\\s+([\\\\w\\\\.\\\\-]+)')\n", (8018, 8058), False, 'import re\n'), ((9104, 9115), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9112, 9115), False, 'import sys\n'), ((9484, 9511), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (9498, 9511), False, 'import os\n'), ((2868, 2882), 'os.remove', 'os.remove', (['log'], {}), '(log)\n', (2877, 2882), False, 'import os\n'), ((5012, 5070), 'shutil.copyfile', 'shutil.copyfile', (['self.annotation', "(genome_dir + 'genes.gff')"], {}), 
"(self.annotation, genome_dir + 'genes.gff')\n", (5027, 5070), False, 'import shutil\n'), ((5272, 5332), 'shutil.copyfile', 'shutil.copyfile', (['self.fastafile', "(genome_dir + 'sequences.fa')"], {}), "(self.fastafile, genome_dir + 'sequences.fa')\n", (5287, 5332), False, 'import shutil\n'), ((5554, 5603), 'subprocess.Popen', 'sp.Popen', (['snpeff_build_line'], {'stderr': 'fl', 'stdout': 'fl'}), '(snpeff_build_line, stderr=fl, stdout=fl)\n', (5562, 5603), True, 'import subprocess as sp\n'), ((6998, 7038), 'subprocess.Popen', 'sp.Popen', (['args_ann'], {'stdout': 'fp', 'stderr': 'fl'}), '(args_ann, stdout=fp, stderr=fl)\n', (7006, 7038), True, 'import subprocess as sp\n'), ((7155, 7184), 'subprocess.Popen', 'sp.Popen', (['args_ann'], {'stdout': 'fp'}), '(args_ann, stdout=fp)\n', (7163, 7184), True, 'import subprocess as sp\n'), ((9234, 9260), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (9248, 9260), False, 'import os\n'), ((9278, 9301), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (9289, 9301), False, 'import os\n'), ((9529, 9551), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (9538, 9551), False, 'import os\n'), ((9575, 9598), 'os.path.realpath', 'os.path.realpath', (['fasta'], {}), '(fasta)\n', (9591, 9598), False, 'import os\n'), ((2460, 2471), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2468, 2471), False, 'import sys\n'), ((5248, 5259), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5256, 5259), False, 'import sys\n'), ((2070, 2098), 'os.path.basename', 'os.path.basename', (['annotation'], {}), '(annotation)\n', (2086, 2098), False, 'import os\n'), ((5116, 5146), 'os.path.exists', 'os.path.exists', (['self.fastafile'], {}), '(self.fastafile)\n', (5130, 5146), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from wiki_contribution.run import app, server
if __name__ == "__main__":
app.run_server(debug=False, port=5000) | [
"wiki_contribution.run.app.run_server"
] | [((102, 140), 'wiki_contribution.run.app.run_server', 'app.run_server', ([], {'debug': '(False)', 'port': '(5000)'}), '(debug=False, port=5000)\n', (116, 140), False, 'from wiki_contribution.run import app, server\n')] |
import os
from django.core.asgi import get_asgi_application
from channels.routing import ProtocolTypeRouter, URLRouter
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
django_asgi_app = get_asgi_application()
from chat.middlewares import TokenAuthMiddleware
import chat.routing
application = ProtocolTypeRouter({
'websocket': TokenAuthMiddleware(
URLRouter(
chat.routing.websocket_urlpatterns
)
),
})
| [
"os.environ.setdefault",
"channels.routing.URLRouter",
"django.core.asgi.get_asgi_application"
] | [((122, 188), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""config.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'config.settings')\n", (143, 188), False, 'import os\n'), ((207, 229), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ([], {}), '()\n', (227, 229), False, 'from django.core.asgi import get_asgi_application\n'), ((384, 429), 'channels.routing.URLRouter', 'URLRouter', (['chat.routing.websocket_urlpatterns'], {}), '(chat.routing.websocket_urlpatterns)\n', (393, 429), False, 'from channels.routing import ProtocolTypeRouter, URLRouter\n')] |
import os
import webbrowser
import tkinter as tk
from tkinter import *
window = Tk()
window.geometry('200x200')
window.title("quit finder")
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=1)
window.resizable(width=False, height=False)
window.eval('tk::PlaceWindow %s center' % window.winfo_toplevel())
def quit():
os.system("defaults write com.apple.finder QuitMenuItem -bool true")
os.system("""osascript -e 'quit app "Finder"'""")
lbl.configure(text="was it worth it?")
btn.config(state='disabled')
def tagbtn():
webbrowser.open("http://sahasramesh.com")
fr = Frame(window)
fr.grid(column=0, row=0)
lbl = Label(fr, text="let go of your fear.", font=("Times New Roman", 14))
lbl.grid(column=0, row=0)
btn = Button(fr, text="do it.", command=quit, font=("Times New Roman", 14))
btn.grid(column=0, row=1)
fr1 = Frame(window)
fr1.grid(column=0, row=2)
#tag
tag = Label(fr1, text="An original project by ", font=('Courier', 6))
tag.pack(side=LEFT)
#tag
btn4 = Button(fr1, text="<NAME>", fg='#FF4500', anchor=W, highlightbackground='white', highlightcolor='white', highlightthickness=0, command=tagbtn, font=('Courier', 6))
btn4.pack(side=LEFT)
window.mainloop()
| [
"os.system",
"webbrowser.open"
] | [((338, 406), 'os.system', 'os.system', (['"""defaults write com.apple.finder QuitMenuItem -bool true"""'], {}), "('defaults write com.apple.finder QuitMenuItem -bool true')\n", (347, 406), False, 'import os\n'), ((411, 458), 'os.system', 'os.system', (['"""osascript -e \'quit app "Finder"\'"""'], {}), '(\'osascript -e \\\'quit app "Finder"\\\'\')\n', (420, 458), False, 'import os\n'), ((557, 598), 'webbrowser.open', 'webbrowser.open', (['"""http://sahasramesh.com"""'], {}), "('http://sahasramesh.com')\n", (572, 598), False, 'import webbrowser\n')] |
#!/usr/bin/env python3
import torch
import torch.utils.data
import pandas as pd
class UNSW_NB15(torch.utils.data.Dataset):
def __init__(self, file_path, sequence_length=25, transform=None):
#TODO have a sequence_overlap=True flag? Does overlap matter?
self.transform = transform
self.sequence_length = sequence_length
self.columns = ['srcip', 'sport', 'dstip', 'dsport', 'proto', 'state', 'dur', 'sbytes',
'dbytes', 'sttl', 'dttl', 'sloss', 'dloss', 'service', 'sload', 'dload',
'spkts', 'dpkts', 'swin', 'dwin', 'stcpb', 'dtcpb', 'smeansz',
'dmeansz', 'trans_depth', 'res_bdy_len', 'sjit', 'djit', 'stime',
'ltime', 'sintpkt', 'dintpkt', 'tcprtt', 'synack', 'ackdat',
'is_sm_ips_ports', 'ct_state_ttl', 'ct_flw_http_mthd', 'is_ftp_login',
'ct_ftp_cmd', 'ct_srv_src', 'ct_srv_dst', 'ct_dst_ltm', 'ct_src_ltm',
'ct_src_dport_ltm', 'ct_dst_sport_ltm', 'ct_dst_src_ltm', 'attack_cat',
'label']
self.dtypes = dtypes = {"scrip": "str",
"sport": "int32",
"dstip": "str",
"dsport": "int32",
"proto": "str",
"state": "str",
"dur": "float64",
"sbytes": "int32",
"dbytes": "int32",
"sttl": "int32",
"dttl": "int32",
"sloss": "int32",
"dloss": "int32",
"service": "str",
"sload": "float64",
"dload": "float64",
"spkts": "int32",
"dpkts": "int32",
"swin": "int32",
"dwin": "int32",
"stcpb": "int32",
"dtcpb": "int32",
"smeansz": "int32",
"dmeansz": "int32",
"trans_depth": "int32",
"res_bdy_len": "int32",
"sjit": "float64",
"djit": "float64",
"stime": "int64",
"ltime": "int64",
"sintpkt": "float64",
"dintpkt": "float64",
"tcprtt": "float64",
"synack": "float64",
"ackdat": "float64",
#commenting these because they have mixed values and we aren't going to generate them anyway
#"is_sm_ips_ports": "int32",
#"ct_state_ttl": "int32",
#"ct_flw_httpd_mthd": "int32",
#"is_ftp_login": "int32",
#"is_ftp_cmd": "int32",
#"ct_ftp_cmd": "int32",
#"ct_srv_src": "int32",
##"ct_dst_ltm": "int32",
#"ct_src_ltm": "int32",
#"ct_src_dport_ltm": "int32",
#"ct_dst_sport_ltm": "int32",
#"ct_dst_src_ltm": "int32",
"attack_cat": "str",
"label": "int32"}
self.categorical_column_values = {"proto":None, "state":None, "service":None, "attack_cat":None}
self.dataframe = pd.read_csv(file_path, encoding="latin-1", names=self.columns, header=None, dtype=self.dtypes)
self.dataframe.sort_values(by=['stime']) #sort chronologically upon loading
#load all the unique values of categorical features at the start
#and make these accessible via a fast function call.
for key in self.categorical_column_values:
self.categorical_column_values[key] = self.dataframe[key].unique()
#cache all the maximum values in numeric columns since we'll be using these for feature extraction
self.maximums = {}
for key in self.dtypes:
if "int" in self.dtypes[key] or "float" in self.dtypes[key]:
self.maximums[key] = max(self.dataframe[key])
def __len__(self):
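        # each example is a window of sequence_length consecutive rows, so the last rows cannot start a full window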
return len(self.dataframe.index) - self.sequence_length
def __getitem__(self, index):
#TODO need error checking for out of bounds?
#TODO return x,y where y is the category of the example
#since none corresponds to "normal" data
list_of_dicts = []
for i in range(index,index+self.sequence_length):
list_of_dicts.append(self.dataframe.loc[i, :].to_dict())
if self.transform is not None:
return self.transform(self, list_of_dicts)
return list_of_dicts
#get a list of all the unique labels in the dataset
def get_labels(self):
return self.dataframe['label'].unique().tolist()
#get a list of all the unique attack categories in the dataset
def get_attack_categories(self):
return self.dataframe['attack_cat'].unique().tolist()
def get_list_of_categories(self, column_name):
pass #TODO
#limit the dataset to only examples in the specified category
def use_only_category(self, category_name):
if category_name not in self.get_attack_categories():
return False
new_dataframe = self.dataframe[self.dataframe['attack_cat'] == category_name]
new_dataframe = new_dataframe.reset_index()
self.dataframe = new_dataframe
return True
#limit the dataset to only examples with the specified label
def use_only_label(self, label):
if label not in self.get_labels():
return False
new_dataframe = self.dataframe[self.dataframe['label'] == label]
new_dataframe = new_dataframe.reset_index()
self.dataframe = new_dataframe
return True | [
"pandas.read_csv"
] | [((3864, 3962), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'encoding': '"""latin-1"""', 'names': 'self.columns', 'header': 'None', 'dtype': 'self.dtypes'}), "(file_path, encoding='latin-1', names=self.columns, header=None,\n dtype=self.dtypes)\n", (3875, 3962), True, 'import pandas as pd\n')] |
from celery.utils.log import get_logger
from pocfullstack.celery import app
from .settings import IMPORT_CSV_URL, CSV_TMP_FILE
from .models import SaleFacts, SaleFactsBad
import csv
import urllib.request
import uuid
import traceback
logger = get_logger(__name__)
@app.task
def create_import_chain():
"""
Task creates import chain for CSV data
"""
fetch_csv_file_si = fetch_csv_file.si()
read_lines_si = read_lines.si()
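    # run in sequence: download the CSV first, then read it and enqueue one processing task per row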
(fetch_csv_file_si | read_lines_si).delay()
@app.task(default_retry_delay=300, max_retries=10)
def fetch_csv_file():
"""
Task fetching CSV file data. I must assume the URL is not being changed.
"""
sd = urllib.request.urlopen(IMPORT_CSV_URL)
with open(CSV_TMP_FILE, 'wb') as fd:
fd.write(sd.read())
@app.task
def read_lines():
"""
Task is used to insert rows into process queue
"""
with open(CSV_TMP_FILE, 'r') as csv_fh:
csv_reader = csv.reader(csv_fh)
for row in csv_reader:
process_data.delay(row)
@app.task
def process_data(row):
if len(row) != 15:
# wrong row
logger.error('Wrong "row" with data: %s' % row)
return
uid = uuid.UUID(row[0])
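    # row[14] carries the operation flag: 'A' appends, 'C' changes an existing fact, 'D' deletes it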
if row[14] == 'A':
# append row
try:
logger.info('Creating UID: %s' % uid)
SaleFacts.objects.create_from_row(row)
except:
SaleFactsBad.objects.create(row='%s' % row, exc=traceback.format_exc())
elif row[14] == 'C':
# change
fact = SaleFacts.objects.filter(transaction_uid=uid).first()
if fact is None:
try:
logger.info('Creating (instead of update) UID: %s' % uid)
SaleFacts.objects.create_from_row(row)
except:
SaleFactsBad.objects.create(row='%s' % row, exc=traceback.format_exc())
else:
fact.update_from_row(row)
fact.save()
logger.info('Updating UID: %s' % uid)
elif row[14] == 'D':
SaleFacts.objects.filter(transaction_uid=uid).delete()
logger.info('Deleting UID: %s' % uid)
| [
"pocfullstack.celery.app.task",
"traceback.format_exc",
"uuid.UUID",
"celery.utils.log.get_logger",
"csv.reader"
] | [((244, 264), 'celery.utils.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (254, 264), False, 'from celery.utils.log import get_logger\n'), ((495, 544), 'pocfullstack.celery.app.task', 'app.task', ([], {'default_retry_delay': '(300)', 'max_retries': '(10)'}), '(default_retry_delay=300, max_retries=10)\n', (503, 544), False, 'from pocfullstack.celery import app\n'), ((1184, 1201), 'uuid.UUID', 'uuid.UUID', (['row[0]'], {}), '(row[0])\n', (1193, 1201), False, 'import uuid\n'), ((939, 957), 'csv.reader', 'csv.reader', (['csv_fh'], {}), '(csv_fh)\n', (949, 957), False, 'import csv\n'), ((1436, 1458), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1456, 1458), False, 'import traceback\n'), ((1826, 1848), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1846, 1848), False, 'import traceback\n')] |
#coding=utf-8
'''
Created on 2014-1-14
@author: ETHAN
'''
from doraemon.automationtesting.models import AutoMobileDevice
from django.db.models import Q
class DAL_AutoMobileDevice(object):
'''
    data access class for AutoMobileDevice model
'''
@staticmethod
def get_all():
''' get all mobiledevice
'''
return AutoMobileDevice.objects.all().filter(MDIsActive=1)
@staticmethod
def add_automobiledevice(automobiledevice):
'''
add new mobiledevice
'''
automobiledevice.save()
@staticmethod
def get_automobiledevice(id):
''' get mobiledevice by id
'''
return AutoMobileDevice.objects.get(id=id)
@staticmethod
def get_device_byos(deviceos):
return AutoMobileDevice.objects.all().filter(MDIsActive=1).filter(MDeviceOS=deviceos).filter(~Q(MDeviceStatus=3))
| [
"django.db.models.Q",
"doraemon.automationtesting.models.AutoMobileDevice.objects.all",
"doraemon.automationtesting.models.AutoMobileDevice.objects.get"
] | [((680, 715), 'doraemon.automationtesting.models.AutoMobileDevice.objects.get', 'AutoMobileDevice.objects.get', ([], {'id': 'id'}), '(id=id)\n', (708, 715), False, 'from doraemon.automationtesting.models import AutoMobileDevice\n'), ((345, 375), 'doraemon.automationtesting.models.AutoMobileDevice.objects.all', 'AutoMobileDevice.objects.all', ([], {}), '()\n', (373, 375), False, 'from doraemon.automationtesting.models import AutoMobileDevice\n'), ((876, 894), 'django.db.models.Q', 'Q', ([], {'MDeviceStatus': '(3)'}), '(MDeviceStatus=3)\n', (877, 894), False, 'from django.db.models import Q\n'), ((789, 819), 'doraemon.automationtesting.models.AutoMobileDevice.objects.all', 'AutoMobileDevice.objects.all', ([], {}), '()\n', (817, 819), False, 'from doraemon.automationtesting.models import AutoMobileDevice\n')] |
import random
import pandas as pd
first_name = ["Mohammed", "Ali", "Bichara", "Hassan", "Jacques",
"Marie", "Isabelle", "Christine", "Magne", "Adele",
"Chriss", "Bayang", "Benjamin", "Djelassem", "Emile",
"Yannick", "Narcisse", "Josue", "Jean", "Idriss", "",
"Alain", "Billie", "Moussa", "Ghyslain", "Sylvain",]
last_name = ["Al-bachar", "Zakaria", "Oumar", "Djibril", "Abdel",
"Issa", "Kalman", "Edison", "Akoro", "Nick",
"Iboko", "Bouva", "Abba", "Souleyman", "Hissein",
"Adam", "Mounir", "Alphonse", "Itno", "Hinda",
"Sankara", "Hadje", "Ache", "Haoua", "Nasser",]
names = []
for i in range(1000050):
index_f = random.randint(0, len(first_name)-1)
index_l = random.randint(0, len(last_name)-1)
fname = first_name[index_f]
tmp = fname + " "+last_name[index_l]
    # date of birth
anne = random.randint(1990, 2000)
mois = random.randint(1, 12)
mois = "0"+ str(mois) if len(str(mois))==1 else str(mois)
    jour = 28 if mois == "02" else random.randint(1, 31)  # mois is already a zero-padded string here
jour = "0"+str(jour) if len(str(jour))==1 else str(jour)
naissance = str(anne)+"/"+str(mois)+"/"+str(jour)
# status
status = ["Condamnee", "non Condamnee"]
status = status[random.randint(0,1)]
tribunal = ["Tribuanl 1", "Tribuanl 2", "Tribuanl 3", "Tribuanl 4", "Tribuanl 5"]
idx = random.randint(0, len(tribunal)-1)
tribunal = tribunal[idx]
    if fname != "":  # skip rows built from the empty first-name placeholder
if status == "Condamnee":
_anne = random.randint(2009, 2017)
            _mois = random.randint(1, 12)
            # same fix: choose the day before _mois is converted to a string
            _jour = 28 if _mois == 2 else random.randint(1, 31)
            _mois = "0"+ str(_mois) if len(str(_mois))==1 else str(_mois)
            _jour = "0"+str(_jour) if len(str(_jour))==1 else str(_jour)
_naissance = str(_anne)+"/"+str(_mois)+"/"+str(_jour)
names.append([tmp, naissance, status, _naissance, tribunal])
else:
names.append([tmp, naissance, status, "n/a", "n/a"])
print("citoyen ", i, " : ", names[-1])
# saving to csv file
df = pd.DataFrame(names)
df.to_csv("citoyen.csv", index=False, header=["Noms", "Naissance", "Status", "cond", "Tribunal"]) | [
"pandas.DataFrame",
"random.randint"
] | [((1902, 1921), 'pandas.DataFrame', 'pd.DataFrame', (['names'], {}), '(names)\n', (1914, 1921), True, 'import pandas as pd\n'), ((829, 855), 'random.randint', 'random.randint', (['(1990)', '(2000)'], {}), '(1990, 2000)\n', (843, 855), False, 'import random\n'), ((864, 885), 'random.randint', 'random.randint', (['(1)', '(12)'], {}), '(1, 12)\n', (878, 885), False, 'import random\n'), ((974, 995), 'random.randint', 'random.randint', (['(1)', '(31)'], {}), '(1, 31)\n', (988, 995), False, 'import random\n'), ((1174, 1194), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1188, 1194), False, 'import random\n'), ((1404, 1430), 'random.randint', 'random.randint', (['(2009)', '(2017)'], {}), '(2009, 2017)\n', (1418, 1430), False, 'import random\n'), ((1442, 1463), 'random.randint', 'random.randint', (['(1)', '(12)'], {}), '(1, 12)\n', (1456, 1463), False, 'import random\n'), ((1562, 1583), 'random.randint', 'random.randint', (['(1)', '(31)'], {}), '(1, 31)\n', (1576, 1583), False, 'import random\n')] |
import pickle
import csv
from BART import write_to_csv_file
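# Re-export BART runs from a pickled emergency backup into one CSV file per participant.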
submissions = pickle.load(open("emergency_backup.p", "rb"))
for pid in submissions:
sessions = submissions[pid]
all_runs = []
for s in sessions:
all_runs += s
filename = "{}_emergency_backup".format(pid)
write_to_csv_file("data", filename, all_runs)
| [
"BART.write_to_csv_file"
] | [((295, 340), 'BART.write_to_csv_file', 'write_to_csv_file', (['"""data"""', 'filename', 'all_runs'], {}), "('data', filename, all_runs)\n", (312, 340), False, 'from BART import write_to_csv_file\n')] |
import requests
import os
import json
import time
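# Scan a folder of snapshots, send each image to the local recognition service,
# move pictures that contain cars into a cars/ subfolder and delete the rest.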
path = '../../temp/2021-03-08 (copy)'
with os.scandir(path) as entries:
print("analyse path: " + path)
try:
os.mkdir(path + "/cars")
    except FileExistsError:
        # the cars/ output folder may already exist from a previous run
        pass
for entry in entries:
start = time.time()
if entry.is_file():
if entry.name.endswith('.jpg') or entry.name.endswith('.thumb'):
file = open(entry, 'rb')
files={"image": file}
res=requests.post(url='http://rpi4gb',
files=files)
if res.status_code == 200:
cars=res.json()['cars']
if cars == []:
print("no car in file, remove: " + entry.name)
os.remove(entry)
else:
extra_fn = ''
for car in cars:
extra_fn = extra_fn + "--{}-{}-{}".format(car['make'], car['model'], car['color'])
fn_parts = entry.name.split('.')
newPath=path + '/cars/' + fn_parts[0] + extra_fn + '.' + fn_parts[1]
print("move file to: " + newPath)
os.rename(entry.path, newPath)
else:
print("no picture file, remove: " + entry.name)
os.remove(entry)
end = time.time()
print("[INFO] Object detection took {:.6f} seconds for file {}".format(end - start, entry.name))
| [
"requests.post",
"os.rename",
"os.scandir",
"os.mkdir",
"time.time",
"os.remove"
] | [((100, 116), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (110, 116), False, 'import os\n'), ((186, 210), 'os.mkdir', 'os.mkdir', (["(path + '/cars')"], {}), "(path + '/cars')\n", (194, 210), False, 'import os\n'), ((284, 295), 'time.time', 'time.time', ([], {}), '()\n', (293, 295), False, 'import time\n'), ((1451, 1462), 'time.time', 'time.time', ([], {}), '()\n', (1460, 1462), False, 'import time\n'), ((505, 552), 'requests.post', 'requests.post', ([], {'url': '"""http://rpi4gb"""', 'files': 'files'}), "(url='http://rpi4gb', files=files)\n", (518, 552), False, 'import requests\n'), ((1417, 1433), 'os.remove', 'os.remove', (['entry'], {}), '(entry)\n', (1426, 1433), False, 'import os\n'), ((812, 828), 'os.remove', 'os.remove', (['entry'], {}), '(entry)\n', (821, 828), False, 'import os\n'), ((1285, 1315), 'os.rename', 'os.rename', (['entry.path', 'newPath'], {}), '(entry.path, newPath)\n', (1294, 1315), False, 'import os\n')] |
#!/usr/bin/python3
import pytest
from brownie import network, Contract, Wei, chain
@pytest.fixture(scope="module")
def requireMainnetFork():
assert (network.show_active() == "mainnet-fork" or network.show_active() == "mainnet-fork-alchemy")
@pytest.fixture(scope="module")
def setFeesController(bzx, stakingV1, accounts):
bzx.setFeesController(stakingV1, {"from": bzx.owner()})
assets = [
"0x56d811088235F11C8920698a204A5010a788f4b3", # BZRX
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", # WETH
"0x6B175474E89094C44Da98b954EedeAC495271d0F", # DAI
"0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", # USDC
"0xdAC17F958D2ee523a2206206994597C13D831ec7", # USDT
"0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599", # WBTC
"0x7Fc66500c84A76Ad7e9c93437bFc5Ac33E2DDaE9", # AAVE
"0xdd974D5C2e2928deA5F71b9825b8b646686BD200", # KNC
"0x9f8F72aA9304c8B593d555F12eF6589cC3A579A2", # MKR
"0x514910771AF9Ca656af840dff83E8264EcF986CA", # LINK
"0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e", # YFI
]
bzx.withdrawFees(assets, accounts[8], 0, {'from': stakingV1})
@pytest.fixture(scope="module")
def LPT(accounts, TestToken):
LPT = loadContractFromAbi(
"0xe26A220a341EAca116bDa64cF9D5638A935ae629", "LPT", TestToken.abi)
return LPT
@pytest.fixture(scope="module")
def vBZRX(accounts, BZRXVestingToken):
vBZRX = loadContractFromAbi(
"0xb72b31907c1c95f3650b64b2469e08edacee5e8f", "vBZRX", BZRXVestingToken.abi)
vBZRX.transfer(accounts[0], 1000*10**18, {'from': vBZRX.address})
return vBZRX
@pytest.fixture(scope="module")
def iUSDC(accounts, LoanTokenLogicStandard):
iUSDC = loadContractFromAbi(
"0x32E4c68B3A4a813b710595AebA7f6B7604Ab9c15", "iUSDC", LoanTokenLogicStandard.abi)
return iUSDC
@pytest.fixture(scope="module")
def WETH(accounts, TestWeth):
WETH = loadContractFromAbi(
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", "WETH", TestWeth.abi)
return WETH
@pytest.fixture(scope="module")
def USDC(accounts, TestToken):
USDC = loadContractFromAbi(
"0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", "USDC", TestToken.abi)
return USDC
@pytest.fixture(scope="module")
def BZRX(accounts, TestToken):
BZRX = loadContractFromAbi(
"0x56d811088235F11C8920698a204A5010a788f4b3", "BZRX", TestToken.abi)
BZRX.transfer(accounts[0], 1000*10**18, {'from': BZRX.address})
return BZRX
@pytest.fixture(scope="module")
def iBZRX(accounts, BZRX, LoanTokenLogicStandard):
iBZRX = loadContractFromAbi(
"0x18240BD9C07fA6156Ce3F3f61921cC82b2619157", "iBZRX", LoanTokenLogicStandard.abi)
BZRX.approve(iBZRX, 10*10**50, {'from': accounts[0]})
iBZRX.mint(accounts[0], 100*10**18, {'from': accounts[0]})
return iBZRX
# def loadContractFromEtherscan(address, alias):
# try:
# return Contract(alias)
# except ValueError:
# contract = Contract.from_explorer(address)
# contract.set_alias(alias)
# return contract
def loadContractFromAbi(address, alias, abi):
try:
return Contract(alias)
except ValueError:
contract = Contract.from_abi(alias, address=address, abi=abi)
return contract
# TODO add LPToken
def testStake_UnStake(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts):
# tx =
# tx.info()
balanceOfBZRX = BZRX.balanceOf(accounts[0])
balanceOfvBZRX = vBZRX.balanceOf(accounts[0])
balanceOfiBZRX = iBZRX.balanceOf(accounts[0])
BZRX.approve(stakingV1, balanceOfBZRX, {'from': accounts[0]})
vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[0]})
iBZRX.approve(stakingV1, balanceOfiBZRX, {'from': accounts[0]})
tokens = [BZRX, vBZRX, iBZRX]
amounts = [balanceOfBZRX, balanceOfvBZRX, balanceOfiBZRX]
tx = stakingV1.stake(
tokens, amounts)
# tx.info()
# print("tx", tx.events)
balanceOfBZRXAfter = BZRX.balanceOf(accounts[0])
balanceOfvBZRXAfter = vBZRX.balanceOf(accounts[0])
balanceOfiBZRXAfter = iBZRX.balanceOf(accounts[0])
    # since vesting has already started, a small balance is already vested
assert(balanceOfBZRXAfter > 0 and balanceOfBZRXAfter/10**18 < 1)
assert(balanceOfvBZRXAfter == 0)
assert(balanceOfiBZRXAfter == 0)
stakedEvents = tx.events['Stake']
for index, stakedEvent in enumerate(stakedEvents, 0):
assert(stakedEvent['user'] == accounts[0])
assert(stakedEvent['token'] == tokens[index])
assert(stakedEvent['delegate'] == accounts[0])
assert(stakedEvent['amount'] == amounts[index])
transferEvents = tx.events['Transfer']
    i = 0  # an extra Transfer event means the indexes do not align with accounts
    for index, transferEvent in enumerate(transferEvents, 0):
        # most probably a bug in brownie: events are not returned as a properly ordered dict
if (transferEvent['from'] == accounts[i]):
assert(transferEvent['from'] == accounts[i])
assert(transferEvent['to'] == stakingV1)
assert(transferEvent['value'] == amounts[i])
i += 1
tx = stakingV1.unstake(tokens, amounts)
# tx.info()
unStakedEvents = tx.events['Unstake']
for index, unStakedEvent in enumerate(unStakedEvents, 0):
assert(unStakedEvent['user'] == accounts[0])
assert(unStakedEvent['token'] == tokens[index])
assert(unStakedEvent['delegate'] == accounts[0])
assert(unStakedEvent['amount'] == amounts[index])
transferEvents = tx.events['Transfer']
    i = 0  # an extra Transfer event means the indexes do not align with accounts
    for index, transferEvent in enumerate(transferEvents, 0):
        # most probably a bug in brownie: events are not returned as a properly ordered dict
if (transferEvent['from'] == accounts[i]):
assert(transferEvent['from'] == stakingV1)
assert(transferEvent['to'] == accounts[0])
assert(transferEvent['value'] == amounts[index])
i += 1
assert True
# delegate was removed for now
# def testStake_UnStake_WithDelegate(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts):
# # tx =
# # tx.info()
# balanceOfBZRX = BZRX.balanceOf(accounts[0])
# balanceOfvBZRX = vBZRX.balanceOf(accounts[0])
# balanceOfiBZRX = iBZRX.balanceOf(accounts[0])
# BZRX.approve(stakingV1, balanceOfBZRX, {'from': accounts[0]})
# vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[0]})
# iBZRX.approve(stakingV1, balanceOfiBZRX, {'from': accounts[0]})
# tokens = [BZRX, vBZRX, iBZRX]
# amounts = [balanceOfBZRX, balanceOfvBZRX, balanceOfiBZRX]
# tx = stakingV1.stake(tokens, amounts)
# tx = stakingV1.changeDelegate(accounts[1])
# delegateChanged = tx.events['ChangeDelegate']
# assert(delegateChanged['user'] == accounts[0])
# assert(delegateChanged['oldDelegate'] ==
# accounts[0])
# assert(delegateChanged['newDelegate'] == accounts[1])
# tx = stakingV1.unstake(tokens, amounts)
# # tx.info()
# unStakedEvents = tx.events['Unstake']
# for index, unStakedEvent in enumerate(unStakedEvents, 0):
# assert(unStakedEvent['user'] == accounts[0])
# assert(unStakedEvent['token'] == tokens[index])
# assert(unStakedEvent['delegate'] == accounts[1])
# assert(unStakedEvent['amount'] == amounts[index])
# transferEvents = tx.events['Transfer']
# for index, transferEvent in enumerate(transferEvents, 0):
# assert(transferEvent['from'] == stakingV1)
# assert(transferEvent['to'] == accounts[0])
# assert(transferEvent['value'] == amounts[index])
# balances = stakingV1.balanceOfByAssets.call(accounts[0])
# assert(balances[0] == 0)
# assert(balances[1] == 0)
# assert(balances[2] == 0)
# assert(balances[3] == 0)
# assert True
def testStake_SweeepFees(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts, iUSDC, USDC):
tx = stakingV1.sweepFees()
withdrawFeesEvent = tx.events['WithdrawFees']
assert(withdrawFeesEvent[0]['sender'] == accounts[0])
convertFeesEvent = tx.events['ConvertFees']
assert(convertFeesEvent[0]['sender'] == accounts[0])
distributeFeesEvent = tx.events['DistributeFees']
assert(distributeFeesEvent[0]['sender'] == accounts[0])
assert True
def testStake_BZRXProfit(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts, iUSDC, USDC, WETH):
earnedAmounts = stakingV1.earned(accounts[0])
assert(earnedAmounts == (0, 0, 0, 0))
print("earnedAmounts", earnedAmounts)
balanceOfBZRX = BZRX.balanceOf(accounts[0])
BZRX.approve(stakingV1, balanceOfBZRX, {'from': accounts[0]})
tokens = [BZRX, vBZRX, iBZRX]
amounts = [100*10**18, 0, 0]
tx = stakingV1.stake(tokens, amounts)
# iUSDC.borrow(0, 100*10**18, 1*10**18, "0x0000000000000000000000000000000000000000", accounts[0], accounts[0], {'value': Wei("1 ether")})
borrowAmount = 100*10**6
borrowTime = 7884000
collateralAmount = 1*10**18
collateralAddress = "0x0000000000000000000000000000000000000000"
txBorrow = iUSDC.borrow("", borrowAmount, borrowTime, collateralAmount, collateralAddress,
accounts[0], accounts[0], b"", {'from': accounts[0], 'value': Wei(collateralAmount)})
payBorrowingFeeEvent = filterEvents(
'0xfb6c38ae4fdd498b3a5003f02ca4ca5340dfedb36b1b100c679eb60633b2c0a7', txBorrow.events)
payBorrowingFeeAmount = int(str(payBorrowingFeeEvent['data']), 0)
payLendingFeeEvent = filterEvents(
'0x40a75ae5f7a5336e75f7c7977e12c4b46a9ac0f30de01a2d5b6c1a4f4af63587', txBorrow.events)
payLendingFeeAmount = int(str(payLendingFeeEvent['data']), 0)
txSweep = stakingV1.sweepFees()
borrowFee = txSweep.events['WithdrawBorrowingFees'][0]
assert(borrowFee['sender'] == stakingV1)
assert(borrowFee['token'] == WETH)
assert(borrowFee['sender'] == stakingV1)
assert(borrowFee['amount'] == payBorrowingFeeAmount)
lendingFee = txSweep.events['WithdrawLendingFees'][0]
assert(lendingFee['sender'] == stakingV1)
assert(lendingFee['token'] == USDC)
assert(lendingFee['sender'] == stakingV1)
assert(lendingFee['amount'] == payLendingFeeAmount)
assert(txSweep.events['AddRewards'][0]['sender'] == accounts[0])
bzrxAmount = txSweep.events['AddRewards'][0]['bzrxAmount']
stableCoinAmount = txSweep.events['AddRewards'][0]['stableCoinAmount']
assert(txSweep.events['DistributeFees'][0]['sender'] == accounts[0])
bzrxRewards = txSweep.events['DistributeFees'][0]['bzrxRewards']
stableCoinRewards = txSweep.events['DistributeFees'][0]['stableCoinRewards']
assert(bzrxAmount == bzrxRewards)
assert(stableCoinAmount == stableCoinRewards)
earned = stakingV1.earned(accounts[0])
# we have roundings for last 1 digit
print("roundings bzrx", str(bzrxRewards), str(earned[0]))
assert(bzrxRewards - earned[0] <= 1)
# we have roundings for last 1 digit
print("roundings stableCoin", str(stableCoinAmount), str(earned[1]))
assert(stableCoinAmount - earned[1] <= 1)
#stakingV1.claim(False, {'from': accounts[0]})
#earned = stakingV1.earned(accounts[0])
# second user staking. he should get zero rewards if he just staked
earnedAmounts = stakingV1.earned(accounts[1])
assert(earnedAmounts == (0, 0, 0, 0))
BZRX.transfer(accounts[1], 1000*10**18, {'from': BZRX.address})
balanceOfBZRX = BZRX.balanceOf(accounts[1])
BZRX.approve(stakingV1, balanceOfBZRX, {'from': accounts[1]})
tokens = [BZRX, vBZRX, iBZRX]
amounts2 = [100*10**18, 0, 0]
tx = stakingV1.stake(
tokens, amounts2, {'from': accounts[1]})
earnedAmounts = stakingV1.earned(accounts[1])
print(str(earnedAmounts))
assert(earnedAmounts == (0, 0, 0, 0))
txBorrow = iUSDC.borrow("", borrowAmount, borrowTime, collateralAmount, collateralAddress,
accounts[0], accounts[0], b"", {'from': accounts[0], 'value': Wei(collateralAmount)})
txSweepSecondAcc = stakingV1.sweepFees()
print(str(amounts), str(amounts2))
assert(amounts[0] == amounts2[0])
assert(stakingV1.balanceOfStored(
accounts[0]) == stakingV1.balanceOfStored(accounts[1]))
'''
earnedAfter = stakingV1.earned(accounts[0])
earned1After = stakingV1.earned(accounts[1])
print("account[0] before", str(earned[0]))
print("account[0] after", str(earnedAfter[0] - earned[0]))
print("account[1] after", str(earned1After[0]))
print("diff", str(earned1After[0] - earnedAfter[0] + earned[0]))
'''
assert True
def filterEvents(topic, events):
for event in events:
for key in event.keys():
if key == 'topic1':
if event[key] == topic:
payBorrowingFeeEvent = event
break
return payBorrowingFeeEvent
def testStake_VestingFees(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts, iUSDC, USDC, WETH):
balanceOfvBZRX = vBZRX.balanceOf(accounts[0])
vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[0]})
stakingV1.stake([vBZRX], [balanceOfvBZRX])
# borrowing to make fees
borrowAmount = 100*10**6
borrowTime = 7884000
collateralAmount = 1*10**18
collateralAddress = "0x0000000000000000000000000000000000000000"
txBorrow = iUSDC.borrow("", borrowAmount, borrowTime, collateralAmount, collateralAddress,
accounts[0], accounts[0], b"", {'from': accounts[0], 'value': Wei(collateralAmount)})
sweepTx = stakingV1.sweepFees()
earningsDuringVesting = stakingV1.earned(accounts[0])
# vesting already started
assert(earningsDuringVesting[0] >
0 and earningsDuringVesting[0]/10**18 < 1)
assert(earningsDuringVesting[1] > 0)
assert(earningsDuringVesting[2] > 0)
assert(earningsDuringVesting[3] > 0)
totalVestingFeesBzrx = earningsDuringVesting[2]
totalVestingFees3Poll = earningsDuringVesting[3]
# moving time after vesting end
chain.sleep(vBZRX.vestingEndTimestamp() - chain.time() + 100)
chain.mine()
earnings = stakingV1.earned(accounts[0])
assert(earnings[0] > 0)
assert(earnings[1] > 0)
assert(earnings[2] == 0)
assert(earnings[3] == 0)
assert(earnings[0] >= totalVestingFeesBzrx)
assert(earnings[1] >= totalVestingFees3Poll)
# assert False
def testStake_vestingClaimBZRX(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, accounts, iUSDC, USDC, WETH):
vBZRX.transfer(accounts[1], 1000*10**18, {'from': vBZRX.address})
balanceOfvBZRX = vBZRX.balanceOf(accounts[1])
vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[1]})
stakingV1.stake([vBZRX], [balanceOfvBZRX], {'from': accounts[1]})
# moving time to somewhere 1000 sec after vesting start
chain.sleep(vBZRX.vestingCliffTimestamp() - chain.time() + 1000)
chain.mine()
# BZRX.balanceOf+ vBZRX.balanceOf_bzrx_remaining should be equal to 1000
stakingV1.exit({'from': accounts[1]})
assert(BZRX.balanceOf(accounts[1]) > 0)
assert True
def testStake_vBZRXVotingRigthsShouldDiminishOverTime(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, LPT, accounts, iUSDC, USDC, WETH):
vBZRX.transfer(accounts[1], 100e18, {'from': vBZRX})
balanceOfvBZRX = vBZRX.balanceOf(accounts[1])
vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[1]})
tokens = [vBZRX]
amounts = [balanceOfvBZRX]
tx = stakingV1.stake(tokens, amounts, {'from': accounts[1]})
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower <= balanceOfvBZRX/2)
# moving time to somewhere 1000 sec after vesting start
chain.sleep(vBZRX.vestingCliffTimestamp() - chain.time() + 1000)
chain.mine()
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower <= balanceOfvBZRX/2)
# moving time after vesting end
chain.sleep(vBZRX.vestingEndTimestamp() - chain.time() + 100)
chain.mine()
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower <= balanceOfvBZRX)
assert True
def testStake_vBZRXVotingRigthsShouldDiminishOverTime2(requireMainnetFork, stakingV1, bzx, setFeesController, BZRX, vBZRX, iBZRX, LPT, accounts, iUSDC, USDC, WETH):
vBZRX.transfer(accounts[1], 100e18, {'from': vBZRX})
balanceOfvBZRX = vBZRX.balanceOf(accounts[1])
vBZRX.approve(stakingV1, balanceOfvBZRX, {'from': accounts[1]})
tokens = [vBZRX]
amounts = [balanceOfvBZRX]
tx = stakingV1.stake(tokens, amounts, {'from': accounts[1]})
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower < balanceOfvBZRX/2)
# moving time to somewhere 1000 sec after vesting start
chain.sleep(vBZRX.vestingCliffTimestamp() - chain.time() + 1000)
chain.mine()
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower < balanceOfvBZRX/2)
# moving time after vesting end
chain.sleep(vBZRX.vestingEndTimestamp() - chain.time() + 100)
chain.mine()
votingPower = stakingV1.delegateBalanceOf(accounts[1])
assert(votingPower < balanceOfvBZRX)
assert True
| [
"brownie.Wei",
"brownie.Contract.from_abi",
"brownie.chain.time",
"brownie.Contract",
"brownie.network.show_active",
"brownie.chain.mine",
"pytest.fixture"
] | [((87, 117), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (101, 117), False, 'import pytest\n'), ((251, 281), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (265, 281), False, 'import pytest\n'), ((1159, 1189), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1173, 1189), False, 'import pytest\n'), ((1345, 1375), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1359, 1375), False, 'import pytest\n'), ((1623, 1653), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1637, 1653), False, 'import pytest\n'), ((1843, 1873), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1857, 1873), False, 'import pytest\n'), ((2031, 2061), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2045, 2061), False, 'import pytest\n'), ((2221, 2251), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2235, 2251), False, 'import pytest\n'), ((2479, 2509), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2493, 2509), False, 'import pytest\n'), ((14221, 14233), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (14231, 14233), False, 'from brownie import network, Contract, Wei, chain\n'), ((15041, 15053), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (15051, 15053), False, 'from brownie import network, Contract, Wei, chain\n'), ((15937, 15949), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (15947, 15949), False, 'from brownie import network, Contract, Wei, chain\n'), ((16161, 16173), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (16171, 16173), False, 'from brownie import network, Contract, Wei, chain\n'), ((16991, 17003), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (17001, 17003), False, 'from brownie import network, Contract, Wei, chain\n'), ((17214, 17226), 'brownie.chain.mine', 'chain.mine', ([], {}), '()\n', (17224, 17226), False, 'from brownie import network, Contract, Wei, chain\n'), ((3130, 3145), 'brownie.Contract', 'Contract', (['alias'], {}), '(alias)\n', (3138, 3145), False, 'from brownie import network, Contract, Wei, chain\n'), ((156, 177), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (175, 177), False, 'from brownie import network, Contract, Wei, chain\n'), ((199, 220), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (218, 220), False, 'from brownie import network, Contract, Wei, chain\n'), ((3188, 3238), 'brownie.Contract.from_abi', 'Contract.from_abi', (['alias'], {'address': 'address', 'abi': 'abi'}), '(alias, address=address, abi=abi)\n', (3205, 3238), False, 'from brownie import network, Contract, Wei, chain\n'), ((9363, 9384), 'brownie.Wei', 'Wei', (['collateralAmount'], {}), '(collateralAmount)\n', (9366, 9384), False, 'from brownie import network, Contract, Wei, chain\n'), ((12092, 12113), 'brownie.Wei', 'Wei', (['collateralAmount'], {}), '(collateralAmount)\n', (12095, 12113), False, 'from brownie import network, Contract, Wei, chain\n'), ((13644, 13665), 'brownie.Wei', 'Wei', (['collateralAmount'], {}), '(collateralAmount)\n', (13647, 13665), False, 'from brownie import network, Contract, Wei, chain\n'), ((14197, 14209), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (14207, 14209), False, 'from brownie import network, 
Contract, Wei, chain\n'), ((15016, 15028), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (15026, 15028), False, 'from brownie import network, Contract, Wei, chain\n'), ((15912, 15924), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (15922, 15924), False, 'from brownie import network, Contract, Wei, chain\n'), ((16137, 16149), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (16147, 16149), False, 'from brownie import network, Contract, Wei, chain\n'), ((16966, 16978), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (16976, 16978), False, 'from brownie import network, Contract, Wei, chain\n'), ((17190, 17202), 'brownie.chain.time', 'chain.time', ([], {}), '()\n', (17200, 17202), False, 'from brownie import network, Contract, Wei, chain\n')] |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Retrieve a list of device references that have specific software installed"
class Input:
SOFTWARE = "software"
class Output:
MACHINES = "machines"
class FindMachinesWithInstalledSoftwareInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"software": {
"type": "string",
"title": "Software",
"description": "Name of the software to be searched",
"order": 1
}
},
"required": [
"software"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class FindMachinesWithInstalledSoftwareOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"machines": {
"type": "array",
"title": "Machines",
"description": "List of machines with provided software",
"items": {
"$ref": "#/definitions/machine_software"
},
"order": 1
}
},
"required": [
"machines"
],
"definitions": {
"machine_software": {
"type": "object",
"title": "machine_software",
"properties": {
"computerDnsName": {
"type": "string",
"title": "Computer DNS Name",
"description": "Computer DNS name",
"order": 2
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 1
},
"osPlatform": {
"type": "string",
"title": "OS Platform",
"description": "OS platform",
"order": 3
},
"rbacGroupId": {
"type": "number",
"title": "RBAC Group ID",
"description": "RBAC group ID",
"order": 5
},
"rbacGroupName": {
"type": "string",
"title": "RBAC Group Name",
"description": "RBAC group name",
"order": 4
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"json.loads"
] | [((392, 686), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "software": {\n "type": "string",\n "title": "Software",\n "description": "Name of the software to be searched",\n "order": 1\n }\n },\n "required": [\n "software"\n ]\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "software": {\n "type": "string",\n "title": "Software",\n "description": "Name of the software to be searched",\n "order": 1\n }\n },\n "required": [\n "software"\n ]\n}\n """\n )\n', (402, 686), False, 'import json\n'), ((860, 2173), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "machines": {\n "type": "array",\n "title": "Machines",\n "description": "List of machines with provided software",\n "items": {\n "$ref": "#/definitions/machine_software"\n },\n "order": 1\n }\n },\n "required": [\n "machines"\n ],\n "definitions": {\n "machine_software": {\n "type": "object",\n "title": "machine_software",\n "properties": {\n "computerDnsName": {\n "type": "string",\n "title": "Computer DNS Name",\n "description": "Computer DNS name",\n "order": 2\n },\n "id": {\n "type": "string",\n "title": "ID",\n "description": "ID",\n "order": 1\n },\n "osPlatform": {\n "type": "string",\n "title": "OS Platform",\n "description": "OS platform",\n "order": 3\n },\n "rbacGroupId": {\n "type": "number",\n "title": "RBAC Group ID",\n "description": "RBAC group ID",\n "order": 5\n },\n "rbacGroupName": {\n "type": "string",\n "title": "RBAC Group Name",\n "description": "RBAC group name",\n "order": 4\n }\n }\n }\n }\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "machines": {\n "type": "array",\n "title": "Machines",\n "description": "List of machines with provided software",\n "items": {\n "$ref": "#/definitions/machine_software"\n },\n "order": 1\n }\n },\n "required": [\n "machines"\n ],\n "definitions": {\n "machine_software": {\n "type": "object",\n "title": "machine_software",\n "properties": {\n "computerDnsName": {\n "type": "string",\n "title": "Computer DNS Name",\n "description": "Computer DNS name",\n "order": 2\n },\n "id": {\n "type": "string",\n "title": "ID",\n "description": "ID",\n "order": 1\n },\n "osPlatform": {\n "type": "string",\n "title": "OS Platform",\n "description": "OS platform",\n "order": 3\n },\n "rbacGroupId": {\n "type": "number",\n "title": "RBAC Group ID",\n "description": "RBAC group ID",\n "order": 5\n },\n "rbacGroupName": {\n "type": "string",\n "title": "RBAC Group Name",\n "description": "RBAC group name",\n "order": 4\n }\n }\n }\n }\n}\n """\n )\n', (870, 2173), False, 'import json\n')] |
import random
from locust import HttpLocust, TaskSet, task
from data import search_options, market_slugs
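# Load test: each simulated user requests the markets results page with a
# random combination of product-category and country filters.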
class UserBehavior(TaskSet):
@task(1)
def listing_page(self):
base_url = '/selling-online-overseas/markets/results'
category_count = random.randint(0, 5)
categories = []
        for _ in range(category_count):
categories.append(random.choice(search_options['categories']))
country_count = random.randint(0, 5)
countries = []
        for _ in range(country_count):
countries.append(random.choice(search_options['countries']))
category_filter = 'product_category={0}'.format('&product_category='.join(categories))
country_filter = 'operating_countries={0}'.format('&operating_countries='.join(countries))
url = "{0}?{1}&{2}".format(base_url, category_filter, country_filter)
self.client.get(url, name=base_url)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 9000
| [
"random.choice",
"locust.task",
"random.randint"
] | [((144, 151), 'locust.task', 'task', (['(1)'], {}), '(1)\n', (148, 151), False, 'from locust import HttpLocust, TaskSet, task\n'), ((268, 288), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (282, 288), False, 'import random\n'), ((454, 474), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (468, 474), False, 'import random\n'), ((384, 427), 'random.choice', 'random.choice', (["search_options['categories']"], {}), "(search_options['categories'])\n", (397, 427), False, 'import random\n'), ((567, 609), 'random.choice', 'random.choice', (["search_options['countries']"], {}), "(search_options['countries'])\n", (580, 609), False, 'import random\n')] |
#! /usr/bin/env python3
from .localtyping import *
from collections import Counter
from .remapper import Remapper
def createGramMap(
sRawStrSets: Iterable[RawStringSet],
) -> GramMap:
gramFreqs = Counter()
for sRawStrSet in sRawStrSets:
gramFreqs.update(sRawStrSet)
gramMapper = Remapper()
return {
gram: gramMapper.next()
for gram, _ in sorted(
gramFreqs.items(),
key=lambda x: (x[1], x[0])
)
}
def updateGramMap(
gramMap: GramMap,
rRawStrSet: RawStringSet
) -> None:
gramMapper = Remapper(start=-1, step=-1)
gramMap.update(
(gram, gramMapper.next())
for gram in rRawStrSet
if gram not in gramMap
)
def applyGramMap(
gramMap: GramMap,
rRawStrSet: RawStringSet
) -> StringSet:
return sorted(gramMap[gram] for gram in rRawStrSet)
| [
"collections.Counter"
] | [((216, 225), 'collections.Counter', 'Counter', ([], {}), '()\n', (223, 225), False, 'from collections import Counter\n')] |
# %%
# live demo
import mediapipe as mp
from PIL import Image
import numpy as np
import utilities
from pathlib import Path
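# Run MediaPipe FaceMesh on a single photo, annotate the eye landmarks and the
# inter-eye distance, and save the annotated copy next to the original.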
image = Image.open(Path(__file__).parent / "face_orig.jpeg")
image = np.array(image)
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
# Drawing spec and FaceMesh settings (carried over from the MediaPipe webcam example):
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5, min_tracking_confidence=0.5
) as face_mesh:
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
if results.multi_face_landmarks:
landmarks = utilities.get_avg_landmarks_from_results(
results, utilities.UNIQUE_EYE_COORDINATES
)
image = utilities.add_landmarks_to_image(image, landmarks)
distance = utilities.get_eye_distance(landmarks)
image = utilities.add_distance_to_image(image, distance)
image = Image.fromarray(image)
image.save(Path(__file__).parent / "face_proc.jpeg")
| [
"PIL.Image.fromarray",
"utilities.get_eye_distance",
"utilities.add_landmarks_to_image",
"pathlib.Path",
"numpy.array",
"utilities.add_distance_to_image",
"utilities.get_avg_landmarks_from_results"
] | [((193, 208), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (201, 208), True, 'import numpy as np\n'), ((1010, 1032), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1025, 1032), False, 'from PIL import Image\n'), ((685, 773), 'utilities.get_avg_landmarks_from_results', 'utilities.get_avg_landmarks_from_results', (['results', 'utilities.UNIQUE_EYE_COORDINATES'], {}), '(results, utilities.\n UNIQUE_EYE_COORDINATES)\n', (725, 773), False, 'import utilities\n'), ((819, 869), 'utilities.add_landmarks_to_image', 'utilities.add_landmarks_to_image', (['image', 'landmarks'], {}), '(image, landmarks)\n', (851, 869), False, 'import utilities\n'), ((893, 930), 'utilities.get_eye_distance', 'utilities.get_eye_distance', (['landmarks'], {}), '(landmarks)\n', (919, 930), False, 'import utilities\n'), ((951, 999), 'utilities.add_distance_to_image', 'utilities.add_distance_to_image', (['image', 'distance'], {}), '(image, distance)\n', (982, 999), False, 'import utilities\n'), ((143, 157), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (147, 157), False, 'from pathlib import Path\n'), ((1044, 1058), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1048, 1058), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# Basic Connection Example - connect1.py
import socket
import time
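# Measure how long it takes to open a TCP connection to SERVER, averaged over `loops` attempts.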
loops = 5
total_time_elapsed = 0
SERVER = "www.stackoverflow.com"
for n in range(1, loops + 1):
# print ("Creating socket...",)
start = time.perf_counter()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print ("done.")
# print ("Connecting to remote host...",)
try:
s.connect((SERVER, 80))
except Exception as e:
print("Unable to connect! Error: {} {}".format(type(Exception), e))
    end = time.perf_counter()
    s.close()  # close the socket so each attempt starts with a fresh connection
# print ("done.")
elapsed = end - start
total_time_elapsed += elapsed
print("ATTEMPT {}--Time taken to connect to {}: {}"
.format(n, SERVER, elapsed))
avg = total_time_elapsed/loops
print("\nAverage time taken: {}".format(avg)) | [
"time.perf_counter",
"socket.socket"
] | [((237, 256), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (254, 256), False, 'import time\n'), ((265, 314), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (278, 314), False, 'import socket\n'), ((538, 557), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (555, 557), False, 'import time\n')] |
"""
Multivariate polynomial extenders.
"""
import nzmath.rational as rational
import nzmath.ring as ring
import nzmath.poly.termorder as termorder
import nzmath.poly.ring as poly_ring
import nzmath.poly.uniutil as uniutil
import nzmath.poly.multivar as multivar
import nzmath.poly.ratfunc as ratfunc
_MIXIN_MSG = "%s is mix-in"
class OrderProvider (object):
"""
OrderProvider provides order and related operations.
"""
def __init__(self, order):
"""
Do not instantiate OrderProvider.
This initializer should be called from descendant:
OrderProvider.__init__(self, order)
"""
if type(self) is OrderProvider:
raise NotImplementedError(_MIXIN_MSG % self.__class__.__name__)
self.order = order
class NestProvider (object):
"""
Provide nest/unnest pair to convert a multivar polynomial to a
univar polynomial of polynomial coefficient and opposite
direction.
"""
def leading_variable(self):
"""
Return the position of the leading variable (the leading term
among all total degree one terms).
The leading term varies with term orders, so does the result.
The term order can be specified via the attribute 'order'.
"""
if hasattr(self, 'order'):
order = self.order
pivot, pvar = (1,) + (0,)*(self.number_of_variables - 1), 0
for var in range(1, self.number_of_variables):
vindex = (0,)*var + (1,) + (0,)*(self.number_of_variables - var - 1)
if order.cmp(pivot, vindex) < 0:
pivot, pvar = vindex, var
else:
pvar = 0
return pvar
def nest(self, outer, coeffring):
"""
Nest the polynomial by extracting outer variable at the given
position.
"""
combined = {}
if self.number_of_variables == 2:
itercoeff = lambda coeff: [(i[0], c) for (i, c) in coeff]
poly = uniutil.polynomial
polyring = poly_ring.PolynomialRing.getInstance(coeffring)
elif self.number_of_variables >= 3:
itercoeff = lambda coeff: coeff
poly = self.__class__
polyring = poly_ring.PolynomialRing.getInstance(coeffring, self.number_of_variables - 1)
else:
raise TypeError("number of variable is not multiple")
for index, coeff in self.combine_similar_terms(outer):
combined[index] = poly(itercoeff(coeff), coeffring=coeffring)
return uniutil.polynomial(combined, coeffring=polyring)
def unnest(self, q, outer, coeffring):
"""
Unnest the nested polynomial q by inserting outer variable at
the given position.
"""
q_coeff = {}
for d, cpoly in q:
for inner_d, inner_c in cpoly:
if isinstance(inner_d, int):
inner_d = [inner_d]
else:
inner_d = list(inner_d)
inner_d.insert(outer, d)
q_coeff[tuple(inner_d)] = inner_c
return self.__class__(q_coeff, coeffring=coeffring)
class RingElementProvider (ring.CommutativeRingElement):
"""
Provides interfaces for ring.CommutativeRingElement.
"""
def __init__(self):
"""
Do not instantiate RingElementProvider.
This initializer should be called from descendant:
RingElementProvider.__init__(self)
"""
if type(self) is RingElementProvider:
raise NotImplementedError(_MIXIN_MSG % self.__class__.__name__)
ring.CommutativeRingElement.__init__(self)
self._coefficient_ring = None
self._ring = None
def getRing(self):
"""
Return an object of a subclass of Ring, to which the element
belongs.
"""
if self._coefficient_ring is None or self._ring is None:
myring = None
for c in self.itercoefficients():
cring = ring.getRing(c)
if not myring or myring != cring and myring.issubring(cring):
myring = cring
elif not cring.issubring(myring):
myring = myring.getCommonSuperring(cring)
if not myring:
myring = rational.theIntegerRing
self.set_coefficient_ring(myring)
return self._ring
def getCoefficientRing(self):
"""
Return the coefficient ring.
"""
return self._coefficient_ring
def set_coefficient_ring(self, coeffring):
if self._coefficient_ring is None:
self._coefficient_ring = coeffring
self._ring = poly_ring.PolynomialRing.getInstance(self._coefficient_ring, self.number_of_variables)
class PseudoDivisionProvider (object):
"""
PseudoDivisionProvider provides pseudo divisions for multivariate
polynomials. It is assumed that the coefficient ring of the
polynomials is a domain.
The class should be used with NestProvider, RingElementProvider.
"""
def pseudo_divmod(self, other):
"""
self.pseudo_divmod(other) -> (Q, R)
Q, R are polynomials such that
d**(deg(self) - deg(other) + 1) * self == other * Q + R,
w.r.t. a fixed variable, where d is the leading coefficient of
other.
The leading coefficient varies with term orders, so does the
result. The term order can be specified via the attribute
'order'.
"""
var = self.leading_variable()
coeffring = self.getCoefficientRing()
s = self.nest(var, coeffring)
o = other.nest(var, coeffring)
q, r = s.pseudo_divmod(o)
qpoly = self.unnest(q, var, coeffring)
rpoly = self.unnest(r, var, coeffring)
return qpoly, rpoly
def pseudo_floordiv(self, other):
"""
self.pseudo_floordiv(other) -> Q
Q is a polynomial such that
d**(deg(self) - deg(other) + 1) * self == other * Q + R,
where d is the leading coefficient of other and R is a
polynomial.
The leading coefficient varies with term orders, so does the
result. The term order can be specified via the attribute
'order'.
"""
var = self.leading_variable()
coeffring = self.getCoefficientRing()
s = self.nest(var, coeffring)
o = other.nest(var, coeffring)
q = s.pseudo_floordiv(o)
return self.unnest(q, var, coeffring)
def pseudo_mod(self, other):
"""
self.pseudo_mod(other) -> R
R is a polynomial such that
d**(deg(self) - deg(other) + 1) * self == other * Q + R,
where d is the leading coefficient of other and Q a
polynomial.
The leading coefficient varies with term orders, so does the
result. The term order can be specified via the attribute
'order'.
"""
var = self.leading_variable()
coeffring = self.getCoefficientRing()
s = self.nest(var, coeffring)
o = other.nest(var, coeffring)
r = s.pseudo_mod(o)
return self.unnest(r, var, coeffring)
def __truediv__(self, other):
"""
self / other
The result is a rational function.
"""
return ratfunc.RationalFunction(self, other)
def exact_division(self, other):
"""
Return quotient of exact division.
"""
coeffring = self.getCoefficientRing()
if other in coeffring:
new_coeffs = []
keep_ring = True
for i, c in self:
ratio = c / other
if keep_ring and ratio not in coeffring:
keep_ring = False
new_coeffring = ratio.getRing()
new_coeffs.append((i, ratio))
if keep_ring:
return self.__class__(new_coeffs, coeffring=coeffring)
else:
return self.__class__(new_coeffs, coeffring=new_coeffring)
var = self.leading_variable()
coeffring = self.getCoefficientRing()
s = self.nest(var, coeffring)
o = other.nest(var, coeffring)
q = s.exact_division(o)
return self.unnest(q, var, coeffring)
class GcdProvider (object):
"""
Provides greatest common divisor for multivariate polynomial.
The class should be used with NestProvider, RingElementProvider.
"""
def gcd(self, other):
"""
Return gcd.
The nested polynomials' gcd is used.
"""
var = self.leading_variable()
coeffring = self.getCoefficientRing()
s = self.nest(var, coeffring)
o = other.nest(var, coeffring)
if hasattr(s, "gcd"):
g = s.gcd(o)
elif hasattr(s, "subresultant_gcd"):
g = s.subresultant_gcd(o)
else:
raise TypeError("no gcd method available")
return self.unnest(g, var, coeffring)
class RingPolynomial (OrderProvider,
NestProvider,
multivar.BasicPolynomial,
RingElementProvider):
"""
General polynomial with commutative ring coefficients.
"""
def __init__(self, coefficients, **kwds):
"""
Initialize the polynomial.
Required argument:
- coefficients: initializer for polynomial coefficients
Keyword arguments should include:
- coeffring: domain
- number_of_variables: the number of variables
"""
if "number_of_variables" not in kwds:
coefficients = dict(coefficients)
for i in coefficients.keys():
kwds["number_of_variables"] = len(i)
break
multivar.BasicPolynomial.__init__(self, coefficients, **kwds)
NestProvider.__init__(self)
PseudoDivisionProvider.__init__(self)
RingElementProvider.__init__(self)
coeffring = None
if "coeffring" in kwds:
coeffring = kwds["coeffring"]
else:
coeffring = uniutil.init_coefficient_ring(self._coefficients)
if coeffring is not None:
self.set_coefficient_ring(coeffring)
else:
raise TypeError("argument `coeffring' is required")
if "order" in kwds:
order = kwds["order"]
else:
order = termorder.lexicographic_order
OrderProvider.__init__(self, order)
def getRing(self):
"""
Return an object of a subclass of Ring, to which the element
belongs.
"""
# short-cut self._ring is None case
return self._ring
def getCoefficientRing(self):
"""
Return an object of a subclass of Ring, to which the all
coefficients belong.
"""
# short-cut self._coefficient_ring is None case
return self._coefficient_ring
def __repr__(self): # debug
return "%s(%s)" % (self.__class__.__name__, self._coefficients)
def __add__(self, other):
try:
return multivar.BasicPolynomial.__add__(self, other)
except (AttributeError, TypeError):
one = self.getRing().one
try:
return multivar.BasicPolynomial.__add__(self, other * one)
except Exception:
return NotImplemented
def __radd__(self, other):
one = self.getRing().one
try:
return other * one + self
except Exception:
return NotImplemented
def __sub__(self, other):
try:
return multivar.BasicPolynomial.__sub__(self, other)
except (AttributeError, TypeError):
one = self.getRing().one
try:
return multivar.BasicPolynomial.__sub__(self, other * one)
except Exception:
return NotImplemented
def __rsub__(self, other):
one = self.getRing().one
try:
return other * one - self
except Exception:
return NotImplemented
class DomainPolynomial (PseudoDivisionProvider,
RingPolynomial):
"""
Polynomial with domain coefficients.
"""
def __init__(self, coefficients, **kwds):
"""
Initialize the polynomial.
- coefficients: initializer for polynomial coefficients
- coeffring: domain
"""
RingPolynomial.__init__(self, coefficients, **kwds)
if not self._coefficient_ring.isdomain():
raise TypeError("coefficient ring has to be a domain")
PseudoDivisionProvider.__init__(self)
class UniqueFactorizationDomainPolynomial (GcdProvider,
DomainPolynomial):
"""
Polynomial with unique factorization domain coefficients.
"""
def __init__(self, coefficients, **kwds):
"""
Initialize the polynomial.
- coefficients: initializer for polynomial coefficients
- coeffring: unique factorization domain
"""
DomainPolynomial.__init__(self, coefficients, **kwds)
if not self._coefficient_ring.isufd():
raise TypeError("coefficient ring has to be a UFD")
GcdProvider.__init__(self)
def resultant(self, other, var):
"""
Return resultant of two polynomials of the same ring, with
respect to the variable specified by its position var.
"""
cring = self._coefficient_ring
return self.nest(var, cring).resultant(other.nest(var, cring))
class PolynomialRingAnonymousVariables (ring.CommutativeRing):
"""
The class of multivariate polynomial ring.
There's no need to specify the variable names.
"""
_instances = {}
def __init__(self, coeffring, number_of_variables):
if not isinstance(coeffring, ring.Ring):
raise TypeError("%s should not be passed as ring" % coeffring.__class__)
ring.CommutativeRing.__init__(self)
self._coefficient_ring = coeffring
if self._coefficient_ring.isufd():
self.properties.setIsufd(True)
if self._coefficient_ring.isnoetherian():
self.properties.setIsnoetherian(True)
elif self._coefficient_ring.isdomain() in (True, False):
self.properties.setIsdomain(self._coefficient_ring.isdomain())
self.number_of_variables = number_of_variables
def getCoefficientRing(self):
"""
Return the coefficient ring.
"""
return self._coefficient_ring
def getQuotientField(self):
"""
getQuotientField returns the quotient field of the ring
if coefficient ring has its quotient field. Otherwise,
an exception will be raised.
"""
coefficientField = self._coefficient_ring.getQuotientField()
variables = ["x%d" % i for i in range(self.number_of_variables)]
return ratfunc.RationalFunctionField(coefficientField, variables)
def __eq__(self, other):
if self is other:
return True
if (isinstance(other, PolynomialRingAnonymousVariables) and
self._coefficient_ring == other._coefficient_ring and
self.number_of_variables == other.number_of_variables):
return True
return False
def __repr__(self):
"""
Return 'PolynomialRingAnonymousVariables(Ring, #vars)'
"""
return "%s(%s, %d)" % (self.__class__.__name__, repr(self._coefficient_ring), self.number_of_variables)
def __str__(self):
"""
Return R[][]
"""
return str(self._coefficient_ring) + "[]" * self.number_of_variables
def __hash__(self):
"""
hash(self)
"""
return (hash(self._coefficient_ring) ^ (self.number_of_variables * hash(self.__class__.__name__) + 1)) & 0x7fffffff
def __contains__(self, element):
"""
        `in' operator is provided for checking whether the element is
        in the ring.
"""
if element in self._coefficient_ring:
return True
elem_ring = ring.getRing(element)
if elem_ring is not None and elem_ring.issubring(self):
return True
return False
def issubring(self, other):
"""
reports whether another ring contains this polynomial ring.
"""
if isinstance(other, poly_ring.PolynomialRing):
if (self._coefficient_ring.issubring(other.getCoefficientRing()) and
self.number_of_variables <= other.number_of_variables):
return True
elif isinstance(other, poly_ring.RationalFunctionField):
if (len(other.vars) >= self.number_of_variables and
other.coefficientField.issuperring(self._coefficient_ring)):
return True
try:
return other.issuperring(self)
except RuntimeError:
# reach max recursion by calling each other
return False
def issuperring(self, other):
"""
reports whether this polynomial ring contains another ring.
"""
if self._coefficient_ring.issuperring(other):
return True
if isinstance(other, poly_ring.PolynomialRing):
return (self._coefficient_ring.issuperring(other.getCoefficientRing()) and
self.number_of_variables >= other.number_of_variables)
try:
return other.issubring(self)
except RuntimeError:
# reach max recursion by calling each other
return False
def getCommonSuperring(self, other):
"""
Return common superring of two rings.
"""
if self.issuperring(other):
return self
elif other.issuperring(self):
return other
elif (not isinstance(other, PolynomialRingAnonymousVariables) and
other.issuperring(self._coefficient_ring)):
return self.__class__(other, self.number_of_variables)
try:
if hasattr(other, "getCommonSuperring"):
return other.getCommonSuperring(self)
except RuntimeError:
# reached recursion limit by calling on each other
pass
raise TypeError("no common super ring")
def createElement(self, seed):
"""
Return an element of the polynomial ring made from seed
overriding ring.createElement.
"""
if not seed:
return polynomial((), coeffring=self._coefficient_ring, number_of_variables=self.number_of_variables)
elif seed in self._coefficient_ring:
return polynomial([((0,)*self.number_of_variables, seed)], coeffring=self._coefficient_ring)
# implementation should be replaced later
raise NotImplementedError("unclear which type of polynomial be chosen")
def _getOne(self):
"getter for one"
if self._one is None:
self._one = self.createElement(self._coefficient_ring.one)
return self._one
one = property(_getOne, None, None, "multiplicative unit")
def _getZero(self):
"getter for zero"
if self._zero is None:
self._zero = self.createElement(self._coefficient_ring.zero)
return self._zero
zero = property(_getZero, None, None, "additive unit")
def gcd(self, a, b):
"""
Return the greatest common divisor of given polynomials.
The polynomials must be in the polynomial ring.
If the coefficient ring is a field, the result is monic.
"""
if hasattr(a, "gcd"):
return a.gcd(b)
elif hasattr(a, "subresultant_gcd"):
return a.subresultant_gcd(b)
raise NotImplementedError("no gcd")
def extgcd(self, a, b):
"""
Return the tuple (u, v, d): d is the greatest common divisor
of given polynomials, and they satisfy d = u*a + v*b. The
polynomials must be in the polynomial ring. If the
coefficient ring is a field, the result is monic.
"""
if hasattr(a, "extgcd"):
return a.extgcd(b)
raise NotImplementedError("no extgcd")
@classmethod
def getInstance(cls, coeffring, number_of_variables):
"""
Return an instance of the class with specified coefficient ring
and number of variables.
"""
if (coeffring, number_of_variables) not in cls._instances:
cls._instances[coeffring, number_of_variables] = cls(coeffring, number_of_variables)
return cls._instances[coeffring, number_of_variables]
class PolynomialIdeal (ring.Ideal):
"""
Multivariate polynomial ideal.
"""
def __init__(self, generators, aring):
"""
Initialize a polynomial ideal.
"""
ring.Ideal.__init__(self, generators, aring)
def __contains__(self, elem):
"""
Return whether elem is in the ideal or not.
"""
if not elem.getRing().issubring(self.ring):
return False
if self.generators == [self.ring.zero]:
return elem == self.ring.zero
return not self.reduce(elem)
def __bool__(self):
"""
Report whether the ideal is zero ideal or not. Of course,
False is for zero ideal.
"""
return self.generators and self.generators != [self.ring.zero]
def __repr__(self):
"""
Return repr string.
"""
return "%s(%r, %r)" % (self.__class__.__name__, self.generators, self.ring)
def __str__(self):
"""
Return str string.
"""
return "(%s)%s" % (", ".join([str(g) for g in self.generators]), self.ring)
# factories
special_ring_table = {}
def polynomial(coefficients, coeffring, number_of_variables=None):
"""
Return a polynomial.
- coefficients has to be a initializer for dict, whose keys are
variable indices and values are coefficients at the indices.
- coeffring has to be an object inheriting ring.Ring.
- number_of_variables has to be the number of variables.
One can override the way to choose a polynomial type from a
coefficient ring, by setting:
special_ring_table[coeffring_type] = polynomial_type
before the function call.
"""
if type(coeffring) in special_ring_table:
poly_type = special_ring_table[type(coeffring)]
elif coeffring.isufd():
poly_type = UniqueFactorizationDomainPolynomial
elif coeffring.isdomain():
poly_type = DomainPolynomial
else:
poly_type = multivar.BasicPolynomial
if number_of_variables is None:
coefficients = dict(coefficients)
for k in coefficients:
number_of_variables = len(k)
break
return poly_type(coefficients, coeffring=coeffring, number_of_variables=number_of_variables)
def MultiVariableSparsePolynomial(coefficient, variable, coeffring=None):
"""
MultiVariableSparsePolynomial(coefficient, variable [,coeffring])
- coefficient has to be a dictionary of form {(i1,...,ik): c}
- variable has to be a list of character strings.
- coeffring has to be, if specified, an object inheriting ring.Ring.
This function is provided for backward compatible way of defining
multivariate polynomial. The variable names are ignored, but
their number is used.
"""
if not isinstance(variable, list) or not isinstance(coefficient, dict):
raise ValueError("You must input MultiVariableSparsePolynomial(dict, list) but (%s, %s)." % (coefficient.__class__, variable.__class__))
if coeffring is None:
coeffring = uniutil.init_coefficient_ring(coefficient)
return polynomial(coefficient, coeffring=coeffring, number_of_variables=len(variable))
def prepare_indeterminates(names, ctx, coeffring=None):
"""
From space separated names of indeterminates, prepare variables
representing the indeterminates. The result will be stored in ctx
dictionary.
The variables should be prepared at once, otherwise wrong aliases
of variables may confuse you in later calculation.
If an optional coeffring is not given, indeterminates will be
initialized as integer coefficient polynomials.
Example:
>>> prepare_indeterminates("X Y Z", globals())
>>> Y
UniqueFactorizationDomainPolynomial({(0, 1, 0): 1})
"""
split_names = names.split()
number_of_variables = len(split_names)
if coeffring is None:
coeffring = uniutil.init_coefficient_ring({1:1})
for i, name in enumerate(split_names):
e_i = tuple([0] * i + [1] + [0] * (number_of_variables - i - 1))
ctx[name] = polynomial({e_i: 1}, coeffring, number_of_variables)
| [
"nzmath.poly.uniutil.polynomial",
"nzmath.ring.CommutativeRing.__init__",
"nzmath.poly.multivar.BasicPolynomial.__init__",
"nzmath.ring.Ideal.__init__",
"nzmath.poly.multivar.BasicPolynomial.__add__",
"nzmath.ring.getRing",
"nzmath.poly.ring.PolynomialRing.getInstance",
"nzmath.poly.ratfunc.RationalFunction",
"nzmath.poly.uniutil.init_coefficient_ring",
"nzmath.poly.multivar.BasicPolynomial.__sub__",
"nzmath.poly.ratfunc.RationalFunctionField",
"nzmath.ring.CommutativeRingElement.__init__"
] | [((2560, 2608), 'nzmath.poly.uniutil.polynomial', 'uniutil.polynomial', (['combined'], {'coeffring': 'polyring'}), '(combined, coeffring=polyring)\n', (2578, 2608), True, 'import nzmath.poly.uniutil as uniutil\n'), ((3630, 3672), 'nzmath.ring.CommutativeRingElement.__init__', 'ring.CommutativeRingElement.__init__', (['self'], {}), '(self)\n', (3666, 3672), True, 'import nzmath.ring as ring\n'), ((7352, 7389), 'nzmath.poly.ratfunc.RationalFunction', 'ratfunc.RationalFunction', (['self', 'other'], {}), '(self, other)\n', (7376, 7389), True, 'import nzmath.poly.ratfunc as ratfunc\n'), ((9809, 9870), 'nzmath.poly.multivar.BasicPolynomial.__init__', 'multivar.BasicPolynomial.__init__', (['self', 'coefficients'], {}), '(self, coefficients, **kwds)\n', (9842, 9870), True, 'import nzmath.poly.multivar as multivar\n'), ((14017, 14052), 'nzmath.ring.CommutativeRing.__init__', 'ring.CommutativeRing.__init__', (['self'], {}), '(self)\n', (14046, 14052), True, 'import nzmath.ring as ring\n'), ((14990, 15048), 'nzmath.poly.ratfunc.RationalFunctionField', 'ratfunc.RationalFunctionField', (['coefficientField', 'variables'], {}), '(coefficientField, variables)\n', (15019, 15048), True, 'import nzmath.poly.ratfunc as ratfunc\n'), ((16173, 16194), 'nzmath.ring.getRing', 'ring.getRing', (['element'], {}), '(element)\n', (16185, 16194), True, 'import nzmath.ring as ring\n'), ((20897, 20941), 'nzmath.ring.Ideal.__init__', 'ring.Ideal.__init__', (['self', 'generators', 'aring'], {}), '(self, generators, aring)\n', (20916, 20941), True, 'import nzmath.ring as ring\n'), ((23748, 23790), 'nzmath.poly.uniutil.init_coefficient_ring', 'uniutil.init_coefficient_ring', (['coefficient'], {}), '(coefficient)\n', (23777, 23790), True, 'import nzmath.poly.uniutil as uniutil\n'), ((24608, 24647), 'nzmath.poly.uniutil.init_coefficient_ring', 'uniutil.init_coefficient_ring', (['{(1): 1}'], {}), '({(1): 1})\n', (24637, 24647), True, 'import nzmath.poly.uniutil as uniutil\n'), ((2057, 2104), 'nzmath.poly.ring.PolynomialRing.getInstance', 'poly_ring.PolynomialRing.getInstance', (['coeffring'], {}), '(coeffring)\n', (2093, 2104), True, 'import nzmath.poly.ring as poly_ring\n'), ((4718, 4809), 'nzmath.poly.ring.PolynomialRing.getInstance', 'poly_ring.PolynomialRing.getInstance', (['self._coefficient_ring', 'self.number_of_variables'], {}), '(self._coefficient_ring, self.\n number_of_variables)\n', (4754, 4809), True, 'import nzmath.poly.ring as poly_ring\n'), ((10133, 10182), 'nzmath.poly.uniutil.init_coefficient_ring', 'uniutil.init_coefficient_ring', (['self._coefficients'], {}), '(self._coefficients)\n', (10162, 10182), True, 'import nzmath.poly.uniutil as uniutil\n'), ((11133, 11178), 'nzmath.poly.multivar.BasicPolynomial.__add__', 'multivar.BasicPolynomial.__add__', (['self', 'other'], {}), '(self, other)\n', (11165, 11178), True, 'import nzmath.poly.multivar as multivar\n'), ((11659, 11704), 'nzmath.poly.multivar.BasicPolynomial.__sub__', 'multivar.BasicPolynomial.__sub__', (['self', 'other'], {}), '(self, other)\n', (11691, 11704), True, 'import nzmath.poly.multivar as multivar\n'), ((2250, 2327), 'nzmath.poly.ring.PolynomialRing.getInstance', 'poly_ring.PolynomialRing.getInstance', (['coeffring', '(self.number_of_variables - 1)'], {}), '(coeffring, self.number_of_variables - 1)\n', (2286, 2327), True, 'import nzmath.poly.ring as poly_ring\n'), ((4032, 4047), 'nzmath.ring.getRing', 'ring.getRing', (['c'], {}), '(c)\n', (4044, 4047), True, 'import nzmath.ring as ring\n'), ((11300, 11351), 
'nzmath.poly.multivar.BasicPolynomial.__add__', 'multivar.BasicPolynomial.__add__', (['self', '(other * one)'], {}), '(self, other * one)\n', (11332, 11351), True, 'import nzmath.poly.multivar as multivar\n'), ((11826, 11877), 'nzmath.poly.multivar.BasicPolynomial.__sub__', 'multivar.BasicPolynomial.__sub__', (['self', '(other * one)'], {}), '(self, other * one)\n', (11858, 11877), True, 'import nzmath.poly.multivar as multivar\n')] |
import configparser
from datetime import datetime, timedelta
import os
from pathlib import Path
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from src.remindo_driver import main
# Reading configurations
config = configparser.ConfigParser()
config.read_file(open(os.path.join(Path(__file__).parents[1], "config/prod.cfg")))
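# Default arguments applied to every task created under this DAG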
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": datetime(2020, 10, 1),
"email": ["<EMAIL>"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 10,
"retry_delay": timedelta(minutes=5),
"catchup": False, # To setup only in production
}
dag_name = "transform_load_pipeline"
dag = DAG(
dag_name,
default_args=default_args,
description="Transform and load data from landing zone to processed zone.\
Populate data from Processed zone to remindo Warehouse.",
# schedule_interval=None,
schedule_interval=timedelta(minutes=5),
max_active_runs=1,
)
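# Wire the pipeline as a simple linear chain: dummy start marker -> transform/load job -> dummy end marker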
startOperator = DummyOperator(task_id="BeginExecution", dag=dag)
jobOperator = PythonOperator(task_id="TransformLoadJob", python_callable=main, dag=dag)
endOperator = DummyOperator(task_id="StopExecution", dag=dag)
startOperator >> jobOperator >> endOperator
| [
"datetime.datetime",
"configparser.ConfigParser",
"airflow.operators.dummy_operator.DummyOperator",
"airflow.operators.python_operator.PythonOperator",
"pathlib.Path",
"datetime.timedelta"
] | [((313, 340), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (338, 340), False, 'import configparser\n'), ((1095, 1143), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""BeginExecution"""', 'dag': 'dag'}), "(task_id='BeginExecution', dag=dag)\n", (1108, 1143), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((1158, 1231), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""TransformLoadJob"""', 'python_callable': 'main', 'dag': 'dag'}), "(task_id='TransformLoadJob', python_callable=main, dag=dag)\n", (1172, 1231), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1246, 1293), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""StopExecution"""', 'dag': 'dag'}), "(task_id='StopExecution', dag=dag)\n", (1259, 1293), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((514, 535), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(1)'], {}), '(2020, 10, 1)\n', (522, 535), False, 'from datetime import datetime, timedelta\n'), ((661, 681), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (670, 681), False, 'from datetime import datetime, timedelta\n'), ((1031, 1051), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1040, 1051), False, 'from datetime import datetime, timedelta\n'), ((376, 390), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'from pathlib import Path\n')] |