id (string, lengths 1–265) | text (string, lengths 6–5.19M) | dataset_id (string, 7 classes) |
---|---|---|
3222786 | def showBits(n):
    """Return the 32 low-order bits of n as a list, most significant bit first."""
    l = []
    for _ in range(32):
        r = n % 2  # least significant bit of what remains
        l.append(r)
        n //= 2
    l.reverse()
    return l
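
# Illustrative check, not part of the original sample: 5 is 0b101, so the
# bit list should end with [1, 0, 1] and always have 32 entries.
if __name__ == "__main__":
    assert len(showBits(5)) == 32
    assert showBits(5)[-3:] == [1, 0, 1]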
| StarcoderdataPython |
3356913 | <reponame>ejuarez007/IncidentesVialesCDMX<filename>Classes.py
import pandas as pd
from dataclasses import dataclass
from decorators import open_close_connection
@dataclass
class Datos():
path:str
def read_data(self,sep=","):
data = pd.read_csv(self.path, sep=sep)
return data
def unique_values(self,data,column):
values = data[column].unique()
return values
def get_values(self, dataframe, column):
valid_columns = ['dia_semana', 'mes_cierre', 'delegacion_inicio']
if column in valid_columns:
            agg_values = dataframe.groupby(column).agg({'folio': 'count'})
            values = agg_values.to_dict()
        else:
            values = []
        return values
@dataclass
class ClaseMongo():
JSON:str
db_name:str
column_name: str
@open_close_connection
def find(self,conn):
response = list(conn.DBMongoTG.collection_prueba[self.db_name][self.column_name].find(self.JSON))
response = pd.DataFrame(response)
return response
@open_close_connection
def insert(self,conn=None):
        conn.DBMongoTG.collection_prueba.insert_one(self.JSON)
@open_close_connection
def update(self,JSET, conn=None):
        conn.DBMongoTG.collection_prueba.update_one(self.JSON, {"$set": JSET})
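
# For context, a minimal sketch of what the imported open_close_connection
# decorator could look like; decorators.py is not shown in this sample, so
# the MongoClient URI and the conn keyword argument are assumptions.
from functools import wraps
from pymongo import MongoClient

def open_close_connection_sketch(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        conn = MongoClient("mongodb://localhost:27017")  # assumed URI
        try:
            return func(self, *args, conn=conn, **kwargs)
        finally:
            conn.close()  # always release the connection
    return wrapper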
| StarcoderdataPython |
3348230 | <filename>pdf_manager/core/main.py
import argparse
import sys
from typing import List
from colorama import init
from pdf_manager.budget import create_budget
from pdf_manager.concat import concat_pdfs
_LS = List[str]
__all__ = ['main']
def _parse_args(args: _LS = None) -> argparse.Namespace:
"""ArgumentParser designed for this application.
Args:
args (List[str]): list of arguments.
Returns:
argparse.Namespace: arguments processed.
"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser('PDF')
parser.add_argument('--exclude', type=str, default=None,
metavar='pattern', help='regex pattern to exclude documents')
subparsers = parser.add_subparsers(title='commands', dest='command')
subparsers.required = True
# TODO: add help for each argument and each parser
budget = subparsers.add_parser('budget')
budget.add_argument('--path', type=str, default='./')
budget.add_argument('--price', type=float, default=0.03)
concat = subparsers.add_parser('concat')
concat.add_argument('--output', type=str, default='compact_pdf.pdf')
concat.add_argument('--no_open', action='store_false')
concat.add_argument('--path', type=str, default='./')
opt = parser.parse_args(args)
if 'output' in opt:
if opt.output.endswith('.pdf') is False:
opt.output = opt.output + '.pdf'
return opt
def main():
"""Main function."""
opt = _parse_args()
init()
if opt.command == 'budget':
return create_budget(path=opt.path, price_per_sheet=opt.price,
exclude=opt.exclude)
elif opt.command == 'concat':
return concat_pdfs(path=opt.path, output=opt.output, open_files=opt.no_open,
exclude=opt.exclude)
else:
        sys.exit('Unknown error')
if __name__ == '__main__':
main()
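
# Illustrative behaviour of the '.pdf' suffix handling above (assumes the
# module is importable on its own):
#   opt = _parse_args(['concat', '--output', 'merged'])
#   assert opt.output == 'merged.pdf'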
| StarcoderdataPython |
1636037 | import unittest
from program.hr import (
payroll_system,
get_policy,
calculate_payroll,
DisabilityPolicy,
)
from program.employees import employee_database
class TestHr(unittest.TestCase):
employee_db = employee_database
employee_list = employee_db.employees()
system = payroll_system
def test_get_policy(self):
result = []
for e in self.employee_list:
result.append(get_policy(e.identification))
self.assertIsNotNone(result)
def test_get_policy_with_bad_input(self):
with self.assertRaises(ValueError):
get_policy(0)
def test_calculate_payroll(self):
result = calculate_payroll(self.employee_list)
self.assertIsNone(result)
class TestDisabilityPolicy(unittest.TestCase):
def test_DisabilityPolicy_without_base_policy(self):
with self.assertRaises(RuntimeError):
disability = DisabilityPolicy()
disability.track_work(40)
employee_db = employee_database
employee_list = employee_db.employees()
sales_employee = employee_list[2]
disability = DisabilityPolicy()
sales_employee.apply_payroll_policy(disability)
def test_track_work(self):
result = self.disability.track_work(40)
self.assertIsNone(result)
def test_calculate_payroll(self):
result = self.disability.calculate_payroll()
self.assertEqual(result, 360.0)
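
# Note: the class-level statements above (employee_db through
# sales_employee.apply_payroll_policy) execute once at class definition time
# and act as a shared fixture exposed to the tests as self.disability. A more
# conventional arrangement would be unittest's setUpClass hook, sketched here
# under the same assumptions about the program.* modules:
#   @classmethod
#   def setUpClass(cls):
#       cls.disability = DisabilityPolicy()
#       employee_database.employees()[2].apply_payroll_policy(cls.disability)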
| StarcoderdataPython |
86500 | import unittest.mock as mock
from django.urls import reverse
from django.core.exceptions import ImproperlyConfigured
from rest_framework import status
from api.models import PublicDataset, Resource
from api.tests.base import BaseAPITestCase
class PublicDataListTests(BaseAPITestCase):
def setUp(self):
self.url = reverse('public-dataset-list')
self.establish_clients()
self.all_public_datasets = PublicDataset.objects.all()
if len(self.all_public_datasets) == 0:
            raise ImproperlyConfigured('Need at least one public dataset to'
                ' run this test properly.'
            )
self.all_active_datasets = [x for x in self.all_public_datasets if x.active]
if len(self.all_active_datasets) == 0:
raise ImproperlyConfigured('Need at least one active public dataset to'
' run this test properly.'
)
def test_list_requires_auth(self):
"""
Test that general requests to the endpoint generate 401
"""
response = self.regular_client.get(self.url)
self.assertTrue((response.status_code == status.HTTP_401_UNAUTHORIZED)
| (response.status_code == status.HTTP_403_FORBIDDEN))
response = self.authenticated_regular_client.get(self.url)
self.assertTrue((response.status_code == status.HTTP_200_OK))
self.assertTrue(len(response.json()) == len(self.all_active_datasets))
class PublicDataDetailsTests(BaseAPITestCase):
'''
Tests focused around the ability to query a specific public dataset.
'''
def setUp(self):
self.all_public_datasets = PublicDataset.objects.all()
if len(self.all_public_datasets) == 0:
            raise ImproperlyConfigured('Need at least one public dataset to'
                ' run this test properly.'
            )
self.all_active_datasets = [x for x in self.all_public_datasets if x.active]
if len(self.all_active_datasets) == 0:
raise ImproperlyConfigured('Need at least one active public dataset to'
' run this test properly.'
)
# grab the first active dataset to use in the tests below
self.test_active_dataset = self.all_active_datasets[0]
self.url = reverse('public-dataset-details',
kwargs={'dataset_id': self.test_active_dataset.index_name}
)
self.establish_clients()
def test_requires_auth(self):
"""
Test that general requests to the endpoint generate 401
"""
response = self.regular_client.get(self.url)
self.assertTrue((response.status_code == status.HTTP_401_UNAUTHORIZED)
| (response.status_code == status.HTTP_403_FORBIDDEN))
response = self.authenticated_regular_client.get(self.url)
self.assertTrue((response.status_code == status.HTTP_200_OK))
def test_returns_expected_details(self):
response = self.authenticated_regular_client.get(self.url)
self.assertTrue((response.status_code == status.HTTP_200_OK))
response_json = response.json()
self.assertTrue(response_json['index_name'] == self.test_active_dataset.index_name)
print(response_json)
def test_inactive_instance_returns_404(self):
'''
If the details are requested on an inactive dataset, return a 404
'''
# check that we have an inactive dataset first:
dataset_tag = 'public-baz'
pd = PublicDataset.objects.get(index_name = dataset_tag)
self.assertFalse(pd.active)
url = reverse('public-dataset-details',
kwargs={'dataset_id': dataset_tag}
)
response = self.authenticated_regular_client.get(url)
self.assertTrue((response.status_code == status.HTTP_404_NOT_FOUND))
class PublicDataQueryTests(BaseAPITestCase):
'''
Tests focused around the ability to query public datasets.
'''
def setUp(self):
self.all_public_datasets = PublicDataset.objects.all()
if len(self.all_public_datasets) == 0:
            raise ImproperlyConfigured('Need at least one public dataset to'
                ' run this test properly.'
            )
self.all_active_datasets = [x for x in self.all_public_datasets if x.active]
if len(self.all_active_datasets) == 0:
raise ImproperlyConfigured('Need at least one active public dataset to'
' run this test properly.'
)
# grab the first active dataset to use in the tests below
self.test_active_dataset = self.all_active_datasets[0]
self.url = reverse('public-dataset-query',
kwargs={'dataset_id': self.test_active_dataset.index_name}
)
self.establish_clients()
@mock.patch('api.views.public_dataset.query_dataset')
def test_call_format(self, mock_query_dataset):
'''
Test that the proper request is made
'''
query_str = 'q=*:*&facet.field=foo&facet=on'
encoded_str = 'q=%2A%3A%2A&facet.field=foo&facet=on'
url = self.url + '?' + query_str
mock_response_json = {'a':1, 'b':2}
mock_query_dataset.return_value = mock_response_json
response = self.authenticated_admin_client.get(url)
mock_query_dataset.assert_called_with(self.test_active_dataset.index_name, encoded_str)
class PublicDataCreateTests(BaseAPITestCase):
'''
Tests focused around the ability to create public datasets.
'''
def setUp(self):
self.establish_clients()
self.all_public_datasets = PublicDataset.objects.all()
if len(self.all_public_datasets) == 0:
            raise ImproperlyConfigured('Need at least one public dataset to'
                ' run this test properly.'
            )
self.all_active_datasets = [x for x in self.all_public_datasets if x.active]
if len(self.all_active_datasets) == 0:
raise ImproperlyConfigured('Need at least one active public dataset to'
' run this test properly.'
)
# grab the first active dataset to use in the tests below
self.test_active_dataset = self.all_active_datasets[0]
self.url = reverse('public-dataset-create',
kwargs={'dataset_id': self.test_active_dataset.index_name}
)
def test_requires_auth(self):
"""
Test that general requests to the endpoint generate 401
"""
response = self.regular_client.post(self.url)
self.assertTrue((response.status_code == status.HTTP_401_UNAUTHORIZED)
| (response.status_code == status.HTTP_403_FORBIDDEN))
@mock.patch('api.views.public_dataset.create_dataset_from_params')
def test_error_to_add_resource_reported(self, mock_create_dataset_from_params):
'''
If something goes wrong in the
api.views.public_dataset.create_dataset_from_params function,
we return a 400 and report it.
'''
# this is the payload we want passed to the function.
# the full request will have this AND the workspace
payload = {'samples': [1,2,3]}
# mock the failure:
mock_create_dataset_from_params.side_effect = Exception('something bad!')
response = self.authenticated_regular_client.post(
self.url, data=payload, format='json')
self.assertTrue(response.status_code == status.HTTP_400_BAD_REQUEST)
@mock.patch('api.views.public_dataset.create_dataset_from_params')
def test_missing_filter_creates_null_filter(self, mock_create_dataset_from_params):
        '''
        If the payload carries no 'filters' key, the view should pass None
        as the filter to create_dataset_from_params, add the mock-created
        resource, and return a 201.
        '''
payload = {}
# this is the new resource that is mock-created
new_resource = Resource.objects.create(
owner = self.regular_user_1,
path = '/some/dummy_path/file.tsv',
name = 'foo.tsv'
)
mock_create_dataset_from_params.return_value = [new_resource,]
# finally, call the endpoint
response = self.authenticated_regular_client.post(
self.url, data=payload, format='json')
self.assertTrue(response.status_code == status.HTTP_201_CREATED)
mock_create_dataset_from_params.assert_called_with(
self.test_active_dataset.index_name,
self.regular_user_1,
None,
''
)
j = response.json()
self.assertTrue(j[0]['name'] == 'foo.tsv')
@mock.patch('api.views.public_dataset.create_dataset_from_params')
def test_adds_new_resource_to_workspace_case1(self, mock_create_dataset_from_params):
'''
Assume that the request payload was valid so that the
api.views.public_dataset.create_dataset_from_params function
returns a list of api.models.Resource instances.
Here, test that we add that resource to the workspace and return a 201.
No output name for the dataset is provided
'''
# this is the payload we want passed to the function.
# the full request will have this AND the workspace
payload = {'filters': {'a':1}}
# below, we check that the workspace key gets stripped
# from the call to the creation method
new_resource = Resource.objects.create(
owner = self.regular_user_1,
path = '/some/dummy_path/file.tsv',
name = 'foo.tsv'
)
mock_create_dataset_from_params.return_value = [new_resource,]
# finally, call the endpoint
response = self.authenticated_regular_client.post(
self.url, data=payload, format='json')
self.assertTrue(response.status_code == status.HTTP_201_CREATED)
# below, we check that the workspace key gets stripped
# from the call to the creation method
mock_create_dataset_from_params.assert_called_with(
self.test_active_dataset.index_name,
self.regular_user_1,
payload['filters'],
''
)
j = response.json()
# j is a list of resource instances. We expect only one:
self.assertTrue(j[0]['name'] == 'foo.tsv')
@mock.patch('api.views.public_dataset.create_dataset_from_params')
def test_adds_new_resource_to_workspace_case2(self, mock_create_dataset_from_params):
'''
Assume that the request payload was valid so that the
api.views.public_dataset.create_dataset_from_params function
returns a list of api.models.Resource instances.
Here, test that we add that resource to the workspace and return a 201.
Here, we pass a name for the dataset
'''
# this is the payload we want passed to the function.
# the full request will have this AND the workspace
output_name = 'foo'
payload = {'filters': {'a':1}, 'output_name': output_name}
# below, we check that the workspace key gets stripped
# from the call to the creation method
new_resource = Resource.objects.create(
owner = self.regular_user_1,
path = '/some/dummy_path/file.tsv',
name = 'foo.tsv'
)
mock_create_dataset_from_params.return_value = [new_resource,]
# finally, call the endpoint
response = self.authenticated_regular_client.post(
self.url, data=payload, format='json')
self.assertTrue(response.status_code == status.HTTP_201_CREATED)
# below, we check that the workspace key gets stripped
# from the call to the creation method
mock_create_dataset_from_params.assert_called_with(
self.test_active_dataset.index_name,
self.regular_user_1,
payload['filters'],
output_name
)
j = response.json()
# j is a list of resource instances. We expect only one:
self.assertTrue(j[0]['name'] == 'foo.tsv')
@mock.patch('api.views.public_dataset.create_dataset_from_params')
def test_rejects_malformatted_filter(self, mock_create_dataset_from_params):
'''
Test that if a 'filter' key is provided and it is not parsed
as a dict, then we reject
'''
payload = {
'filters': 'abc'
}
response = self.authenticated_regular_client.post(
self.url, data=payload, format='json')
self.assertTrue(response.status_code == status.HTTP_400_BAD_REQUEST)
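
# Note on the patch targets used throughout: mock.patch replaces the name as
# it is looked up inside the view module, which is why the target is
# 'api.views.public_dataset.create_dataset_from_params' rather than the
# module that originally defines the function, e.g.:
#   with mock.patch('api.views.public_dataset.query_dataset') as m:
#       m.return_value = {}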
| StarcoderdataPython |
33729 | import os
import subprocess
class TestTasks:
""" Test that the tasks work with invoke. """
CMD_KWARGS = dict(
capture_output=True,
encoding="utf-8",
shell=True,
env=os.environ.copy(),
)
def test_unapproved_licenses(self):
""" Should emit table of unapproved licenses. """
reply = subprocess.run("poetry run invoke license.unapproved-licenses", **self.CMD_KWARGS)
output = reply.stdout
# assumes we require pylint and pylint is GPL and that's on our unapproved list
assert "pylint" in output
assert "GNU General Public License" in output
def test_write_table(self):
""" Should emit a table of licenses used. """
reply = subprocess.run("poetry run invoke license.write-table --outfile='-'", **self.CMD_KWARGS)
output = reply.stdout
# assumes we require coverage and at least one package we depend on is Apache licensed
assert 'coverage' in output
assert 'Apache Software License' in output
| StarcoderdataPython |
72205 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import plugins.basetypes
import plugins.session
import plugins.auditlog
import os
import yaml
""" Settings API end point for CLC"""
async def process(server: plugins.basetypes.Server, session: plugins.session.SessionObject, indata: dict) -> dict:
if session and (session.credentials or server.config.debug.open_server):
repo = indata.get("repo", '')
assert ".." not in repo, "Invalid path specified"
assert "~" not in repo, "Invalid path specified"
excludes = indata.get("excludes")
bad_words = indata.get("words")
excludes_context = indata.get("excludes_context", [])
ymlfile = os.path.join(server.config.dirs.scratch, repo, "_clc.yaml")
if os.path.exists(ymlfile):
            with open(ymlfile) as f:
                yml = yaml.safe_load(f)
if not isinstance(bad_words, dict):
return {
"okay": False,
"message": "Word list must be dictionary, word: context",
}
if not isinstance(excludes, list):
return {
"okay": False,
"message": "Excludes list must by an array of exclude globs",
}
if not isinstance(excludes_context, list):
return {
"okay": False,
"message": "Excludes context list must by an array of exclude regexes",
}
yml["bad_words"] = bad_words
yml["excludes"] = excludes
yml["excludes_context"] = excludes_context
            with open(ymlfile, "w") as f:
                yaml.dump(yml, f)
server.data.projects[repo].mtimes[ymlfile] = os.stat(ymlfile).st_mtime
plugins.auditlog.log_entry(session, f"Changed project settings for {repo} ({ymlfile})")
return {
"okay": True,
"message": "Settings saved. Please wait for next scan for it to apply.",
}
else:
return {
"okay": False,
"message": "No such project",
}
else:
return {
"okay": False,
"message": "You need to be logged in to access this endpoint",
}
def register(server: plugins.basetypes.Server):
return plugins.basetypes.Endpoint(process)
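
# For reference, a request payload this endpoint accepts might look like the
# following (the values are purely illustrative):
#   {
#       "repo": "myorg/myrepo",
#       "words": {"whitelist": "context in which the word is flagged"},
#       "excludes": ["*.min.js", "vendor/*"],
#       "excludes_context": [r"https?://\S+"],
#   }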
| StarcoderdataPython |
3283519 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
from setuptools import setup
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(DIR, "extern", "pybind11"))
from pybind11.setup_helpers import ParallelCompile, Pybind11Extension # noqa: E402
del sys.path[-1]
# Use the environment variable CMAKE_BUILD_PARALLEL_LEVEL to control parallel builds
ParallelCompile("CMAKE_BUILD_PARALLEL_LEVEL").install()
cxx_std = int(os.environ.get("CMAKE_CXX_STANDARD", "14"))
SRC_FILES = [
"src/module.cpp",
"src/register_accumulators.cpp",
"src/register_algorithm.cpp",
"src/register_axis.cpp",
"src/register_histograms.cpp",
"src/register_storage.cpp",
"src/register_transforms.cpp",
]
INCLUDE_DIRS = [
"include",
"extern/assert/include",
"extern/config/include",
"extern/core/include",
"extern/histogram/include",
"extern/mp11/include",
"extern/throw_exception/include",
"extern/variant2/include",
]
ext_modules = [
Pybind11Extension(
"boost_histogram._core",
SRC_FILES,
include_dirs=INCLUDE_DIRS,
cxx_std=cxx_std,
extra_compile_args=["/d2FH4-"] if sys.platform.startswith("win32") else [],
)
]
extras = {
"test": ["pytest", "pytest-benchmark", "typing_extensions", "cloudpickle"],
"docs": [
"Sphinx~=3.0",
"recommonmark>=0.5.0",
"sphinx_book_theme==0.38.0",
"nbsphinx",
"sphinx_copybutton",
],
"examples": ["matplotlib", "xarray", "xhistogram", "netCDF4", "numba", "uproot3"],
"dev": ["ipykernel", "typer"],
}
extras["all"] = sum(extras.values(), [])
extras["dev"] += extras["test"]
setup(ext_modules=ext_modules, extras_require=extras)
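
# Illustrative install-time use of the environment variables read above
# (shell commands, assuming a source checkout with the extern/ submodules):
#   CMAKE_BUILD_PARALLEL_LEVEL=4 CMAKE_CXX_STANDARD=17 pip install .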
| StarcoderdataPython |
3236444 | <gh_stars>10-100
from pyamf.adapters import register_adapter
def when_imported(mod):
"""
This function is called immediately after mymodule has been imported.
It configures PyAMF to encode a list when an instance of mymodule.CustomClass
is encountered.
"""
import pyamf
pyamf.add_type(mod.CustomClass, lambda obj: list(obj))
register_adapter('mymodule', when_imported) | StarcoderdataPython |
4843020 | from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.core.mail.message import EmailMessage
from django.conf import settings
from wildlifelicensing.apps.returns.models import Return
import datetime
from zipfile import ZipFile
import os
class Command(BaseCommand):
help = 'Run the Returns Report'
def handle(self, *args, **options):
dt = datetime.date(2018,1,1)
filename1 = f'returns_report_reg17_{datetime.datetime.now().strftime("%Y%m%d")}.csv'
with open(filename1, "w") as f:
hdr = 'LODGEMENT_NUMBER|*|LICENCE REFERENCE|*|LODGEMENT_DATE|*|STATUS|*|RETURN_NAME|*|DATE|*|FATE|*|SITE|*|ZONE|*|COUNT|*|DATUM|*|METHOD|*|EASTING|*|MARKING|*|NAME_ID|*|SAMPLES|*|ACCURACY|*|LATITUDE|*|LOCATION|*|NORTHING|*|CERTAINTY|*|LONGITUDE|*|IDENTIFIER|*|COMMON_NAME|*|TRANSMITTER|*|VOUCHER_REF|*|SPECIES_NAME|*|SPECIES_GROUP\n'
f.write(hdr)
for ret in Return.objects.filter(returntable__name__in=['regulation-17'], lodgement_date__gt=dt):
for return_table in ret.returntable_set.all():
for return_row in return_table.returnrow_set.all():
data = "|*|".join(str(val) for val in return_row.data.values())
line = f'{ret.lodgement_number}|*|{ret.licence.reference}|*|{ret.lodgement_date}|*|{ret.status}|*|{return_table.name}|*|{data}\n'
#print(ret.lodgement_number, ret.lodgement_date, ret.status, return_table.name, data)
f.write(line)
filename2 = f'returns_report_reg15_{datetime.datetime.now().strftime("%Y%m%d")}.csv'
with open(filename2, "w") as f:
hdr = 'LODGEMENT_NUMBER|*|LICENCE REFERENCE|*|LODGEMENT_DATE|*|STATUS|*|RETURN_NAME|*|COMMENTS|*|NUMBER TAKEN|*|CONDITION WHEN CAPTURED|*|DATE COLLECTED/DESTROYED|*|DATE RELEASED|*|LOCATION RELEASED|*|SPECIES|*|LOCATION COLLECTED\n'
f.write(hdr)
for ret in Return.objects.filter(returntable__name__in=['regulation-15'], lodgement_date__gt=dt):
for return_table in ret.returntable_set.all():
for return_row in return_table.returnrow_set.all():
data = "|*|".join(str(val) for val in return_row.data.values())
line = f'{ret.lodgement_number}|*|{ret.licence.reference}|*|{ret.lodgement_date}|*|{ret.status}|*|{return_table.name}|*|{data}\n'
#print(ret.lodgement_number, ret.lodgement_date, ret.status, return_table.name, data)
f.write(line)
filename_zip = f'returns_reports_{datetime.datetime.now().strftime("%Y%m%d")}.zip'
with ZipFile(filename_zip, 'w') as zipObj:
# Add multiple files to the zip
zipObj.write(filename1)
zipObj.write(filename2)
email = EmailMessage()
email.subject = 'Wildlife Licensing Returns Report'
email.body = 'Wildlife Licensing Returns Report'
email.from_email = settings.EMAIL_FROM
email.to = settings.REPORTS_EMAIL if isinstance(settings.REPORTS_EMAIL, list) else [settings.REPORTS_EMAIL]
email.attach_file(filename_zip)
res = email.send()
# cleanup
os.remove(filename1)
os.remove(filename2)
os.remove(filename_zip)
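
# Typical invocation (illustrative; the real command name is the filename of
# this module under management/commands/, which the sample does not show):
#   python manage.py <command_name>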
| StarcoderdataPython |
44485 | from flask import url_for
from tests.conftest import normalize_spaces
def test_set_inbound_sms_sets_a_number_for_service(
logged_in_client,
mock_add_sms_sender,
multiple_available_inbound_numbers,
service_one,
fake_uuid,
mock_no_inbound_number_for_service,
mocker
):
mocker.patch('app.service_api_client.update_service_with_properties')
data = {
"inbound_number": "781d9c60-7a7e-46b7-9896-7b045b992fa5",
}
response = logged_in_client.post(
url_for('main.service_set_inbound_number', service_id=service_one['id']),
data=data
)
assert response.status_code == 302
mock_add_sms_sender.assert_called_once_with(
service_one['id'],
sms_sender="781d9c60-7a7e-46b7-9896-7b045b992fa5",
is_default=True,
inbound_number_id="781d9c60-7a7e-46b7-9896-7b045b992fa5"
)
def test_set_inbound_sms_when_no_available_inbound_numbers(
client_request,
service_one,
no_available_inbound_numbers,
mock_no_inbound_number_for_service,
mocker
):
page = client_request.get(
'main.service_set_inbound_number',
service_id=service_one['id']
)
assert normalize_spaces(page.select_one('main p').text) == "No available inbound numbers"
def test_set_inbound_sms_when_service_already_has_sms(
client_request,
service_one,
multiple_available_inbound_numbers,
mock_get_inbound_number_for_service,
):
page = client_request.get(
'main.service_set_inbound_number',
service_id=service_one['id']
)
assert normalize_spaces(page.select_one('main p').text) == "This service already has an inbound number"
| StarcoderdataPython |
38903 | # 45 - Create a program that makes the computer play rock-paper-scissors (jokenpo) with you.
print('=====JOKENPO=====')
print('')
from random import randint
from time import sleep
itens = ('pedra','papel','tesoura')
computador = randint(0, 2)
print('''FAÇA SUA ESCOLHA
[ 0 ] pedra
[ 1 ] papel
[ 2 ] tesoura
''')
jogador = int(input('Qual a sua jogada ? '))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO')
sleep(1)
print('computador jogou {}.'.format(itens[computador]))
print('jogador jogou {}.'.format(itens[jogador]))
if computador == 0: # computer played rock
if jogador == 0:
print('EMPATE')
elif jogador == 1:
print('JOGADOR VENCE')
elif jogador == 2:
print('COMPUTADOR VENCE')
else:
print('jogada invalida')
elif computador == 1: # computer played paper
if jogador == 0:
print('COMPUTADOR VENCE')
elif jogador == 1:
print('EMPATE')
elif jogador == 2:
print('JOGADOR VENCE')
else:
print('jogada invalida')
elif computador == 2: # computer played scissors
if jogador == 0:
print('JOGADOR VENCE')
elif jogador == 1:
print('COMPUTADOR VENCE')
elif jogador == 2:
print('EMPATE')
else:
print('jogada invalida')
# END | StarcoderdataPython |
198984 | from validation.types.type_validation import (
ValidationType, ValidationTypeError
)
from uontypes.scalars.uon_uint import UonUint
class UintTypeValidation(ValidationType):
def validate_type(self, input_):
if (not isinstance(input_, UonUint)):
raise ValidationTypeError("The following input {} type "
"does not correspond to "
"unsigned int".format(input_))
def __repr__(self):
return "UintTypeValidation()"
def __str__(self):
return "!uint"
def to_binary(self):
"""Binary representation of unsigned integer type validation.
b"\x30" corresponds to uint type in UON.
Returns:
bytes: binary representation of uint type validation
"""
return b"\x30" | StarcoderdataPython |
3288912 | # Generated by Django 4.0 on 2022-03-20 13:41
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('first_name', models.CharField(max_length=30, validators=[django.core.validators.MinLengthValidator(2)])),
('last_name', models.CharField(max_length=30, validators=[django.core.validators.MinLengthValidator(2)])),
('picture', models.URLField()),
('date_of_birth', models.DateTimeField(blank=True, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('gender', models.CharField(blank=True, choices=[('Male', 'Male'), ('Female', 'Female'), ('Do not show', 'Do not show')], default='Do not show', max_length=11, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='accounts.petstagramuser')),
],
),
]
| StarcoderdataPython |
126097 | <gh_stars>0
# https://zulko.github.io/moviepy/examples/star_worms.html | StarcoderdataPython |
1659207 | <filename>Main.py
import YellScraper
scraper = YellScraper.Scraper()
scraper.setKeyword('Restaurant')
scraper.setLocation('Old+Street%2C+ec1v')
scraper.start()
print('Done... Exiting') | StarcoderdataPython |
3211518 | <gh_stars>1-10
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import loads, dumps, JSONDecodeError
port = 8081
class TestServer(BaseHTTPRequestHandler):
    def _set_headers(self, status=200):
        self.send_response(status)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
@staticmethod
def is_request_valid(data):
"""
Determines if request data is valid; it needs to have:
* either property "command" or property "distance"
* only acceptable value if "command" is "stop"
* "distance" needs to be array of length 3 containing only ints/floats
:param data: request data as dictionary
:return: True or False
"""
if len(data) != 1:
return False
if 'command' in data and data['command'] == 'stop':
return True
if 'distance' not in data:
return False
has_correct_length = len(data['distance']) == 3
has_correct_type = all(isinstance(n, float) or isinstance(n, int)
for n in data['distance'])
if has_correct_length and has_correct_type:
return True
return False
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
        self.wfile.write(  # simple HTML status page
            bytes('<strong>Server is running!</strong>', 'utf-8'))
def do_POST(self):
content_length = int(self.headers['Content-Length'])
request = self.rfile.read(content_length)
        try:
            data = loads(request.decode())
            if self.is_request_valid(data):
                status_code = 200
                reply = {"success": data}
            else:
                status_code = 400
                reply = {"invalid request data": data}
        except JSONDecodeError as e:
            status_code = 400
            reply = {"error": str(e)}
        # send the status line and headers exactly once
        self._set_headers(status_code)
        self.wfile.write(dumps(reply).encode())
def run():
server_address = ('', port)
server = HTTPServer(server_address, TestServer)
print(f'Running test server on port {port}...')
server.serve_forever()
run()
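
# Illustrative client call against the running server, assuming the
# third-party requests package is installed (run() above blocks, so this
# would execute from another process):
#   import requests
#   r = requests.post("http://localhost:8081", json={"distance": [1, 2.5, 0]})
#   assert r.status_code == 200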
| StarcoderdataPython |
22116 | """Some debugging functions for working with the Scrapy engine"""
# used in global tests code
from time import time # noqa: F401
def get_engine_status(engine):
"""Return a report of the current engine status"""
tests = [
"time()-engine.start_time",
"engine.has_capacity()",
"len(engine.downloader.active)",
"engine.scraper.is_idle()",
"engine.spider.name",
"engine.spider_is_idle(engine.spider)",
"engine.slot.closing",
"len(engine.slot.inprogress)",
"len(engine.slot.scheduler.dqs or [])",
"len(engine.slot.scheduler.mqs)",
"len(engine.scraper.slot.queue)",
"len(engine.scraper.slot.active)",
"engine.scraper.slot.active_size",
"engine.scraper.slot.itemproc_size",
"engine.scraper.slot.needs_backout()",
]
checks = []
for test in tests:
try:
checks += [(test, eval(test))]
except Exception as e:
checks += [(test, f"{type(e).__name__} (exception)")]
return checks
def format_engine_status(engine=None):
checks = get_engine_status(engine)
s = "Execution engine status\n\n"
for test, result in checks:
s += f"{test:<47} : {result}\n"
s += "\n"
return s
def print_engine_status(engine):
print(format_engine_status(engine))
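
# Illustrative use from a running crawl, e.g. inside an extension or a
# telnet console session that has access to the crawler object:
#   print_engine_status(crawler.engine)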
| StarcoderdataPython |
3225577 | # -*- coding: utf-8 -*-
"""
Survey_traverseAdjustment.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2019-11-17'
__copyright__ = '(C) 2019, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterString,
QgsProcessingParameterPoint,
QgsProcessingParameterNumber,
QgsProcessingParameterCrs,
QgsProcessingParameterFileDestination,
QgsFields,
QgsField,
QgsWkbTypes,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsPoint,
QgsApplication
)
from numpy import radians, arctan, pi, sin, cos, matrix, sqrt, degrees, array, diag, ones, zeros, floor
from numpy.linalg import norm, pinv, inv
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.topogeo import *
import os
from qgis.PyQt.QtGui import QIcon
class TraverseAdjustment(QgsProcessingAlgorithm):
A = 'A'
B = 'B'
Y = 'Y'
Z = 'Z'
DIST = 'DIST'
ANGS = 'ANGS'
DIST_PREC = 'DIST_PREC'
PPM = 'PPM'
ANGS_PREC = 'ANGS_PREC'
CRS = 'CRS'
OUTPUT = 'OUTPUT'
HTML = 'HTML'
rho = 180*3600/pi
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
        # Translate to Portuguese: arg[0] - English (translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return TraverseAdjustment()
def name(self):
return 'traverseadjustment'
def displayName(self):
return self.tr('Traverse adjustment', 'Poligonal enquadrada')
def group(self):
return self.tr('Survey', 'Agrimensura')
def groupId(self):
return 'survey'
def tags(self):
return self.tr('survey,agrimensura,polygonal,adjustment,total,station,angle,least square').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png'))
    txt_en = 'This algorithm performs the adjustment of a framed traverse by the least squares method, where the distance, angle, and direction observations are adjusted simultaneously, providing the most probable values for the given data set. Furthermore, the observations can be rigorously weighted based on their estimated errors and adjusted accordingly.'
txt_pt = 'Este algoritmo realiza o ajustamento de poligonal enquadrada pelo método dos mínimos quadrados, onde as observações de distâncias, ângulos e direções são ajustadas simultaneamente, fornecendo os valores mais prováveis para o conjunto de dados. Além disso, as observações podem ser rigorosamente ponderadas considerando os erros estimados e ajustados.'
figure = 'images/tutorial/survey_traverse.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
nota_en = '''Note: Sample data obtained from class notes of the Geodetic Survey discipline at UFPE.
'''
nota_pt = '''Nota: Dados de exemplo obtidos das notas de aula da disciplina de Levantamento Geodésicos na UFPE.
'''
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<div>''' + self.tr(nota_en, nota_pt) + '''
''' +'</a><br><b>'+ self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterPoint(
self.A,
self.tr('A: first (E,N) coordinates','A: 1º ponto (E,N)'),
defaultValue = QgsPointXY(150000, 250000)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.B,
self.tr('B: second (E,N) coordinates','B: 2º ponto (E,N)'),
defaultValue = QgsPointXY(149922.119, 249875.269)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Y,
self.tr('Y: penultimate (E,N) coordinates', 'Y: penúltimo ponto (E,N)'),
defaultValue = QgsPointXY(150347.054, 249727.281)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Z,
self.tr('Z: final (E,N) coordinates', 'Z: último ponto (E,N)'),
defaultValue = QgsPointXY(150350.201, 249622.000)
)
)
self.addParameter(
QgsProcessingParameterString(
self.DIST,
self.tr('List of Horizontal Distances (m)', 'Lista de Distâncias Horizontais (m)'),
defaultValue = '110.426, 72.375, 186.615, 125.153, 78.235, 130.679, 110.854',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterString(
self.ANGS,
self.tr('List of Angles', 'Lista de Ângulos'),
defaultValue = '''75°23'34", 202°4'36", 56°51'15", 283°31'32", 242°57'31", 185°5'12", 94°11'35", 266°13'20" ''',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.DIST_PREC,
self.tr('Initial distance precision (mm)', 'Precisão linear inicial (mm)'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.PPM,
self.tr('PPM distance precision', 'Precisão linear em PPM'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.ANGS_PREC,
self.tr('Angular precision (seconds)', 'Precisão angular (em segundos)'),
type = 1,
defaultValue = 10
)
)
self.addParameter(
QgsProcessingParameterCrs(
self.CRS,
self.tr('CRS','SRC'),
'ProjectCrs'))
# OUTPUT
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Adjusted Points', 'Pontos da Poligonal')
)
)
self.addParameter(
QgsProcessingParameterFileDestination(
'HTML',
self.tr('Report of the closed traverse', 'Relatório de ajuste da Poligonal'),
self.tr('HTML files (*.html)')
)
)
    # F(Xo) for distances:
def F_X_d(self, pnts, B, Y):
F_X = [[sqrt((B[0]-pnts[0][0])**2 + (B[1]-pnts[0][1])**2)]]
for k in range(len(pnts)-1):
x1 = pnts[k][0]
y1 = pnts[k][1]
x2 = pnts[k+1][0]
y2 = pnts[k+1][1]
F_X += [[sqrt((x1-x2)**2 + (y1-y2)**2)]]
F_X += [[sqrt((Y[0]-pnts[-1][0])**2 + (Y[1]-pnts[-1][1])**2)]]
return F_X
    # F(Xo) for angles:
def F_X_a(self, pnts, A, B, Y, Z):
pnts2 = [B] + pnts + [Y]
        # angle read clockwise
F_X = [[3600*degrees(DifAz(azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0], azimute(QgsPointXY(B[0], B[1]),QgsPointXY(pnts[0][0], pnts[0][1]))[0]))]]
for k in range(len(pnts2)-2):
pnt0 = QgsPointXY(pnts2[k][0], pnts2[k][1])
pnt1 = QgsPointXY(pnts2[k+1][0], pnts2[k+1][1])
pnt2 = QgsPointXY(pnts2[k+2][0], pnts2[k+2][1])
F_X += [[3600*degrees(DifAz(azimute(pnt1,pnt0)[0], azimute(pnt1, pnt2)[0]))]]
F_X += [[3600*degrees(DifAz(azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(pnts2[-2][0], pnts2[-2][1]))[0], azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0]))]]
return F_X
def Jacobiana_d(self, pnts, B, Y, n_d, n_par):
Jac = zeros([n_d, n_par])
pnts2 = [B] + pnts + [Y]
for k in range(n_d):
I = pnts2[k]
J = pnts2[k+1]
IJ = norm(array(J) - array(I))
linha = [(I[0]-J[0])/IJ, (I[1]-J[1])/IJ, (J[0]-I[0])/IJ, (J[1]-I[1])/IJ]
if k == 0:
Jac[k, 0:2] = linha[2:]
elif k < (n_d-1):
Jac[k, (2*k-2):(2*k-2 + 4)] = linha
else:
Jac[k, (2*k-2):(2*k-2 + 2)] = linha[:2]
return list(Jac)
def Jacobiana_a(self, pnts, A, B, Y, Z, n_angs, n_par):
Jac = zeros([n_angs, n_par])
pnts2 = [A, B] + pnts + [Y, Z]
for k in range(n_angs):
B = pnts2[k]
I = pnts2[k+1]
F = pnts2[k+2]
IB = norm(array(B) - array(I))
IF = norm(array(F) - array(I))
linha = [(I[1]-B[1])/IB**2, (B[0]-I[0])/IB**2, (B[1]-I[1])/IB**2 - (F[1]-I[1])/IF**2,
(I[0]-B[0])/IB**2 - (I[0]-F[0])/IF**2, (F[1]-I[1])/IF**2, (I[0]-F[0])/IF**2]
linha = list(self.rho*array(linha))
if n_par > 2:
if k == 0:
Jac[k, 0:2] = linha[4:]
elif k==1:
Jac[k, 0:4] = linha[2:]
elif k < (n_angs-2):
Jac[k, (2*k-4):(2*k-4 + 6)] = linha
elif k == n_angs-2:
Jac[k, (2*k-4):(2*k-4 + 4)] = linha[:4]
else:
Jac[k, (2*k-4):(2*k-4 + 2)] = linha[:2]
else:
if k == 0:
Jac[0, 0:2] = linha[4:]
elif k == 1:
Jac[1, 0:2] = linha[2:4]
elif k == 2:
Jac[2, 0:2] = linha[:2]
return list(Jac)
def processAlgorithm(self, parameters, context, feedback):
A = self.parameterAsPoint(
parameters,
self.A,
context
)
A = [A.x(), A.y()]
B = self.parameterAsPoint(
parameters,
self.B,
context
)
B = [B.x(), B.y()]
Y = self.parameterAsPoint(
parameters,
self.Y,
context
)
Y = [Y.x(), Y.y()]
Z = self.parameterAsPoint(
parameters,
self.Z,
context
)
Z = [Z.x(), Z.y()]
d = self.parameterAsString(
parameters,
self.DIST,
context
)
d = String2NumberList(d)
#feedback.pushInfo('Distances list: ' + str(d))
angs = self.parameterAsString(
parameters,
self.ANGS,
context
)
angs = String2StringList(angs)
#feedback.pushInfo('Angles list: ' + str(angs))
lista = []
for ang in angs:
lista += [3600*float(dms2dd(ang))]
angs = lista
dist_precision = self.parameterAsDouble(
parameters,
self.DIST_PREC,
context
)
dist_precision *= 1e-3 # milimeter to meters
ppm = self.parameterAsDouble(
parameters,
self.PPM,
context
)
ppm *= 1e-6 # ppm
ang_precision = self.parameterAsDouble(
parameters,
self.ANGS_PREC,
context
)
CRS = self.parameterAsCrs(
parameters,
self.CRS,
context
)
if CRS.isGeographic():
raise QgsProcessingException(self.tr('The output CRS must be Projected!', 'O SRC da camada de saída deve ser Projetado!'))
# OUTPUT
Fields = QgsFields()
Fields.append(QgsField('id', QVariant.Int))
GeomType = QgsWkbTypes.Point
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
GeomType,
CRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
html_output = self.parameterAsFileOutput(
parameters,
self.HTML,
context
)
        # Precisions
        sd_d = list(dist_precision + array(d)*ppm)
        sd_a = list(ang_precision*ones(len(angs)))
        # Observations
        Lb = matrix(d + angs).reshape([len(d)+len(angs),1])
        # Computation of the initial approximations
Xo = []
pnts = []
Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
p0 = B
for k in range(len(d)-1):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
x = p0[0] + d[k]*cos(ang)
y = p0[1] + d[k]*sin(ang)
Xo += [[x], [y]]
pnts += [(x, y)]
Az0 = -pi/2 - ang
p0 = (x, y)
pnts_ini = pnts
        # Linear closure error computation
ang = pi/2 - Az0 - radians(angs[-2]/3600)
x = p0[0] + d[-1]*cos(ang)
y = p0[1] + d[-1]*sin(ang)
Y_ = (x, y)
Erro = array(Y_)-array(Y)
feedback.pushInfo('Linear closure error: ' + str(round(norm(array(Y_)-array(Y)),4)) + ' m')
feedback.pushInfo('E and N errors: ' + str((round(Erro[0],4),round(Erro[1],4))) + ' m')
        # Azimuth closure error computation
        Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
        for k in range(len(angs)):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
Az = pi/2 - ang
Az0 = Az -pi
if Az<0 or Az>2*pi:
if (Az<0):
Az=Az+2*pi
else:
Az=Az-2*pi
feedback.pushInfo('Angular closure error: ' + str(round(3600*(degrees(Az - azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0])),2)) + ' sec')
        # Data for the Jacobian matrix
n_par = len(pnts)*2
n_d = len(d)
n_angs = len(angs)
n_obs = n_d + n_angs
        # Weight matrix
P = matrix(diag(array(sd_d + sd_a)**(-2)))
        # Iterative computation of the coordinates (parameters)
cont = 0
cont_max = 10
tol = 1e-4
while cont < cont_max:
F_Xo = self.F_X_d(pnts, B, Y) + self.F_X_a(pnts, A, B, Y, Z)
J = matrix(list(self.Jacobiana_d(pnts, B, Y, n_d, n_par)) + list(self.Jacobiana_a(pnts, A, B, Y, Z, n_angs, n_par)))
L = matrix(Lb - F_Xo)
delta = pinv(J.T*P*J)*J.T*P*L
Xa = array(Xo) + array(delta)
cont += 1
#print('Iteração:', cont, '\ncoord:', Xa.T, '\ndelta:', delta.T)
feedback.pushInfo('Iteração: ' + str( cont) + '\nCoord: ' + str(Xa.T) + '\nDelta:' + str(delta.T))
if max(abs(delta))[0,0] > tol:
Xo = Xa
pnts = []
for k in range(int(len(Xo)/2)):
pnts += [(float(Xo[2*k][0]), float(Xo[2*k+1][0]))]
else:
break
        # Residuals
        V = Lb - F_Xo
        # A posteriori variance factor
        n = len(Lb) # number of observations
        u = len(Xa) # number of parameters
        sigma2 = V.T*P*V/(n-u)
        # Adjusted observations (La)
        La = Lb + V
        # Covariance matrix of Xa
        SigmaXa = sigma2[0,0]*pinv(J.T*P*J)
        # Covariance matrix of La
        SigmaLa = J*SigmaXa*J.T
        # Covariance matrix of Lb
        var_priori = 1.0
        SigmaLb = var_priori*inv(P)
        # Covariance matrix of the residuals
        SigmaV = SigmaLa + SigmaLb
feature = QgsFeature()
total = 100.0 /len(pnts) if len(pnts) else 0
for current, pnt in enumerate(pnts):
geom = QgsGeometry(QgsPoint(float(pnt[0]), float(pnt[1])))
feature.setGeometry(geom)
feature.setAttributes([current+1])
sink.addFeature(feature, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int(current * total))
        # Report
tabela1 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
</tr>
'''
tabela2 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
<td style="text-align: center;"><i> [S_E] </i></td>
<td style="text-align: center;"><i> [S_N] </i></td>
</tr>
'''
tabela3 = '''<tr style="">
<td
style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[r]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[adj_obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[sd]<o:p></o:p></span></p>
</td>
</tr>
'''
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>'''+ self.tr('Traverse Adjustment Report', 'Relatório de Ajuste de Poligonal') + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body
style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);"
alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span
style="font-size: 12pt; line-height: 107%;">''' + self.tr('TRAVERSE ADJUSTMENT', 'POLIGONAL ENQUADRADA') + '''<o:p></o:p></span></b></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><span style="font-style: italic;">''' + self.tr('Method of Least Squares', str2HTML('Método dos Mínimos Quadrados')) + '''</span></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><b><u>''' + self.tr('REPORT', str2HTML('RELATÓRIO')) + '''<o:p></o:p></u></b></p>
<div align="center">
<table style="text-align: center; width: 100%;" border="1"
cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td width="50%"><b><span style=""
>'''+ self.tr('Initial approximation', str2HTML('Aproximação Incial')) + '''</span></b></td>
<td width="50%"><b><span style=""
>'''+ self.tr('Adjusted Coordinates', 'Coordenadas Ajustadas') + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
</tr>
[TABLE 1]
</tbody>
</table>
</div>
</td>
<td style="text-align: center;">
<div align="center"></br>
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
<td style="text-align: center;">
σ<span style="font-style: italic;">E </span></td>
<td style="text-align: center;">
σ<span style="font-style: italic;">N </span></td>
</tr>
[TABLE 2]
</tbody>
</table>
<i><span style="" >''' + self.tr('Posteriori variance', str2HTML('Variância a posteriori')) + '''</span></i><span style="" >: </span><span
style="" > <span
style="color: red;">[sigma2]</span></span>
</div>
</td>
</tr>
<tr>
<td colspan="2" rowspan="1"><b><span
style="" >''' + self.tr('Observations', str2HTML('Observações')) + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td colspan="2" rowspan="1"
style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; width: 389.5pt; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0"
width="519">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Observation', str2HTML('Observação')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Residual', str2HTML('Resíduo')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Adjusted Observation', str2HTML('Observação Ajustada')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Standard Deviation', str2HTML('Desvio Padrão')) + '''<o:p></o:p></span></p>
</td>
</tr>
[TABLE 3]
</tbody>
</table></br>
</div>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align: left;"
align="left"><i><span
style="font-size: 10pt; line-height: 100%; color: rgb(127, 127, 127);">''' + self.tr(str2HTML('*The unit of measurement of the adjusted coordinates is the same as the input coordinates.'), str2HTML('*A unidade de medida das coordenadas ajustadas é a mesma da coordenadas de entrada.')) + '''<o:p></o:p></span></i></p>
</div>
<footer">
<p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + self.tr('<NAME>', str2HTML('<NAME>')) + '''
</br>''' + self.tr('Cartographic Engineer', 'Eng. Cartógrafo') + '''<o:p></o:p></b></p>
</br>
<div align="right">'''+ Imgs().social_table_color + '''
</div>
<o:p></o:p></b></p>
</footer>
</body>
</html>
'''
        # Initial approximation
cont = 0
table1 = ''
for k, pnt in enumerate(pnts_ini):
X = pnt[0]
Y = pnt[1]
cont += 1
tableRowN = tabela1
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table1 += tableRowN
        # Adjustment
cont = 0
table2 = ''
SD = SigmaXa.diagonal()
for k in range(len(pnts_ini)):
X = Xa[2*k, 0]
Y = Xa[2*k+1, 0]
sdx = sqrt(SD[0, 2*k])
sdy = sqrt(SD[0, 2*k+1])
cont += 1
tableRowN = tabela2
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_E]': self.tr('{:,.3f}'.format(sdx), '{:,.3f}'.format(sdx).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_N]': self.tr('{:,.3f}'.format(sdy), '{:,.3f}'.format(sdy).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table2 += tableRowN
        # Observations
table3 = ''
SD = SigmaLa.diagonal()
        for k in range(n_d): # Distances
obs = Lb[k, 0]
adj_obs = La[k, 0]
sd = sqrt(SD[0, k])
r = V[k, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str(round(obs,3)),
'[r]': str(round(r,4)),
'[adj_obs]': str(round(adj_obs,3)),
'[sd]': str(round(sd,3))
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        for t in range(k+1, k+1+ n_angs): # Angles
obs = Lb[t, 0]
adj_obs = La[t, 0]
sd = sqrt(SD[0, t])
r = V[t, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str2HTML(dd2dms(obs/3600,3)),
'[r]': str(round(r,4)) + '"',
'[adj_obs]': str2HTML(dd2dms(adj_obs/3600,3)),
'[sd]': str(round(sd,3)) + '"'
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        # Main document
itens = {
'[TABLE 1]': table1,
'[TABLE 2]': table2,
'[TABLE 3]': table3,
'[sigma2]': str(round(sigma2[0,0],3))
}
for item in itens:
texto = texto.replace(item, itens[item])
        # Export HTML
arq = open(html_output, 'w')
arq.write(texto)
arq.close()
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
return {self.OUTPUT: dest_id,
self.HTML: html_output}
| StarcoderdataPython |
135821 | """The Media Source implementation for the Jellyfin integration."""
from __future__ import annotations
import logging
import mimetypes
from typing import Any
import urllib.parse
from jellyfin_apiclient_python.api import jellyfin_url
from jellyfin_apiclient_python.client import JellyfinClient
from homeassistant.components.media_player.const import (
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_TRACK,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import HomeAssistant
from .const import (
COLLECTION_TYPE_MUSIC,
DATA_CLIENT,
DOMAIN,
ITEM_KEY_COLLECTION_TYPE,
ITEM_KEY_ID,
ITEM_KEY_IMAGE_TAGS,
ITEM_KEY_INDEX_NUMBER,
ITEM_KEY_MEDIA_SOURCES,
ITEM_KEY_MEDIA_TYPE,
ITEM_KEY_NAME,
ITEM_TYPE_ALBUM,
ITEM_TYPE_ARTIST,
ITEM_TYPE_AUDIO,
ITEM_TYPE_LIBRARY,
MAX_IMAGE_WIDTH,
MAX_STREAMING_BITRATE,
MEDIA_SOURCE_KEY_PATH,
MEDIA_TYPE_AUDIO,
MEDIA_TYPE_NONE,
SUPPORTED_COLLECTION_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_get_media_source(hass: HomeAssistant) -> MediaSource:
"""Set up Jellyfin media source."""
# Currently only a single Jellyfin server is supported
entry = hass.config_entries.async_entries(DOMAIN)[0]
data = hass.data[DOMAIN][entry.entry_id]
client: JellyfinClient = data[DATA_CLIENT]
return JellyfinSource(hass, client)
class JellyfinSource(MediaSource):
"""Represents a Jellyfin server."""
name: str = "Jellyfin"
def __init__(self, hass: HomeAssistant, client: JellyfinClient) -> None:
"""Initialize the Jellyfin media source."""
super().__init__(DOMAIN)
self.hass = hass
self.client = client
self.api = client.jellyfin
self.url = jellyfin_url(client, "")
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Return a streamable URL and associated mime type."""
media_item = await self.hass.async_add_executor_job(
self.api.get_item, item.identifier
)
stream_url = self._get_stream_url(media_item)
mime_type = _media_mime_type(media_item)
return PlayMedia(stream_url, mime_type)
async def async_browse_media(self, item: MediaSourceItem) -> BrowseMediaSource:
"""Return a browsable Jellyfin media source."""
if not item.identifier:
return await self._build_libraries()
media_item = await self.hass.async_add_executor_job(
self.api.get_item, item.identifier
)
item_type = media_item["Type"]
if item_type == ITEM_TYPE_LIBRARY:
return await self._build_library(media_item, True)
if item_type == ITEM_TYPE_ARTIST:
return await self._build_artist(media_item, True)
if item_type == ITEM_TYPE_ALBUM:
return await self._build_album(media_item, True)
raise BrowseError(f"Unsupported item type {item_type}")
async def _build_libraries(self) -> BrowseMediaSource:
"""Return all supported libraries the user has access to as media sources."""
base = BrowseMediaSource(
domain=DOMAIN,
identifier=None,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type=MEDIA_TYPE_NONE,
title=self.name,
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_DIRECTORY,
)
libraries = await self._get_libraries()
base.children = []
for library in libraries:
base.children.append(await self._build_library(library, False))
return base
async def _get_libraries(self) -> list[dict[str, Any]]:
"""Return all supported libraries a user has access to."""
response = await self.hass.async_add_executor_job(self.api.get_media_folders)
libraries = response["Items"]
result = []
for library in libraries:
if ITEM_KEY_COLLECTION_TYPE in library:
if library[ITEM_KEY_COLLECTION_TYPE] in SUPPORTED_COLLECTION_TYPES:
result.append(library)
return result
async def _build_library(
self, library: dict[str, Any], include_children: bool
) -> BrowseMediaSource:
"""Return a single library as a browsable media source."""
collection_type = library[ITEM_KEY_COLLECTION_TYPE]
if collection_type == COLLECTION_TYPE_MUSIC:
return await self._build_music_library(library, include_children)
raise BrowseError(f"Unsupported collection type {collection_type}")
async def _build_music_library(
self, library: dict[str, Any], include_children: bool
) -> BrowseMediaSource:
"""Return a single music library as a browsable media source."""
library_id = library[ITEM_KEY_ID]
library_name = library[ITEM_KEY_NAME]
result = BrowseMediaSource(
domain=DOMAIN,
identifier=library_id,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type=MEDIA_TYPE_NONE,
title=library_name,
can_play=False,
can_expand=True,
)
if include_children:
result.children_media_class = MEDIA_CLASS_ARTIST
result.children = await self._build_artists(library_id) # type: ignore[assignment]
if not result.children:
result.children_media_class = MEDIA_CLASS_ALBUM
result.children = await self._build_albums(library_id) # type: ignore[assignment]
return result
async def _build_artists(self, library_id: str) -> list[BrowseMediaSource]:
"""Return all artists in the music library."""
artists = await self._get_children(library_id, ITEM_TYPE_ARTIST)
artists = sorted(artists, key=lambda k: k[ITEM_KEY_NAME]) # type: ignore[no-any-return]
return [await self._build_artist(artist, False) for artist in artists]
async def _build_artist(
self, artist: dict[str, Any], include_children: bool
) -> BrowseMediaSource:
"""Return a single artist as a browsable media source."""
artist_id = artist[ITEM_KEY_ID]
artist_name = artist[ITEM_KEY_NAME]
thumbnail_url = self._get_thumbnail_url(artist)
result = BrowseMediaSource(
domain=DOMAIN,
identifier=artist_id,
media_class=MEDIA_CLASS_ARTIST,
media_content_type=MEDIA_TYPE_NONE,
title=artist_name,
can_play=False,
can_expand=True,
thumbnail=thumbnail_url,
)
if include_children:
result.children_media_class = MEDIA_CLASS_ALBUM
result.children = await self._build_albums(artist_id) # type: ignore[assignment]
return result
async def _build_albums(self, parent_id: str) -> list[BrowseMediaSource]:
"""Return all albums of a single artist as browsable media sources."""
albums = await self._get_children(parent_id, ITEM_TYPE_ALBUM)
albums = sorted(albums, key=lambda k: k[ITEM_KEY_NAME]) # type: ignore[no-any-return]
return [await self._build_album(album, False) for album in albums]
async def _build_album(
self, album: dict[str, Any], include_children: bool
) -> BrowseMediaSource:
"""Return a single album as a browsable media source."""
album_id = album[ITEM_KEY_ID]
album_title = album[ITEM_KEY_NAME]
thumbnail_url = self._get_thumbnail_url(album)
result = BrowseMediaSource(
domain=DOMAIN,
identifier=album_id,
media_class=MEDIA_CLASS_ALBUM,
media_content_type=MEDIA_TYPE_NONE,
title=album_title,
can_play=False,
can_expand=True,
thumbnail=thumbnail_url,
)
if include_children:
result.children_media_class = MEDIA_CLASS_TRACK
result.children = await self._build_tracks(album_id) # type: ignore[assignment]
return result
async def _build_tracks(self, album_id: str) -> list[BrowseMediaSource]:
"""Return all tracks of a single album as browsable media sources."""
tracks = await self._get_children(album_id, ITEM_TYPE_AUDIO)
tracks = sorted(
tracks,
key=lambda k: (
ITEM_KEY_INDEX_NUMBER not in k,
k.get(ITEM_KEY_INDEX_NUMBER, None),
),
)
return [self._build_track(track) for track in tracks]
def _build_track(self, track: dict[str, Any]) -> BrowseMediaSource:
"""Return a single track as a browsable media source."""
track_id = track[ITEM_KEY_ID]
track_title = track[ITEM_KEY_NAME]
mime_type = _media_mime_type(track)
thumbnail_url = self._get_thumbnail_url(track)
result = BrowseMediaSource(
domain=DOMAIN,
identifier=track_id,
media_class=MEDIA_CLASS_TRACK,
media_content_type=mime_type,
title=track_title,
can_play=True,
can_expand=False,
thumbnail=thumbnail_url,
)
return result
async def _get_children(
self, parent_id: str, item_type: str
) -> list[dict[str, Any]]:
"""Return all children for the parent_id whose item type is item_type."""
params = {
"Recursive": "true",
"ParentId": parent_id,
"IncludeItemTypes": item_type,
}
if item_type == ITEM_TYPE_AUDIO:
params["Fields"] = ITEM_KEY_MEDIA_SOURCES
result = await self.hass.async_add_executor_job(self.api.user_items, "", params)
return result["Items"] # type: ignore[no-any-return]
def _get_thumbnail_url(self, media_item: dict[str, Any]) -> str | None:
"""Return the URL for the primary image of a media item if available."""
image_tags = media_item[ITEM_KEY_IMAGE_TAGS]
if "Primary" not in image_tags:
return None
item_id = media_item[ITEM_KEY_ID]
return str(self.api.artwork(item_id, "Primary", MAX_IMAGE_WIDTH))
def _get_stream_url(self, media_item: dict[str, Any]) -> str:
"""Return the stream URL for a media item."""
media_type = media_item[ITEM_KEY_MEDIA_TYPE]
if media_type == MEDIA_TYPE_AUDIO:
return self._get_audio_stream_url(media_item)
raise BrowseError(f"Unsupported media type {media_type}")
def _get_audio_stream_url(self, media_item: dict[str, Any]) -> str:
"""Return the stream URL for a music media item."""
item_id = media_item[ITEM_KEY_ID]
user_id = self.client.config.data["auth.user_id"]
device_id = self.client.config.data["app.device_id"]
api_key = self.client.config.data["auth.token"]
params = urllib.parse.urlencode(
{
"UserId": user_id,
"DeviceId": device_id,
"api_key": api_key,
"MaxStreamingBitrate": MAX_STREAMING_BITRATE,
}
)
return f"{self.url}Audio/{item_id}/universal?{params}"
def _media_mime_type(media_item: dict[str, Any]) -> str:
"""Return the mime type of a media item."""
if not media_item[ITEM_KEY_MEDIA_SOURCES]:
raise BrowseError("Unable to determine mime type for item without media source")
media_source = media_item[ITEM_KEY_MEDIA_SOURCES][0]
path = media_source[MEDIA_SOURCE_KEY_PATH]
mime_type, _ = mimetypes.guess_type(path)
if mime_type is not None:
return mime_type
raise BrowseError(f"Unable to determine mime type for path {path}")
| StarcoderdataPython |
182506 | <gh_stars>0
import sys
import math
points = []
with open(sys.argv[1], 'r') as infile:
for line in infile:
line = line.strip().split(',')
points.append(str(line[0]) + "," + str(line[1]) + "," + str(line[2]))
with open(sys.argv[2], 'w') as outfile:
with open(sys.argv[3], 'w') as edgefile:
        radius = 15  # connect points whose Euclidean distance is below this
        # For each point: write a node line, then link it to every later point
        # within the radius (each pair is written once).
        for ind in range(len(points)):
            temp = points[ind].strip().split(',')
            x = temp[0]
            y = temp[1]
            z = temp[2]
            outfile.write("s" + str(ind + 1) + "," + str(x) + "," + str(y) + "," + str(z) + "\n")
for index in range(ind + 1, len(points)):
tmp = points[index].strip().split(',')
distance = math.sqrt(math.pow(int(x) - int(tmp[0]), 2) + math.pow(int(y) - int(tmp[1]), 2) + math.pow(int(z) - int(tmp[2]), 2))
if distance < radius:
edgefile.write("s" + str(ind + 1) + "," + "s" + str(index + 1) + "\n")
| StarcoderdataPython |
3393766 | from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
from data.dataset import train, test, books
def baseline_model() -> Tuple[Dict[int, List], Dict[int, List]]:
global train, test
sol = test.groupby(["user_id"])["book_id"].agg({"unique"}).reset_index()
gt = {}
for user in tqdm(sol["user_id"].unique()):
gt[user] = list(sol[sol["user_id"] == user]["unique"].values[0])
rec_df = pd.DataFrame()
rec_df["user_id"] = train["user_id"].unique()
popular_rec_model = books.sort_values(by="books_count", ascending=False)[
"book_id"
].values[0:500]
    # Dataset for the statistics-based (author-count) model
train = pd.merge(
train, books[["book_id", "authors", "ratings_count"]], how="left", on="book_id"
)
agg = train.groupby(["user_id", "authors"])["authors"].agg({"count"}).reset_index()
agg = agg.sort_values(by="count", ascending=False)
author_books = (
books[["book_id", "authors", "ratings_count"]]
.sort_values(by=["authors", "ratings_count"], ascending=[True, False])
.reset_index(drop=True)
)
author_rec_model = agg.merge(author_books, how="left", on=["authors"])
    # Extract each user's list of books already read (used to filter recommendations)
read_list = train.groupby(["user_id"])["book_id"].agg({"unique"}).reset_index()
total_rec_list = {}
for user in tqdm(rec_df["user_id"].unique()):
rec_list = []
author_rec_model_ = author_rec_model[author_rec_model["user_id"] == user][
"book_id"
].values
seen = read_list[read_list["user_id"] == user]["unique"].values[0]
for rec in author_rec_model_:
if rec not in seen:
rec_list.append(rec)
if len(rec_list) < 200:
for i in popular_rec_model[0:200]:
if i not in seen:
rec_list.append(i)
total_rec_list[user] = rec_list[0:200]
return total_rec_list, gt
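
# A minimal usage sketch (added for illustration; the hit-rate metric below is
# an assumption about how `gt` is meant to be consumed, not part of the repo):
if __name__ == "__main__":
    total_rec_list, gt = baseline_model()
    hits = total = 0
    for user, truth in gt.items():
        hits += len(set(total_rec_list.get(user, [])) & set(truth))
        total += len(truth)
    print("hit rate @200: {:.4f}".format(hits / max(total, 1)))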
| StarcoderdataPython |
1705776 | <reponame>miladgharibi/PicoSchool<filename>account/views.py
from django.contrib import (
auth,
messages,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import PasswordChangeView
from django.shortcuts import (
render,
redirect,
)
from django.urls import reverse_lazy
from account.decorators import is_login
from account.models import User
@is_login()
def login_view(request):
if request.method == 'POST':
user_input = request.POST['username']
try:
username = User.objects.get(national_code=user_input).username
except User.DoesNotExist:
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
if user.is_manager:
return redirect(reverse_lazy('manager:manager_panel'))
elif user.is_teacher:
return redirect(reverse_lazy('teacher:teacher_panel'))
elif user.is_student:
return redirect(reverse_lazy('student:student_panel'))
elif user.is_parent:
return redirect(reverse_lazy('parent:parent_panel'))
else:
messages.error(request, 'نام کاربری یا گذرواژه وارد شده نادرست است')
context = {
'page_title': 'ورود',
}
return render(request, "account/signin.html", context)
class ChangePasswordView(PasswordChangeView):
template_name = "manager/persons/change_password.html"
success_url = reverse_lazy('account:user_password_done')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = "تغییر گذرواژه"
return context
@login_required()
def password_change_done(request):
return render(request, "manager/persons/change_password_done.html", {"page_title": "گذرواژه با موفقیت تغییر کرد"})
| StarcoderdataPython |
64414 | <filename>learn2learn/_version.py
__version__ = '0.0.5.1'
| StarcoderdataPython |
4822004 | from django.conf.urls.defaults import *
urlpatterns = patterns('aquaticore.databases.views',
(r'^$', 'index'),
(r'^(?P<database_id>\d+)/$', 'detail'),
)
| StarcoderdataPython |
118449 | <filename>pypospack/task/tasks_lammps/elastic_calculation.py
import os
from collections import OrderedDict
import pypospack.potential as potential
from pypospack.task.lammps import LammpsSimulationError, LammpsSimulation
class LammpsElasticCalculation(LammpsSimulation):
""" Class for LAMMPS elastic calculation
    This class sets up, runs and processes the relaxation of atomic positions
    to find the lowest energy structure in the local basin of attraction. The
    simulation cell is then deformed and the atomic positions relaxed again;
    the resulting energy differences yield the elements of the elastic tensor.
This class is based on the LAMMPS script written by <NAME>
Args:
task_name (str): the name of the task
task_directory (str): the directory of the task
"""
def __init__(self,
task_name,
task_directory,
structure_filename,
restart=False,
fullauto=False):
_task_type = 'lmps_elastic'
LammpsSimulation.__init__(self,
task_name=task_name,
task_directory=task_directory,
task_type=_task_type,
structure_filename=structure_filename,
restart=restart,
fullauto=fullauto)
self.lammps_results_names = [
'c11','c12','c13',
'c22','c23','c24','c25','c26',
'c33','c34','c35','c36',
'c44','c45','c46',
'c55','c56',
'c66']
def postprocess(self):
LammpsSimulation.postprocess(self)
def on_post(self,configuration=None):
#self.__get_results_from_lammps_outputfile()
self.get_elastic_tensor()
LammpsSimulation.on_post(self,configuration=configuration)
def on_finished(self,configuration=None):
pass
def write_potential_file(self):
if self.potential is None:
return
_setfl_dst_filename = None
if isinstance(self.potential,potential.EamPotential):
_setfl_dst_filename = os.path.join(
self.task_directory,
"{}.eam.alloy".format("".join(self.potential.symbols)))
_str_out = self.potential.lammps_potential_section_to_string(
setfl_dst_filename=_setfl_dst_filename)
# <-------- FOR STILLINGER WEBER POTENTIALS
elif isinstance(self.potential,potential.StillingerWeberPotential):
# define the filename --- SiO.parameters, Si.parameters
_symbols_str = "".join(self.potential.symbols)
_p_fname = "{}.parameters".format(_symbols_str)
# set the name of the output file
self.potential.lmps_parameter_filename = _p_fname
# get the string of potential.mod
_str_out = self.potential.lammps_potential_section_to_string()
# write out the potential parameter file
_str_lmps_params = self.potential.lammps_parameter_file_to_string()
_p_fname_dst = os.path.join(self.task_directory,_p_fname)
with open(_p_fname_dst,'w') as f:
f.write(_str_lmps_params)
#default behavior
else:
_str_out = self.potential.lammps_potential_section_to_string()
_str_out += "\n"
# coulumbic charge summation
if self.potential.is_charge:
_str_out += "kspace_style pppm 1.0e-5\n"
_str_out += "\n"
# neighborlists
_str_out += "neighbor 1.0 bin\n"
_str_out += "neigh_modify every 1 delay 0 check yes\n"
# Setup output
_str_out += "\n".join([
"thermo 1",
"thermo_style custom step temp pe press pxx pyy pzz pxy pxz pyz lx ly lz vol",
"thermo_modify norm no\n"])
_lammps_potentialmod_filename = os.path.join(
self.task_directory,
self.lammps_potentialmod_filename)
with open(_lammps_potentialmod_filename,'w') as f:
f.write(_str_out)
def write_lammps_input_file(self):
""" writes LAMMPS input file
This method is modified from the LammpsSimulation template due to
the need for the multiple files.
Args:
filename (str): name of the input file for LAMMPS. Default is
'lammps.in'.
Attributes:
"""
str_out = self.lammps_input_file_to_string()
filename = os.path.join(self.task_directory,
self.lammps_input_filename)
with open(filename,'w') as f:
f.write(str_out)
str_out = self.lammps_init_mod_to_string()
filename = os.path.join(self.task_directory,'init.mod')
with open(filename,'w') as f:
f.write(str_out)
str_out = self.lammps_displace_mod_to_string()
filename = os.path.join(self.task_directory,'displace.mod')
with open(filename,'w') as f:
f.write(str_out)
def lammps_input_file_to_string(self):
str_out = (
"include init.mod\n"
"include potential.mod\n"
"# ---- Compute initial state\n"
"fix 3 all box/relax aniso 0.0\n"
"minimize ${etol} ${ftol} ${maxiter} ${maxeval}\n"
"\n"
"variable tmp equal pxx\n"
"variable pxx0 equal ${tmp}\n"
"variable tmp equal pyy\n"
"variable pyy0 equal ${tmp}\n"
"variable tmp equal pzz\n"
"variable pzz0 equal ${tmp}\n"
"variable tmp equal pyz\n"
"variable pyz0 equal ${tmp}\n"
"variable tmp equal pxz\n"
"variable pxz0 equal ${tmp}\n"
"variable tmp equal pxy\n"
"variable pxy0 equal ${tmp}\n"
"\n"
"variable tmp equal lx\n"
"variable lx0 equal ${tmp}\n"
"variable tmp equal ly\n"
"variable ly0 equal ${tmp}\n"
"variable tmp equal lz\n"
"variable lz0 equal ${tmp}\n"
"\n"
" # ---- define the derivatives w.r.t. strain components\n"
"variable d1 equal -(v_pxx1-${pxx0})/(v_delta/v_len0)*${cfac}\n"
"variable d2 equal -(v_pyy1-${pyy0})/(v_delta/v_len0)*${cfac}\n"
"variable d3 equal -(v_pzz1-${pzz0})/(v_delta/v_len0)*${cfac}\n"
"variable d4 equal -(v_pyz1-${pyz0})/(v_delta/v_len0)*${cfac}\n"
"variable d5 equal -(v_pxz1-${pxz0})/(v_delta/v_len0)*${cfac}\n"
"variable d6 equal -(v_pxy1-${pxy0})/(v_delta/v_len0)*${cfac}\n"
"\n"
"# ---- write restart files\n"
"unfix 3\n"
"write_restart restart.equil\n"
"# ---- uxx Perturbation\n"
"variable dir equal 1\n"
"include displace.mod\n"
"# ---- uyy Perturbation\n"
"variable dir equal 2\n"
"include displace.mod\n"
"# ---- uzz Perturbation\n"
"variable dir equal 3\n"
"include displace.mod\n"
"# ---- uyz Perturbation\n"
"variable dir equal 4\n"
"include displace.mod\n"
"# ---- uxz Perturbation\n"
"variable dir equal 5\n"
"include displace.mod\n"
"# ---- uxy Perturbation\n"
"variable dir equal 6\n"
"include displace.mod\n"
"\n"
"# ---- Output final values\n"
"variable C11all equal ${C11}\n"
"variable C22all equal ${C22}\n"
"variable C33all equal ${C33}\n"
"variable C12all equal 0.5*(${C12}+${C21})\n"
"variable C13all equal 0.5*(${C13}+${C31})\n"
"variable C23all equal 0.5*(${C23}+${C32})\n"
"variable C44all equal ${C44}\n"
"variable C55all equal ${C55}\n"
"variable C66all equal ${C66}\n"
"variable C14all equal 0.5*(${C14}+${C41})\n"
"variable C15all equal 0.5*(${C15}+${C51})\n"
"variable C16all equal 0.5*(${C16}+${C61})\n"
"variable C24all equal 0.5*(${C24}+${C42})\n"
"variable C25all equal 0.5*(${C25}+${C52})\n"
"variable C26all equal 0.5*(${C26}+${C62})\n"
"variable C34all equal 0.5*(${C34}+${C43})\n"
"variable C35all equal 0.5*(${C35}+${C53})\n"
"variable C36all equal 0.5*(${C36}+${C63})\n"
"variable C45all equal 0.5*(${C45}+${C54})\n"
"variable C46all equal 0.5*(${C46}+${C64})\n"
"variable C56all equal 0.5*(${C56}+${C65})\n"
"\n"
"print \"c11 = ${C11all} ${cunits}\"\n"
"print \"c22 = ${C22all} ${cunits}\"\n"
"print \"c33 = ${C33all} ${cunits}\"\n"
"print \"c12 = ${C12all} ${cunits}\"\n"
"print \"c13 = ${C13all} ${cunits}\"\n"
"print \"c23 = ${C23all} ${cunits}\"\n"
"print \"c44 = ${C44all} ${cunits}\"\n"
"print \"c55 = ${C55all} ${cunits}\"\n"
"print \"c66 = ${C66all} ${cunits}\"\n"
"print \"c14 = ${C14all} ${cunits}\"\n"
"print \"c15 = ${C15all} ${cunits}\"\n"
"print \"c16 = ${C16all} ${cunits}\"\n"
"print \"c24 = ${C24all} ${cunits}\"\n"
"print \"c25 = ${C25all} ${cunits}\"\n"
"print \"c26 = ${C26all} ${cunits}\"\n"
"print \"c34 = ${C34all} ${cunits}\"\n"
"print \"c35 = ${C35all} ${cunits}\"\n"
"print \"c36 = ${C36all} ${cunits}\"\n"
"print \"c45 = ${C45all} ${cunits}\"\n"
"print \"c46 = ${C46all} ${cunits}\"\n"
"print \"c56 = ${C56all} ${cunits}\"\n"
"print \"lammps_sim_done\"\n")
return str_out
def lammps_init_mod_to_string(self):
if self.potential.is_charge:
_atom_style = 'charge'
else:
_atom_style = 'atomic'
_structure_file = self.lammps_structure_filename
str_out = (\
"# ---- init.mod file\n"
"variable up equal 1.0e-6\n"
"units metal\n"
"dimension 3\n"
"boundary p p p\n"
"atom_style {atom_style}\n"
"atom_modify map array\n"
"variable cfac equal 1.0e-4\n"
"variable cunits string GPa\n"
"# ---- define minimization parameters\n"
"variable etol equal 0.0\n"
"variable ftol equal 1.0e-10\n"
"variable maxiter equal 100\n"
"variable maxeval equal 1000\n"
"variable dmax equal 1.0e-2\n"
"# --- read data structure\n"
"read_data {structure_file}\n"
).format(\
atom_style=_atom_style,
structure_file=_structure_file
)
return str_out
def lammps_displace_mod_to_string(self):
str_out = (\
"# NOTE: This script should not need to be\n"
"# modified. See in.elastic for more info.\n"
"# Find which reference length to use\n"
"\n"
"if \"${dir} == 1\" then &\n"
" \"variable len0 equal ${lx0}\"\n"
"if \"${dir} == 2\" then &\n"
" \"variable len0 equal ${ly0}\"\n"
"if \"${dir} == 3\" then &\n"
" \"variable len0 equal ${lz0}\"\n"
"if \"${dir} == 4\" then &\n"
" \"variable len0 equal ${lz0}\"\n"
"if \"${dir} == 5\" then &\n"
" \"variable len0 equal ${lz0}\"\n"
"if \"${dir} == 6\" then &\n"
" \"variable len0 equal ${ly0}\"\n"
"\n"
"# Reset box and simulation parameters\n"
"\n"
"clear\n"
"read_restart restart.equil remap\n"
"include potential.mod\n"
"\n"
"# Negative deformation\n"
"\n"
"variable delta equal -${up}*${len0}\n"
"if \"${dir} == 1\" then &\n"
" \"change_box all x delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 2\" then &\n"
" \"change_box all y delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 3\" then &\n"
" \"change_box all z delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 4\" then &\n"
" \"change_box all yz delta ${delta} remap units box\"\n"
"if \"${dir} == 5\" then &\n"
" \"change_box all xz delta ${delta} remap units box\"\n"
"if \"${dir} == 6\" then &\n"
" \"change_box all xy delta ${delta} remap units box\"\n"
"\n"
"# Relax atoms positions\n"
"\n"
"minimize ${etol} ${ftol} ${maxiter} ${maxeval}\n"
"\n"
"# Obtain new stress tensor\n"
"\n"
"variable tmp equal pxx\n"
"variable pxx1 equal ${tmp}\n"
"variable tmp equal pyy\n"
"variable pyy1 equal ${tmp}\n"
"variable tmp equal pzz\n"
"variable pzz1 equal ${tmp}\n"
"variable tmp equal pxy\n"
"variable pxy1 equal ${tmp}\n"
"variable tmp equal pxz\n"
"variable pxz1 equal ${tmp}\n"
"variable tmp equal pyz\n"
"variable pyz1 equal ${tmp}\n"
"\n"
"# Compute elastic constant from pressure tensor\n"
"\n"
"variable C1neg equal ${d1}\n"
"variable C2neg equal ${d2}\n"
"variable C3neg equal ${d3}\n"
"variable C4neg equal ${d4}\n"
"variable C5neg equal ${d5}\n"
"variable C6neg equal ${d6}\n"
"\n"
"# Reset box and simulation parameters\n"
"\n"
"clear\n"
"read_restart restart.equil remap\n"
"include potential.mod\n"
"\n"
"# Positive deformation\n"
"\n"
"variable delta equal ${up}*${len0}\n"
"if \"${dir} == 1\" then &\n"
" \"change_box all x delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 2\" then &\n"
" \"change_box all y delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 3\" then &\n"
" \"change_box all z delta 0 ${delta} remap units box\"\n"
"if \"${dir} == 4\" then &\n"
" \"change_box all yz delta ${delta} remap units box\"\n"
"if \"${dir} == 5\" then &\n"
" \"change_box all xz delta ${delta} remap units box\"\n"
"if \"${dir} == 6\" then &\n"
" \"change_box all xy delta ${delta} remap units box\"\n"
"\n"
"# Relax atoms positions\n"
"\n"
"minimize ${etol} ${ftol} ${maxiter} ${maxeval}\n"
"\n"
"# Obtain new stress tensor\n"
"\n"
"variable tmp equal pe\n"
"variable e1 equal ${tmp}\n"
"variable tmp equal press\n"
"variable p1 equal ${tmp}\n"
"variable tmp equal pxx\n"
"variable pxx1 equal ${tmp}\n"
"variable tmp equal pyy\n"
"variable pyy1 equal ${tmp}\n"
"variable tmp equal pzz\n"
"variable pzz1 equal ${tmp}\n"
"variable tmp equal pxy\n"
"variable pxy1 equal ${tmp}\n"
"variable tmp equal pxz\n"
"variable pxz1 equal ${tmp}\n"
"variable tmp equal pyz\n"
"variable pyz1 equal ${tmp}\n"
"\n"
"# Compute elastic constant from pressure tensor\n"
"\n"
"variable C1pos equal ${d1}\n"
"variable C2pos equal ${d2}\n"
"variable C3pos equal ${d3}\n"
"variable C4pos equal ${d4}\n"
"variable C5pos equal ${d5}\n"
"variable C6pos equal ${d6}\n"
"\n"
"# Combine positive and negative\n"
"\n"
"variable C1${dir} equal 0.5*(${C1neg}+${C1pos})\n"
"variable C2${dir} equal 0.5*(${C2neg}+${C2pos})\n"
"variable C3${dir} equal 0.5*(${C3neg}+${C3pos})\n"
"variable C4${dir} equal 0.5*(${C4neg}+${C4pos})\n"
"variable C5${dir} equal 0.5*(${C5neg}+${C5pos})\n"
"variable C6${dir} equal 0.5*(${C6neg}+${C6pos})\n"
"\n"
"# Delete dir to make sure it is not reused\n"
"\n"
"variable dir delete\n")
return str_out
def get_elastic_tensor(self):
filename = os.path.join(self.task_directory,'lammps.out')
with open(filename,'r') as f:
lines = f.readlines()
_lammps_results_names = self.lammps_results_names
self.results = OrderedDict()
_task_name = self.task_name
for i,line in enumerate(lines):
for name in _lammps_results_names:
if line.startswith('{} = '.format(name)):
# get the result name and result value from the file
# _rk = key to store result
# _rv = value to store
_rk = '{}.{}'.format(_task_name,name)
_rv = float(line.split('=')[1].split()[0].strip())
self.results[_rk] = _rv
elif line.startswith('ERROR'):
print('name:{}'.format(name))
                    print('line:{}'.format(line.strip()))
raise NotImplementedError
#_all_components_exist = all([n in self.results for n in _lammps_results_names])
#if not _all_components_exist:
# for n in _lammps_results_names:
# print(n,n in self.results.keys())
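
# A minimal driver sketch (hypothetical names and paths, for illustration only;
# in a real run a configured Potential object and the pypospack task lifecycle
# drive configuration, execution and post-processing):
#   task = LammpsElasticCalculation(
#       task_name='Ni_fcc.lmps_elastic',
#       task_directory='Ni_fcc.lmps_elastic',
#       structure_filename='Ni_fcc.vasp')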
| StarcoderdataPython |
41122 | <filename>udp-client.py<gh_stars>0
import socket, traceback
host = '255.255.255.255' # IPv4 broadcast address: send to all hosts on the local network
port = 2081
print "Creating socker on port: ", port
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#print "Setting REUSEADDR option"
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print "Setting SO_BROADCAST option"
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
print "Connecting to host: ", host
s.connect((host, port))
try:
print "Going to send data ..."
s.send("I am here")
print "... sent"
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
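
# A matching receiver sketch (run in another terminal or host; illustrative only,
# not part of the original script):
#   r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   r.bind(('', port))
#   while True:
#       data, addr = r.recvfrom(8192)
#       print "Received", repr(data), "from", addr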
| StarcoderdataPython |
52765 | import pytest
import sys
sys.path.append(".")
sys.path.append("../.")
from boxdetect import config
from boxdetect import pipelines
def test_save_load_config(capsys):
cfg = config.PipelinesConfig()
cfg.morph_kernels_thickness = 10
cfg.save_yaml('test_cfg.yaml')
cfg2 = config.PipelinesConfig('test_cfg.yaml')
assert(cfg.__dict__ == cfg2.__dict__)
cfg.new_var = 10
cfg.save_yaml('test_cfg.yaml')
cfg2.load_yaml('test_cfg.yaml')
captured = capsys.readouterr()
assert("WARNING" in captured.out)
def test_update_num_iterations():
cfg = config.PipelinesConfig()
cfg.height_range = (5, 5)
cfg.width_range = [(10, 10), (20, 20)]
cfg.update_num_iterations()
assert(cfg.num_iterations == 2)
assert(len(cfg.height_range) == 2)
assert(len(cfg.width_range) == 2)
def test_autoconfig_simple():
box_sizes = [(42, 44), (41, 47), (41, 44), (41, 44), (125, 54), (92, 103)]
file_path = "tests/data/autoconfig_simple/dummy_example.png"
cfg = config.PipelinesConfig()
cfg.autoconfigure(box_sizes)
checkboxes = pipelines.get_checkboxes(
file_path, cfg=cfg, px_threshold=0.01, plot=False, verbose=False)
assert(len(checkboxes) == 12)
cfg = config.PipelinesConfig()
cfg.autoconfigure(box_sizes)
rects, groups, _, _ = pipelines.get_boxes(
file_path, cfg=cfg, plot=False)
assert(len(rects) == 23)
assert(len(groups) == 14)
def test_autoconfig_from_vott_simple():
vott_dir = "tests/data/autoconfig_simple"
file_path = "tests/data/autoconfig_simple/dummy_example.png"
cfg = config.PipelinesConfig()
cfg.autoconfigure_from_vott(vott_dir, class_tags=['box'])
checkboxes = pipelines.get_checkboxes(
file_path, cfg=cfg, px_threshold=0.01, plot=False, verbose=False)
assert(len(checkboxes) == 12)
cfg = config.PipelinesConfig()
cfg.autoconfigure_from_vott(vott_dir, class_tags=['box'])
rects, groups, _, _ = pipelines.get_boxes(
file_path, cfg=cfg, plot=False)
assert(len(rects) == 23)
    assert(len(groups) == 14)
| StarcoderdataPython
122037 | import logging
from configparser import ConfigParser
from os import path, listdir
LOGGER = logging.getLogger('gullveig')
def priority_sort_files(k: str):
first = 0
second = k
if '-' not in k:
return first, second
parts = k.split('-', 2)
# noinspection PyBroadException
try:
first = int(parts[0])
second = parts[1]
except BaseException:
pass
return first, second
class ConfigurationError(RuntimeError):
def __init__(self, *args: object) -> None:
super().__init__(*args)
class Configuration(ConfigParser):
def __init__(self, file_path: str, defaults: dict, required: dict = {}) -> None:
self.__file_path = path.realpath(file_path)
self.__dirname = path.dirname(self.__file_path)
self.__defaults = defaults
self.__required = required
super().__init__()
def is_file_path_valid(self):
return path.exists(self.__file_path) and path.isfile(self.__file_path)
def resolve_config_path(self, to_resolve):
if path.isabs(to_resolve):
return to_resolve
return path.abspath(path.join(
self.__dirname,
to_resolve
))
def initialize(self):
files_to_read = [
self.__file_path
]
# Scan for additional configuration files to read
d_directory = '%s.d/' % self.__file_path
if path.isdir(d_directory):
d_files = sorted(listdir(d_directory), key=priority_sort_files)
for d_file in d_files:
if d_file[-5:] != '.conf':
continue
files_to_read.append(path.join(d_directory, d_file))
LOGGER.debug('Loading configuration files, in order: %s', ', '.join(files_to_read))
self.read(files_to_read, encoding='utf-8')
# Load defaults. Slightly different than DEFAULT behavior
for section, section_values in self.__defaults.items():
if not self.has_section(section):
self[section] = section_values
continue
for key, value in section_values.items():
if not self.has_option(section, key):
self.set(section, key, value)
for section, keys in self.__required.items():
if not self.has_section(section):
raise ConfigurationError(
'Missing mandatory configuration section %s in file %s'
% (section, self.__file_path)
)
for key in keys:
if not self.has_option(section, key):
raise ConfigurationError(
'Missing mandatory configuration key %s in section %s, in file %s'
% (key, section, self.__file_path)
)
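

# A minimal usage sketch (hypothetical file name, section and key, for
# illustration only):
if __name__ == '__main__':
    cfg = Configuration(
        file_path='gullveig.conf',  # ConfigParser.read() tolerates a missing file
        defaults={'server': {'bind': '127.0.0.1'}},
        required={'server': ['bind']},
    )
    cfg.initialize()
    print(cfg.get('server', 'bind'))  # -> 127.0.0.1 unless overridden on disk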
| StarcoderdataPython |
1611384 | #To input username&pass and check if it's correct. Q22
#Made by INS, Using dictionary for faster credential check.
d={}
while True:
x=raw_input("Enter a user name : ")
y=raw_input("Enter a password : ")
d[x]=y
cont=raw_input("Do you want to add more usernames? [y/n] ")
if cont=='y':
continue
else:
break
#begining login system
a=raw_input("Enter username to log in : ")
if a in d:
    print "User found. Enter password to continue. "
    b=raw_input("Enter password to log in : ")
    if d.get(a)==b:
        print "Welcome back ",a
    else:
        print "Wrong password. Aborting program."
else:
    print "Username not found. Aborting program."
#Made by <NAME>, using LISTS
""" #Remove this quotation marks to use GM's program.
u=[]
p=[]
count=input("Enter number of usernames you want to save : ")
for i in range(0, count):
username=raw_input("Enter username to save : ")
password=raw_input("Enter password to save : ")
    u.append(username)
    p.append(password)
#Search function begins
def passcheck():
    user_=raw_input("Enter username to log in : ")
    pass_=raw_input("Enter password to log in : ")
    if user_ in u:
        # compare against the password stored at the same position as the username
        if p[u.index(user_)]==pass_:
            print "Verified user. "
        else:
            print "Password doesn't match. "
    else:
        print "Username doesn't exist"
passcheck()"""
| StarcoderdataPython |
1737889 | import json
import logging
import pathlib
import subprocess
import sys
from manubot.cite.citekey import (
citekey_to_csl_item,
standardize_citekey,
is_valid_citekey,
)
from manubot.pandoc.util import get_pandoc_info
from manubot.util import shlex_join
# For manubot cite, infer --format from --output filename extensions
extension_to_format = {
".txt": "plain",
".md": "markdown",
".docx": "docx",
".html": "html",
".xml": "jats",
}
def call_pandoc(metadata, path, format="plain"):
"""
path is the path to write to.
"""
_exit_without_pandoc()
info = get_pandoc_info()
_check_pandoc_version(info, metadata, format)
metadata_block = "---\n{yaml}\n...\n".format(
yaml=json.dumps(metadata, ensure_ascii=False, indent=2)
)
args = [
"pandoc",
"--filter",
"pandoc-citeproc",
"--output",
str(path) if path else "-",
]
if format == "markdown":
args.extend(["--to", "markdown_strict", "--wrap", "none"])
elif format == "jats":
args.extend(["--to", "jats", "--standalone"])
elif format == "docx":
args.extend(["--to", "docx"])
elif format == "html":
args.extend(["--to", "html"])
elif format == "plain":
args.extend(["--to", "plain", "--wrap", "none"])
if info["pandoc version"] >= (2,):
# Do not use ALL_CAPS for bold & underscores for italics
# https://github.com/jgm/pandoc/issues/4834#issuecomment-412972008
filter_path = (
pathlib.Path(__file__)
.joinpath("..", "plain-pandoc-filter.lua")
.resolve()
)
assert filter_path.exists()
args.extend(["--lua-filter", str(filter_path)])
logging.info("call_pandoc subprocess args:\n" + shlex_join(args))
process = subprocess.run(
args=args,
input=metadata_block.encode(),
stdout=subprocess.PIPE if path else sys.stdout,
stderr=sys.stderr,
)
process.check_returncode()
def cli_cite(args):
"""
Main function for the manubot cite command-line interface.
Does not allow user to directly specify Pandoc's --to argument, due to
    inconsistent citation rendering by output format. See
https://github.com/jgm/pandoc/issues/4834
"""
# generate CSL JSON data
csl_list = list()
for citekey in args.citekeys:
try:
if not is_valid_citekey(citekey):
continue
citekey = standardize_citekey(citekey)
csl_item = citekey_to_csl_item(citekey, prune=args.prune_csl)
csl_list.append(csl_item)
except Exception as error:
logging.error(
f"citekey_to_csl_item for {citekey!r} failed "
f"due to a {error.__class__.__name__}:\n{error}"
)
logging.info(error, exc_info=True)
# output CSL JSON data, if --render is False
if not args.render:
write_file = (
args.output.open("w", encoding="utf-8") if args.output else sys.stdout
)
with write_file:
json.dump(csl_list, write_file, ensure_ascii=False, indent=2)
write_file.write("\n")
return
# use Pandoc to render references
if not args.format and args.output:
vars(args)["format"] = extension_to_format.get(args.output.suffix)
if not args.format:
vars(args)["format"] = "plain"
pandoc_metadata = {"nocite": "@*", "csl": args.csl, "references": csl_list}
call_pandoc(metadata=pandoc_metadata, path=args.output, format=args.format)
def _exit_without_pandoc():
"""
Given info from get_pandoc_info, exit Python if Pandoc is not available.
"""
info = get_pandoc_info()
for command in "pandoc", "pandoc-citeproc":
if not info[command]:
logging.critical(
f'"{command}" not found on system. ' f"Check that Pandoc is installed."
)
raise SystemExit(1)
def _check_pandoc_version(info, metadata, format):
"""
Given info from get_pandoc_info, check that Pandoc's version is sufficient
to perform the citation rendering command specified by metadata and format.
Please add additional minimum version information to this function, as its
discovered.
"""
issues = list()
if format == "jats" and info["pandoc version"] < (2,):
issues.append("--jats requires pandoc >= v2.0.")
# --csl=URL did not work in https://travis-ci.org/greenelab/manubot/builds/417314743#L796,
# but exact version where this fails unknown
# if metadata.get('csl', '').startswith('http') and pandoc_version < (2,):
# issues.append('--csl=URL requires pandoc >= v2.0.')
issues = "\n".join(issues)
if issues:
logging.critical(f"issues with pandoc version detected:\n{issues}")
| StarcoderdataPython |
1660155 | combo_list = []
one_list = [4,5]
combo_list.extend(one_list)
combo_list
#Output
"""
[4,5]
"""
| StarcoderdataPython |
190391 | <filename>scripts/strelka-2.9.2.centos6_x86_64/lib/python/makeRunScript.py
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
This provides a function to auto-generate a workflow run script.
"""
import os, sys
from configureUtil import pickleConfigSections
def makeRunScript(scriptFile, workflowModulePath, workflowClassName, primaryConfigSection, configSections, pythonBin=None) :
"""
This function generates the python workflow runscript
The auto-generated python script presents the user with options to
run and/or continue their workflow, and reads all workflow
configuration info from an ini file.
scriptFile -- file name of the runscript to create
workflowModulePath -- the python module containing the workflow class
workflowClassName -- the workflow class name
primaryConfigSection -- the section used to create the primary workflow parameter object
configSections -- a hash or hashes representing all configuration info
@param pythonBin: optionally specify a custom python interpreter for the script she-bang
"""
assert os.path.isdir(os.path.dirname(scriptFile))
assert os.path.isfile(workflowModulePath)
workflowModulePath=os.path.abspath(workflowModulePath)
workflowModuleDir=os.path.dirname(workflowModulePath)
workflowModuleName=os.path.basename(workflowModulePath)
pyExt=".py"
if workflowModuleName.endswith(pyExt) :
workflowModuleName=workflowModuleName[:-len(pyExt)]
# dump inisections to a file
pickleConfigFile=scriptFile+".config.pickle"
pickleConfigSections(pickleConfigFile,configSections)
sfp=open(scriptFile,"w")
if pythonBin is None :
pythonBin="/usr/bin/env python2"
sfp.write(runScript1 % (pythonBin, " ".join(sys.argv),workflowModuleDir,workflowModuleName,workflowClassName))
sfp.write('\n')
sfp.write(runScript2)
sfp.write('\n')
sfp.write(runScript3)
sfp.write('\n')
sfp.write('main(r"%s","%s",%s)\n' % (pickleConfigFile, primaryConfigSection, workflowClassName))
sfp.write('\n')
sfp.close()
os.chmod(scriptFile,0755)
runScript1="""#!%s
# Workflow run script auto-generated by command: '%s'
#
import os, sys
if sys.version_info >= (3,0):
import platform
raise Exception("Strelka does not currently support python3 (version %%s detected)" %% (platform.python_version()))
if sys.version_info < (2,6):
import platform
raise Exception("Strelka requires python2 version 2.6+ (version %%s detected)" %% (platform.python_version()))
scriptDir=os.path.abspath(os.path.dirname(__file__))
sys.path.append(r'%s')
from %s import %s
"""
runScript2="""
def get_run_options(workflowClassName) :
from optparse import OptionGroup, SUPPRESS_HELP
from configBuildTimeInfo import workflowVersion
from configureUtil import EpilogOptionParser
from estimateHardware import EstException, getNodeHyperthreadCoreCount, getNodeMemMb
sgeDefaultCores=workflowClassName.runModeDefaultCores('sge')
epilog=\"\"\"Note this script can be re-run to continue the workflow run in case of interruption.
Also note that dryRun option has limited utility when task definition depends on upstream task
results -- in this case the dry run will not cover the full 'live' run task set.\"\"\"
parser = EpilogOptionParser(description="Version: %s" % (workflowVersion), epilog=epilog, version=workflowVersion)
parser.add_option("-m", "--mode", type="string",dest="mode",
help="select run mode (local|sge)")
parser.add_option("-q", "--queue", type="string",dest="queue",
help="specify scheduler queue name")
parser.add_option("-j", "--jobs", type="string",dest="jobs",
help="number of jobs, must be an integer or 'unlimited' (default: Estimate total cores on this node for local mode, %s for sge mode)" % (sgeDefaultCores))
parser.add_option("-g","--memGb", type="string",dest="memGb",
help="gigabytes of memory available to run workflow -- only meaningful in local mode, must be an integer (default: Estimate the total memory for this node for local mode, 'unlimited' for sge mode)")
parser.add_option("-d","--dryRun", dest="isDryRun",action="store_true",default=False,
help="dryRun workflow code without actually running command-tasks")
parser.add_option("--quiet", dest="isQuiet",action="store_true",default=False,
help="Don't write any log output to stderr (but still write to workspace/pyflow.data/logs/pyflow_log.txt)")
def isLocalSmtp() :
import smtplib
try :
smtplib.SMTP('localhost')
except :
return False
return True
isEmail = isLocalSmtp()
emailHelp = SUPPRESS_HELP
if isEmail :
emailHelp="send email notification of job completion status to this address (may be provided multiple times for more than one email address)"
parser.add_option("-e","--mailTo", type="string",dest="mailTo",action="append",help=emailHelp)
debug_group = OptionGroup(parser,"development debug options")
debug_group.add_option("--rescore", dest="isRescore",action="store_true",default=False,
help="Reset task list to re-run hypothesis generation and scoring without resetting graph generation.")
parser.add_option_group(debug_group)
ext_group = OptionGroup(parser,"extended portability options (should not be needed by most users)")
ext_group.add_option("--maxTaskRuntime", type="string", metavar="hh:mm:ss",
help="Specify scheduler max runtime per task, argument is provided to the 'h_rt' resource limit if using SGE (no default)")
parser.add_option_group(ext_group)
(options,args) = parser.parse_args()
if not isEmail : options.mailTo = None
if len(args) :
parser.print_help()
sys.exit(2)
if options.mode is None :
parser.print_help()
sys.exit(2)
elif options.mode not in ["local","sge"] :
parser.error("Invalid mode. Available modes are: local, sge")
if options.jobs is None :
if options.mode == "sge" :
options.jobs = sgeDefaultCores
else :
try :
options.jobs = getNodeHyperthreadCoreCount()
except EstException:
parser.error("Failed to estimate cores on this node. Please provide job count argument (-j).")
if options.jobs != "unlimited" :
options.jobs=int(options.jobs)
if options.jobs <= 0 :
parser.error("Jobs must be 'unlimited' or an integer greater than 1")
# note that the user sees gigs, but we set megs
if options.memGb is None :
if options.mode == "sge" :
options.memMb = "unlimited"
else :
try :
options.memMb = getNodeMemMb()
except EstException:
parser.error("Failed to estimate available memory on this node. Please provide available gigabyte argument (-g).")
elif options.memGb != "unlimited" :
options.memGb=int(options.memGb)
if options.memGb <= 0 :
parser.error("memGb must be 'unlimited' or an integer greater than 1")
options.memMb = 1024*options.memGb
else :
options.memMb = options.memGb
options.schedulerArgList=[]
if options.queue is not None :
options.schedulerArgList.extend(["-q",options.queue])
if options.maxTaskRuntime is not None :
options.schedulerArgList.extend(["-l","h_rt="+options.maxTaskRuntime])
options.resetTasks=[]
if options.isRescore :
options.resetTasks.append("makeHyGenDir")
return options
"""
runScript3="""
def main(pickleConfigFile, primaryConfigSection, workflowClassName) :
from configureUtil import getConfigWithPrimaryOptions
runOptions=get_run_options(workflowClassName)
flowOptions,configSections=getConfigWithPrimaryOptions(pickleConfigFile,primaryConfigSection)
# new logs and marker files to assist automated workflow monitoring:
warningpath=os.path.join(flowOptions.runDir,"workflow.warning.log.txt")
errorpath=os.path.join(flowOptions.runDir,"workflow.error.log.txt")
exitpath=os.path.join(flowOptions.runDir,"workflow.exitcode.txt")
# the exit path should only exist once the workflow completes:
if os.path.exists(exitpath) :
if not os.path.isfile(exitpath) :
raise Exception("Unexpected filesystem item: '%s'" % (exitpath))
os.unlink(exitpath)
wflow = workflowClassName(flowOptions)
retval=1
try:
retval=wflow.run(mode=runOptions.mode,
nCores=runOptions.jobs,
memMb=runOptions.memMb,
dataDirRoot=flowOptions.workDir,
mailTo=runOptions.mailTo,
isContinue="Auto",
isForceContinue=True,
isDryRun=runOptions.isDryRun,
isQuiet=runOptions.isQuiet,
schedulerArgList=runOptions.schedulerArgList,
resetTasks=runOptions.resetTasks,
successMsg=wflow.getSuccessMessage(),
retryWindow=0,
retryMode='all',
warningLogFile=warningpath,
errorLogFile=errorpath)
finally:
exitfp=open(exitpath,"w")
exitfp.write("%i\\n" % (retval))
exitfp.close()
sys.exit(retval)
"""
| StarcoderdataPython |
1752118 | <reponame>lorddaedra/django-magiclink<gh_stars>0
from __future__ import annotations
from django.urls import path
from .views import LoginSentView, LoginVerifyView, LoginView, LogoutView, SignupView
app_name = "magiclinks"
urlpatterns = [
path('login/', LoginView.as_view(), name='login'),
path('login/sent/', LoginSentView.as_view(), name='login_sent'),
path('signup/', SignupView.as_view(), name='signup'),
path('login/verify/', LoginVerifyView.as_view(), name='login_verify'),
path('logout/', LogoutView.as_view(), name='logout'),
]
| StarcoderdataPython |
3245944 | import os
from pathlib import Path
import pytest
from s3fetch import __version__
from s3fetch.command import S3Fetch
from s3fetch.exceptions import DirectoryDoesNotExistError, NoObjectsFoundError
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture
def s3fetch(aws_credentials):
bucket = "my-test-bucket"
prefix = "my/test/objects/"
regex = "_mytestobject_"
s3_path = f"{bucket}/{prefix}"
s3fetch = S3Fetch(s3_uri=s3_path, regex=regex, debug=False)
s3fetch._objects = [
"one_mytestobject_one",
"two_mytestobject_two",
"three_mytestobject_three",
"four*mytestobject*four",
"five)mytestobject_five",
"six!mytestdirectoryobject!six/",
]
return s3fetch
def test_s3fetch_obj(s3fetch):
assert s3fetch._bucket == "my-test-bucket"
assert s3fetch._prefix == "my/test/objects/"
assert s3fetch._regex == "_mytestobject_"
assert s3fetch._objects == [
"one_mytestobject_one",
"two_mytestobject_two",
"three_mytestobject_three",
"four*mytestobject*four",
"five)mytestobject_five",
"six!mytestdirectoryobject!six/",
]
def test_filter_object(s3fetch):
expected_objects = [
"one_mytestobject_one",
"two_mytestobject_two",
"three_mytestobject_three",
]
s3fetch._regex = r"^\w+_\w+_\w+$"
tmp_list = []
for key in filter(s3fetch._filter_object, expected_objects):
tmp_list.append(key)
assert tmp_list == expected_objects
def test_filter_object_no_regex(s3fetch):
expected_objects = [
"one_mytestobject_one",
"two_mytestobject_two",
"three_mytestobject_three",
"four*mytestobject*four",
"five)mytestobject_five",
"six!mytestdirectoryobject!six/",
]
s3fetch._regex = None
tmp_list = []
for key in filter(s3fetch._filter_object, (obj for obj in expected_objects)):
tmp_list.append(key)
assert tmp_list == expected_objects[0:-1]
# TODO: Fixup once moto tests are working.
# NoObjectsFoundError now raised by_retrieve_list_of_objects
# def test_filter_object_no_matching_objects(s3fetch):
# s3fetch._regex = r"^sdfasdfasdfsa$"
# with pytest.raises(NoObjectsFoundError):
# s3fetch._filter_object()
# TODO: Fixup once moto tests are working.
# NoObjectsFoundError now raised by_retrieve_list_of_objects
# def test_filter_object_empty_object_list(s3fetch):
# s3fetch._objects = []
# s3fetch._regex = r"^\w+_\w+_\w+$"
# with pytest.raises(NoObjectsFoundError):
# s3fetch._filter_object()
def test_check_for_failed_downloads(s3fetch, capfd):
s3fetch._failed_downloads = [
(
"my/test/objects/one_mytestobject_one",
"reason",
)
]
s3fetch._check_for_failed_downloads()
out, _ = capfd.readouterr()
assert "objects failed to download" in out
s3fetch._debug = True
s3fetch._check_for_failed_downloads()
out, _ = capfd.readouterr()
assert f"my/test/objects/one_mytestobject_one: reason" in out
def test_check_for_failed_downloads_no_failures(s3fetch, capfd):
s3fetch._failed_downloads = []
s3fetch._check_for_failed_downloads()
out, _ = capfd.readouterr()
assert "objects failed to download" not in out
def test_dry_run_detected(s3fetch, capfd):
s3_path = "s3://my-test-bucket/my/test/objects/"
S3Fetch(s3_uri=s3_path, dry_run=True, debug=True)
out, _ = capfd.readouterr()
assert "Operating in dry run mode. Will not download objects." in out
def test_determine_download_dir_none_dir_specified(s3fetch, mocker):
os_mock = mocker.patch("os.getcwd")
expected_directory = Path("/home/test")
os_mock.return_value = expected_directory
assert s3fetch._determine_download_dir(None) == expected_directory
def test_determine_download_dir_dir_specified_and_exists(s3fetch, mocker):
is_dir_mock = mocker.patch("pathlib.Path.is_dir")
is_dir_mock.return_value = True
expected_directory = Path("/home/test/Downloads")
assert s3fetch._determine_download_dir("/home/test/Downloads") == expected_directory
def test_determine_download_dir_dir_specified_and_raises(s3fetch, mocker):
is_dir_mock = mocker.patch("pathlib.Path.is_dir")
is_dir_mock.return_value = False
expected_directory = "/home/test/Downloads"
with pytest.raises(DirectoryDoesNotExistError):
s3fetch._determine_download_dir(expected_directory)
def test_remove_directories(s3fetch):
expected_objects = [
"five)mytestobject_five",
"six!mytestdirectoryobject!six/",
]
s3fetch._regex = None
tmp_list = []
for key in filter(s3fetch._filter_object, (obj for obj in expected_objects)):
tmp_list.append(key)
assert tmp_list == ["five)mytestobject_five"]
def test_parse_and_split_s3_uri_full_path(s3fetch):
bucket, prefix = s3fetch._parse_and_split_s3_uri(
s3_uri="s3://testbucket/files", delimiter="/"
)
assert bucket == "testbucket"
assert prefix == "files"
bucket, prefix = s3fetch._parse_and_split_s3_uri(
s3_uri="s3://testbucket/files/", delimiter="/"
)
assert bucket == "testbucket"
assert prefix == "files/"
def test_parse_and_split_s3_uri_no_prefix(s3fetch):
bucket, prefix = s3fetch._parse_and_split_s3_uri(
s3_uri="s3://testbucket", delimiter="/"
)
assert bucket == "testbucket"
assert prefix == ""
bucket, prefix = s3fetch._parse_and_split_s3_uri(
s3_uri="s3://testbucket/", delimiter="/"
)
assert bucket == "testbucket"
assert prefix == ""
def test_rollup_prefix(s3fetch):
# (prefix, object_key, expected directory, expected filename)
prefix_and_keys = [
("", "object1", None, "object1"),
("storage", "storage/object1", "storage", "object1"),
("sto", "storage/object1", "storage", "object1"),
("storage/obj", "storage/object1", None, "object1"),
("test/an", "test/another_folder/console", "another_folder", "console"),
("", "test/another_folder/console", "test/another_folder", "console"),
]
for prefix, key, directory, filename in prefix_and_keys:
s3fetch._prefix = prefix
tmp_directory, tmp_filename = s3fetch._rollup_prefix(key)
        assert (directory, filename) == (tmp_directory, tmp_filename)
| StarcoderdataPython
109377 | <filename>evennia/contrib/tutorials/batchprocessor/__init__.py
"""
Batch processing examples - Griatch 2012
"""
| StarcoderdataPython |
3385438 | #
# This file is part of PyFOPPL, an implementation of a First Order Probabilistic Programming Language in Python.
#
# License: MIT (see LICENSE.txt)
#
# 21. Feb 2018, <NAME>
# 20. Mar 2018, <NAME>
#
from ..fe_clojure import ppl_clojure_forms as clj
from ..ppl_ast import *
from .ppl_clojure_lexer import ClojureLexer
from .ppl_clojure_parser import ClojureParser
#######################################################################################################################
class FopplParser(ClojureParser):
def visit_loop(self, count, initial_data, function, *args):
if not clj.is_integer(count):
raise SyntaxError("loop requires an integer value as first argument")
count = count.value
initial_data = initial_data.visit(self)
function = function.visit(self)
args = [arg.visit(self) for arg in args]
result = initial_data
i = 0
while i < count:
result = AstCall(function, [AstValue(i), result] + args)
i += 1
return result
#######################################################################################################################
def parse(source):
clj_ast = list(ClojureLexer(source))
ppl_ast = FopplParser().visit(clj_ast)
return ppl_ast
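
# A usage sketch (hypothetical FOPPL snippet; whether this exact form parses
# depends on the ClojureParser base class defined elsewhere):
#   ast = parse("(let [x (sample (normal 0.0 1.0))] (observe (normal x 1.0) 0.5))")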
| StarcoderdataPython |
3215420 | <gh_stars>10-100
# Copyright (c) 2018 Ansible by Red Hat
# All Rights Reserved.
class _AwxTaskError():
def build_exception(self, task, message=None):
if message is None:
message = "Execution error running {}".format(task.log_format)
e = Exception(message)
e.task = task
e.is_awx_task_error = True
return e
def TaskCancel(self, task, rc):
"""Canceled flag caused run_pexpect to kill the job run"""
message="{} was canceled (rc={})".format(task.log_format, rc)
e = self.build_exception(task, message)
e.rc = rc
e.awx_task_error_type = "TaskCancel"
return e
def TaskError(self, task, rc):
"""Userspace error (non-zero exit code) in run_pexpect subprocess"""
message = "{} encountered an error (rc={}), please see task stdout for details.".format(task.log_format, rc)
e = self.build_exception(task, message)
e.rc = rc
e.awx_task_error_type = "TaskError"
return e
AwxTaskError = _AwxTaskError()
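
# A usage sketch (hypothetical call sites, for illustration):
#   raise AwxTaskError.TaskError(task, rc)    # non-zero exit from the job subprocess
#   raise AwxTaskError.TaskCancel(task, rc)   # the run was canceled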
| StarcoderdataPython |
1783397 | from oauth2client.client import OAuth2WebServerFlow
import httplib2
import json
class Oauth2Client(object):
"""client for interacting with google oauth 2,
as google openid connect is supported under oauth2"""
def __init__(self, settings, logger, HTTP_PROXY=None):
self.logger = logger
self.settings = settings
self.flow = OAuth2WebServerFlow(
client_id=settings["client_id"],
client_secret=settings["client_secret"],
scope="openid email",
redirect_uri=settings["redirect_url"],
)
self.auth_url = self.get_auth_url()
self.HTTP_PROXY = HTTP_PROXY
def get_auth_url(self):
return self.flow.step1_get_authorize_url()
def get_user_id(self, code):
try:
if self.HTTP_PROXY:
proxy = httplib2.ProxyInfo(
proxy_type=httplib2.socks.PROXY_TYPE_HTTP,
proxy_host=self.HTTP_PROXY["host"],
proxy_port=self.HTTP_PROXY["port"],
proxy_rdns=True,
)
http = httplib2.Http(proxy_info=proxy)
else:
http = httplib2.Http()
creds = self.flow.step2_exchange(code, http=http)
http = creds.authorize(http)
r = http.request(
"https://www.googleapis.com/plus/v1/people/me/openIdConnect"
)
if len(r) > 1:
user_profile = json.loads(r[1])
if user_profile.get("email_verified") == "true":
return {"email": user_profile["email"]}
else:
return {
"error": (
"Your email is not verified: {}".format(
user_profile.get("error", "")
)
)
}
else:
return {"error": "Can't get user's email"}
except Exception as e:
self.logger.exception("Can't get user info")
return {"error": "Can't get your email: {}".format(e)}
| StarcoderdataPython |
3218153 | <reponame>ZwCreatePhoton/htmlmth
import htmlmth.mods.http
from . import TransformFunction, http_payload_to_tfarg_function
def _generate_encode_chunked_equisize(*args, **kwargs):
chunksize = kwargs.get("chunksize", 256)
assert(chunksize > 0)
return TransformFunction("",
"chunked encoding (equisize)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize))
)
encode_chunked_equisize = _generate_encode_chunked_equisize()
encode_chunked_equisize.parameterize = _generate_encode_chunked_equisize
def _generate_encode_chunked_equisize_leadingzeros(*args, **kwargs):
chunksize = kwargs.get("chunksize", 256)
leadingzeros = kwargs.get("leadingzeros", 10)
assert(chunksize > 0)
assert(leadingzeros > 1)
return TransformFunction("",
"chunked encoding (equisize, chunk sizes with leading zeros)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize, leadingzeros=leadingzeros))
)
encode_chunked_equisize_leadingzeros = _generate_encode_chunked_equisize_leadingzeros()
encode_chunked_equisize_leadingzeros.parameterize = _generate_encode_chunked_equisize_leadingzeros
def _generate_encode_chunked_varysize(*args, **kwargs):
min_chunksize = kwargs.get("min_chunksize", 128)
max_chunksize = kwargs.get("max_chunksize", 256)
assert(min_chunksize > 0)
assert(max_chunksize > 0)
assert(min_chunksize < max_chunksize)
return TransformFunction("",
"chunked encoding (various sizes)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize, max_chunksize=max_chunksize))
)
encode_chunked_varysize = _generate_encode_chunked_varysize()
encode_chunked_varysize.parameterize = _generate_encode_chunked_varysize
def _generate_encode_chunked_varysize_leadingzeros(*args, **kwargs):
min_chunksize = kwargs.get("min_chunksize", 128)
max_chunksize = kwargs.get("max_chunksize", 256)
leadingzeros = kwargs.get("leadingzeros", 10)
assert (min_chunksize > 0)
assert (max_chunksize > 0)
assert (min_chunksize < max_chunksize)
assert(leadingzeros > 1)
return TransformFunction("",
"chunked encoding (various sizes, chunk sizes with leading zeros)",
http_payload_to_tfarg_function(
lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize,
max_chunksize=max_chunksize, leadingzeros=leadingzeros))
)
encode_chunked_varysize_leadingzeros = _generate_encode_chunked_varysize_leadingzeros()
encode_chunked_varysize_leadingzeros.parameterize = _generate_encode_chunked_varysize_leadingzeros
| StarcoderdataPython |
77953 | <reponame>Furzoom/learnpython<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# type(a) == type(b) - whether the type of a compares equal to the type of b
# type(a) is type(b) - whether type(a) and type(b) are the same type object (identity)
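#
# A small illustration; for built-in types both checks agree, since type
# objects compare equal exactly when they are the same object:
a, b = 1, 2
print(type(a) == type(b))     # True: both values are ints
print(type(a) is type(b))     # True: the same type object
print(type(True) is type(1))  # False: bool is a distinct subclass of int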
| StarcoderdataPython |
3252066 | <reponame>nexus-lab/relf-server
#!/usr/bin/env python
"""Benchmark tests for the Cloud Bigtable data store."""
from grr.lib import flags
from grr.server import data_store_test
from grr.server.data_stores import cloud_bigtable_data_store_test
from grr.test_lib import test_lib
class CloudBigtableDataStoreBenchmarks(
cloud_bigtable_data_store_test.CloudBigTableDataStoreMixin,
data_store_test.DataStoreBenchmarks):
"""Performance test cloud bigtable."""
def main(argv):
del argv # Unused.
test_lib.main()
if __name__ == "__main__":
flags.StartMain(main)
| StarcoderdataPython |
3272253 | <filename>Netflix/em.py
"""Mixture model for matrix completion"""
from typing import Tuple
import numpy as np
from scipy.special import logsumexp
from common import GaussianMixture
def log_gaussian(x: np.ndarray, mean: np.ndarray, var: float) -> float:
"""Computes the log probablity of vector x under a normal distribution
Args:
x: (d, ) array holding the vector's coordinates
mean: (d, ) mean of the gaussian
var: variance of the gaussian
Returns:
float: the log probability
"""
d = len(x)
log_prob = -d / 2.0 * np.log(2 * np.pi * var)
log_prob -= 0.5 * ((x - mean) ** 2).sum() / var
return log_prob
def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:
"""E-step: Softly assigns each datapoint to a gaussian component
Args:
X: (n, d) array holding the data, with incomplete entries (set to 0)
mixture: the current gaussian mixture
Returns:
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the assignment
"""
n, _ = X.shape
K, _ = mixture.mu.shape
post = np.zeros((n, K))
ll = 0
for i in range(n):
mask = (X[i, :] != 0)
for j in range(K):
log_likelihood = log_gaussian(X[i, mask], mixture.mu[j, mask],
mixture.var[j])
post[i, j] = np.log(mixture.p[j] + 1e-16) + log_likelihood
total = logsumexp(post[i, :])
post[i, :] = post[i, :] - total
ll += total
return np.exp(post), ll
def mstep(X: np.ndarray, post: np.ndarray, mixture: GaussianMixture,
min_variance: float = .25) -> GaussianMixture:
"""M-step: Updates the gaussian mixture by maximizing the log-likelihood
of the weighted dataset
Args:
X: (n, d) array holding the data, with incomplete entries (set to 0)
post: (n, K) array holding the soft counts
for all components for all examples
mixture: the current gaussian mixture
min_variance: the minimum variance for each gaussian
Returns:
GaussianMixture: the new gaussian mixture
"""
n, d = X.shape
_, K = post.shape
n_hat = post.sum(axis=0)
p = n_hat / n
mu = mixture.mu.copy()
var = np.zeros(K)
for j in range(K):
sse, weight = 0, 0
for l in range(d):
mask = (X[:, l] != 0)
n_sum = post[mask, j].sum()
if (n_sum >= 1):
# Updating mean
mu[j, l] = (X[mask, l] @ post[mask, j]) / n_sum
# Computing variance
sse += ((mu[j, l] - X[mask, l]) ** 2) @ post[mask, j]
weight += n_sum
var[j] = sse / weight
if var[j] < min_variance:
var[j] = min_variance
return GaussianMixture(mu, var, p)
def run(X: np.ndarray, mixture: GaussianMixture,
post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:
"""Runs the mixture model
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the current assignment
"""
prev_ll = None
ll = None
while (prev_ll is None or ll - prev_ll > 1e-6 * np.abs(ll)):
prev_ll = ll
post, ll = estep(X, mixture)
mixture = mstep(X, post, mixture)
return mixture, post, ll
def fill_matrix(X: np.ndarray, mixture: GaussianMixture) -> np.ndarray:
"""Fills an incomplete matrix according to a mixture model
Args:
X: (n, d) array of incomplete data (incomplete entries =0)
mixture: a mixture of gaussians
Returns
np.ndarray: a (n, d) array with completed data
"""
n, d = X.shape
X_pred = X.copy()
K, _ = mixture.mu.shape
for i in range(n):
mask = X[i, :] != 0
mask0 = X[i, :] == 0
post = np.zeros(K)
for j in range(K):
log_likelihood = log_gaussian(X[i, mask], mixture.mu[j, mask],
mixture.var[j])
post[j] = np.log(mixture.p[j]) + log_likelihood
post = np.exp(post - logsumexp(post))
X_pred[i, mask0] = np.dot(post, mixture.mu[:, mask0])
return X_pred
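# Minimal usage sketch (assumes common.GaussianMixture is a simple (mu, var, p)
# container, as in the accompanying common module; the data below is synthetic
# and purely illustrative):
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.rand(50, 4)
    X[np.random.rand(50, 4) < 0.3] = 0  # zero entries mark missing ratings
    K = 3
    init = GaussianMixture(mu=X[np.random.choice(50, K, replace=False)],
                           var=np.full(K, 0.5),
                           p=np.full(K, 1.0 / K))
    mixture, post, ll = run(X, init, np.full((50, K), 1.0 / K))
    print("log-likelihood:", ll)
    print("filled matrix shape:", fill_matrix(X, mixture).shape)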
| StarcoderdataPython |
8185 | <gh_stars>0
def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
        # only assign fitness to chromosomes that do not yet have a fitness value
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
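        # Schaffer F6-style fitness (maximisation variant, peak of 1.0 at the origin)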
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
| StarcoderdataPython |
3271634 | <reponame>WillemRvX/ethelease
#!/usr/bin/env python
import os
from time import sleep
from ethelease.commons.utils import ENV, LOGGER
from ethelease.cronos.utils import eval_cron
from ethelease.k8s.ops import pod_launch_n_mgmt, K8sPodConf
from ethelease.workflow.commons.utils import Valuables, scheds, scheduler
FAMILY = os.environ.get('_FAMILY_')
def sleeper() -> dict:
groups, _index = list(zip(*[iter(range(1, 101))]*10)), dict()
for j, group in enumerate(groups):
for x in group:
_index[x] = j
return _index
def workflow(pipeline_name: str, vals: Valuables) -> None:
kind, sched = vals.kind, vals.schedule
mssg, devil = f'`{pipeline_name}` launching in `{ENV}`!', {'develop', 'fix'}
conf_body = (
K8sPodConf()
.env(ENV)
.family_name(vals.name)
.container_registry(vals.registry_location)
        .metadata(name=pipeline_name, proj_or_acct_id=vals.proj_or_acct_id, namespace='default')
.which_nodepoolorgroup(name=vals.node_poolorgroup, cloud=vals.which_cloud)
.pipeline_or_runner_script(vals.script)
.script_args(vals.args)
.pick_secret(name=vals.which_secret)
.cpu_usage(
req=vals.req_cpu,
lim=vals.lim_cpu
)
.mem_usage(
req=vals.req_mem,
lim=vals.lim_mem
)
.restart_policy(vals.restart_policy)
.assemble()
)
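    # Dispatch on environment and schedule:
    #   pr + cron expression  -> launch a pod on every cron match
    #   pr + immediately_once -> launch a single pod, then idle
    #   dv + develop/fix      -> launch a single pod, then idle
    #   dv + anything else    -> just idle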
if ENV == 'pr' and sched != 'immediately_once':
while True:
if eval_cron(pipeline_name=pipeline_name, expr=sched):
LOGGER.info(mssg)
pod_launch_n_mgmt(conf_body)
if ENV == 'pr' and sched == 'immediately_once':
LOGGER.info(mssg)
pod_launch_n_mgmt(conf_body)
while True:
sleep(60)
if ENV == 'dv' and sched in devil:
LOGGER.info(mssg)
pod_launch_n_mgmt(conf_body)
while True:
sleep(60)
    if ENV == 'dv' and sched not in devil:
while True:
LOGGER.info('Nothing here...')
sleep(300)
if __name__ == '__main__':
scheduler(
scheds(
'.',
family=os.environ.get('_FAMILY_')
),
workflow=workflow,
is_local=False
)
| StarcoderdataPython |
1705281 | <gh_stars>1-10
import unittest
import os
import sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
try:
from Comodo.TV import * # @UnusedWildImport
except ImportError as erro:
from trunk.Comodo.TV import * # @UnusedWildImport
class TesteTV(unittest.TestCase):
def setUp(self):
self.tv1 = TV()
def tearDown(self):
self.tv1 = None
def testPower(self):
self.assertFalse(self.tv1.getState(), "O estado deve ser False")
self.tv1.turnOn()
self.assertTrue(self.tv1.getState(), "O estado deve ser True")
self.tv1.turnOff()
self.assertFalse(self.tv1.getState(), "O estado deve ser False")
def testVolume(self):
self.assertEqual(self.tv1.getVolume(), 0, "O volume deve iniciar em 0")
self.tv1.upVolume()
self.assertEqual(self.tv1.getVolume(), 1, "O volume deve ser 1")
for _ in range(10):
self.tv1.upVolume()
self.assertEqual(self.tv1.getVolume(), 11, "O volume deve ser 11")
# down volume
self.tv1.downVolume()
self.assertEqual(self.tv1.getVolume(), 10, "O volume deve ser 10")
for _ in range(5):
self.tv1.downVolume()
self.assertEqual(self.tv1.getVolume(), 5, "O volume deve ser 5")
def testCanal(self):
self.assertEqual(1, self.tv1.getChannel(), "O canal inicial deve ser 1")
self.tv1.upChannel()
self.assertEqual(2, self.tv1.getChannel(), "O canal deve ser 2")
for _ in range(3):
self.tv1.upChannel()
self.assertEqual(2, self.tv1.getChannel(), "O canal deve ser 2")
# down canal
self.tv1.downChannel()
self.assertEqual(1, self.tv1.getChannel(), "O canal deve ser 1")
for _ in range(3):
self.tv1.downChannel()
self.assertEqual(1, self.tv1.getChannel(), "O canal deve ser 1")
| StarcoderdataPython |
1684744 | # SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
def deepsleep_test(dut: Dut, case_name: str) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write(case_name)
reset_reason = 'DEEPSLEEP_RESET' if dut.target == 'esp32' else 'DSLEEP'
if dut.target == 'esp32c3':
# Known issue: IDF-5003
dut.expect(r'rst:.*\(%s\)' % reset_reason, timeout=40)
else:
dut.expect(r'rst:.*\(%s\)' % reset_reason, timeout=10)
@pytest.mark.supported_targets
@pytest.mark.generic
def test_rtc_8md256_deepsleep(dut: Dut) -> None:
deepsleep_test(dut, '"Can use 8MD256 as RTC clock source in deepsleep"')
# Only targets with SOC_PM_SUPPORT_RTC_PERIPH_PD defined
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.generic
def test_rtc_8md256_deepsleep_force_rtcperiph(dut: Dut) -> None:
deepsleep_test(dut, '"Can use 8MD256 as RTC clock source in deepsleep (force rtc_periph)"')
def lightsleep_test(dut: Dut, case_name: str) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write(case_name)
if dut.target == 'esp32c3':
# Known issue: IDF-5003
dut.expect(r'Returned from light sleep, reason: timer', timeout=40)
else:
dut.expect(r'Returned from light sleep, reason: timer', timeout=10)
@pytest.mark.supported_targets
@pytest.mark.generic
def test_rtc_8md256_lightsleep(dut: Dut) -> None:
lightsleep_test(dut, '"Can use 8MD256 as RTC clock source in lightsleep"')
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.generic
def test_rtc_8md256_lightsleep_force_rtcperiph(dut: Dut) -> None:
lightsleep_test(dut, '"Can use 8MD256 as RTC clock source in lightsleep (force rtc_periph)"')
| StarcoderdataPython |
3375359 | <gh_stars>0
import logging
import numpy as np
from demotivational_policy_descent.agents.agent_interface import AgentInterface
class Dummy(AgentInterface):
def __init__(self, env, player_id=1):
super().__init__(env=env, player_id=player_id)
self.reset() # Call reset here to avoid code duplication!
def reset(self):
logging.debug("Resetting parameters...")
self.test_attribute = 5
logging.debug("Reset!")
def get_action(self, frame: np.array=None) -> int:
logging.debug("Returning a random action sampled from the frame..")
return np.random.choice([0, 1, 2])
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
dummy = Dummy(env=None, player_id=1)
dummy.test_attribute = 100
name = "dummy_test_model"
rename = dummy.save_model(name)
print("Renamed:", rename)
dummy.test_attribute = 200
dummy.load_model(name)
assert dummy.test_attribute == 100
dummy.reset()
assert dummy.test_attribute == 5
print("Dummy action", dummy.get_action(np.zeros(1)))
| StarcoderdataPython |
1739302 | import io
from twisted.internet import reactor
from ygo.card import Card
from ygo.duel_reader import DuelReader
from ygo.parsers.duel_parser import DuelParser
from ygo.utils import process_duel
def msg_select_option(self, data):
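    # Packet layout: byte 0 is the message id (skipped via data[1:]), followed
    # by player (u8), option count (u8) and `count` option ids (u32 each).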
data = io.BytesIO(data[1:])
player = self.read_u8(data)
size = self.read_u8(data)
options = []
for i in range(size):
options.append(self.read_u32(data))
self.cm.call_callbacks("select_option", player, options)
return data.read()
def select_option(self, player, options):
pl = self.players[player]
def r(caller):
idx = int(caller.text)
opt = options[idx]
for p in self.players + self.watchers:
if opt > 10000:
string = card.get_strings(p)[opt & 0xF]
else:
string = p._("Unknown option %d" % opt)
string = p.strings["system"].get(opt, string)
if p is pl:
p.notify(p._("You selected option {0}: {1}").format(idx + 1, string))
else:
p.notify(
p._("{0} selected option {1}: {2}").format(
pl.nickname, idx + 1, string
)
)
self.set_responsei(idx)
reactor.callLater(0, process_duel, self)
card = None
opts = []
for opt in options:
if opt > 10000:
code = opt >> 4
card = Card(code)
string = card.get_strings(pl)[opt & 0xF]
else:
string = pl._("Unknown option %d" % opt)
string = pl.strings["system"].get(opt, string)
opts.append(string)
pl.notify(
pl._("Select option:"),
no_abort=pl._("Invalid option."),
prompt=pl._("Select option:"),
persistent=True,
restore_parser=DuelParser,
)
for idx, opt in enumerate(opts):
pl.notify(str(idx) + ": " + str(opt))
pl.notify(DuelReader, r, no_abort=pl._("Invalid command"), restore_parser=DuelParser)
MESSAGES = {14: msg_select_option}
CALLBACKS = {"select_option": select_option}
| StarcoderdataPython |
127692 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR04c_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitR04c_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitR04c_CompleteLHS, self).__init__(name='HUnitR04c_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR04c_CompleteLHS')
self["equations"] = []
# Set the node attributes
# match class State(State) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__State"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State')
# apply class ProcDef(4.2.a.0ProcDef) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__ProcDef"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDef')
# apply class LocalDef(4.2.a.1LocalDef) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__LocalDef"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.1LocalDef')
# apply class Name(4.2.a.2Name) node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return True"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__Name"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.2Name')
# apply class New(4.2.a.3New) node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """return True"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__New"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3New')
# apply class Name(4.2.a.4Name) node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """return True"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["mm__"] = """MT_pre__Name"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.4Name')
# apply class Name(4.2.a.5Name) node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """return True"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_pre__Name"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.5Name')
# apply class Name(4.2.a.6Name) node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """return True"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_pre__Name"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.6Name')
# apply class Par(4.2.a.7Par) node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """return True"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["mm__"] = """MT_pre__Par"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Par')
# apply class Inst(4.2.a.8Inst) node
self.add_node()
self.vs[9]["MT_pre__attr1"] = """return True"""
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["mm__"] = """MT_pre__Inst"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Inst')
# apply class Inst(4.2.a.9Inst) node
self.add_node()
self.vs[10]["MT_pre__attr1"] = """return True"""
self.vs[10]["MT_label__"] = """11"""
self.vs[10]["mm__"] = """MT_pre__Inst"""
self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Inst')
# apply class Name(4.2.a.10Name) node
self.add_node()
self.vs[11]["MT_pre__attr1"] = """return True"""
self.vs[11]["MT_label__"] = """12"""
self.vs[11]["mm__"] = """MT_pre__Name"""
self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.10Name')
# apply class Name(4.2.a.11Name) node
self.add_node()
self.vs[12]["MT_pre__attr1"] = """return True"""
self.vs[12]["MT_label__"] = """13"""
self.vs[12]["mm__"] = """MT_pre__Name"""
self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.11Name')
# apply class Name(4.2.a.12Name) node
self.add_node()
self.vs[13]["MT_pre__attr1"] = """return True"""
self.vs[13]["MT_label__"] = """14"""
self.vs[13]["mm__"] = """MT_pre__Name"""
self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.12Name')
# apply class Name(4.2.a.13Name) node
self.add_node()
self.vs[14]["MT_pre__attr1"] = """return True"""
self.vs[14]["MT_label__"] = """15"""
self.vs[14]["mm__"] = """MT_pre__Name"""
self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.13Name')
# apply class Name(4.2.a.14Name) node
self.add_node()
self.vs[15]["MT_pre__attr1"] = """return True"""
self.vs[15]["MT_label__"] = """16"""
self.vs[15]["mm__"] = """MT_pre__Name"""
self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.14Name')
# apply class Name(4.2.a.15Name) node
self.add_node()
self.vs[16]["MT_pre__attr1"] = """return True"""
self.vs[16]["MT_label__"] = """17"""
self.vs[16]["mm__"] = """MT_pre__Name"""
self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.15Name')
# apply class Name(4.2.a.16Name) node
self.add_node()
self.vs[17]["MT_pre__attr1"] = """return True"""
self.vs[17]["MT_label__"] = """18"""
self.vs[17]["mm__"] = """MT_pre__Name"""
self.vs[17]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.16Name')
# apply association ProcDef--p-->LocalDefnode
self.add_node()
self.vs[18]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[18]["MT_label__"] = """19"""
self.vs[18]["mm__"] = """MT_pre__directLink_T"""
self.vs[18]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc184.2.a.1LocalDef')
# apply association ProcDef--channelNames-->Namenode
self.add_node()
self.vs[19]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[19]["MT_label__"] = """20"""
self.vs[19]["mm__"] = """MT_pre__directLink_T"""
self.vs[19]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc194.2.a.2Name')
# apply association LocalDef--p-->Newnode
self.add_node()
self.vs[20]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[20]["MT_label__"] = """21"""
self.vs[20]["mm__"] = """MT_pre__directLink_T"""
self.vs[20]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.1LocalDefassoc204.2.a.3New')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[21]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[21]["MT_label__"] = """22"""
self.vs[21]["mm__"] = """MT_pre__directLink_T"""
self.vs[21]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc214.2.a.4Name')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[22]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[22]["MT_label__"] = """23"""
self.vs[22]["mm__"] = """MT_pre__directLink_T"""
self.vs[22]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc224.2.a.5Name')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[23]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[23]["MT_label__"] = """24"""
self.vs[23]["mm__"] = """MT_pre__directLink_T"""
self.vs[23]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc234.2.a.6Name')
# apply association New--p-->Parnode
self.add_node()
self.vs[24]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[24]["MT_label__"] = """25"""
self.vs[24]["mm__"] = """MT_pre__directLink_T"""
self.vs[24]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc244.2.a.7Par')
# apply association Par--p-->Instnode
self.add_node()
self.vs[25]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[25]["MT_label__"] = """26"""
self.vs[25]["mm__"] = """MT_pre__directLink_T"""
self.vs[25]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Parassoc254.2.a.9Inst')
# apply association Par--p-->Instnode
self.add_node()
self.vs[26]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[26]["MT_label__"] = """27"""
self.vs[26]["mm__"] = """MT_pre__directLink_T"""
self.vs[26]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Parassoc264.2.a.8Inst')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[27]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[27]["MT_label__"] = """28"""
self.vs[27]["mm__"] = """MT_pre__directLink_T"""
self.vs[27]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc274.2.a.10Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[28]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[28]["MT_label__"] = """29"""
self.vs[28]["mm__"] = """MT_pre__directLink_T"""
self.vs[28]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc284.2.a.11Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[29]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[29]["MT_label__"] = """30"""
self.vs[29]["mm__"] = """MT_pre__directLink_T"""
self.vs[29]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc294.2.a.12Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[30]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[30]["MT_label__"] = """31"""
self.vs[30]["mm__"] = """MT_pre__directLink_T"""
self.vs[30]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc304.2.a.13Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[31]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[31]["MT_label__"] = """32"""
self.vs[31]["mm__"] = """MT_pre__directLink_T"""
self.vs[31]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc314.2.a.14Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[32]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[32]["MT_label__"] = """33"""
self.vs[32]["mm__"] = """MT_pre__directLink_T"""
self.vs[32]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc324.2.a.15Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[33]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[33]["MT_label__"] = """34"""
self.vs[33]["mm__"] = """MT_pre__directLink_T"""
self.vs[33]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc334.2.a.16Name')
# trace association ProcDef--trace-->nullnode
self.add_node()
self.vs[34]["MT_label__"] = """35"""
self.vs[34]["mm__"] = """MT_pre__trace_link"""
self.vs[34]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc34State')
self['equations'].append(((3,'literal'),('constant','sh')))
self['equations'].append(((5,'literal'),('constant','exit_in')))
self['equations'].append(((6,'literal'),('constant','exack_in')))
self['equations'].append(((7,'literal'),('constant','sh_in')))
self['equations'].append(((9,'name'),('constant','C')))
self['equations'].append(((10,'name'),('constant','H')))
self['equations'].append(((11,'literal'),('constant','enp')))
self['equations'].append(((12,'literal'),('constant','exit_in')))
self['equations'].append(((13,'literal'),('constant','exack_in')))
self['equations'].append(((14,'literal'),('constant','sh_in')))
self['equations'].append(((15,'literal'),('constant','exit_in')))
self['equations'].append(((16,'literal'),('constant','exack_in')))
self['equations'].append(((17,'literal'),('constant','sh_in')))
# Add the edges
self.add_edges([
(1,18), # apply class ProcDef(4.2.a.0ProcDef) -> association p
(18,2), # association LocalDef -> apply class LocalDef(4.2.a.1LocalDef)
(1,19), # apply class ProcDef(4.2.a.0ProcDef) -> association channelNames
(19,3), # association Name -> apply class Name(4.2.a.2Name)
(2,20), # apply class LocalDef(4.2.a.1LocalDef) -> association p
(20,4), # association New -> apply class New(4.2.a.3New)
(4,21), # apply class New(4.2.a.3New) -> association channelNames
(21,5), # association Name -> apply class Name(4.2.a.4Name)
(4,22), # apply class New(4.2.a.3New) -> association channelNames
(22,6), # association Name -> apply class Name(4.2.a.5Name)
(4,23), # apply class New(4.2.a.3New) -> association channelNames
(23,7), # association Name -> apply class Name(4.2.a.6Name)
(4,24), # apply class New(4.2.a.3New) -> association p
(24,8), # association Par -> apply class Par(4.2.a.7Par)
(8,25), # apply class Par(4.2.a.7Par) -> association p
(25,10), # association Inst -> apply class Inst(4.2.a.9Inst)
(8,26), # apply class Par(4.2.a.7Par) -> association p
(26,9), # association Inst -> apply class Inst(4.2.a.8Inst)
(9,27), # apply class Inst(4.2.a.8Inst) -> association channelNames
(27,11), # association Name -> apply class Name(4.2.a.10Name)
(9,28), # apply class Inst(4.2.a.8Inst) -> association channelNames
(28,12), # association Name -> apply class Name(4.2.a.11Name)
(9,29), # apply class Inst(4.2.a.8Inst) -> association channelNames
(29,13), # association Name -> apply class Name(4.2.a.12Name)
(9,30), # apply class Inst(4.2.a.8Inst) -> association channelNames
(30,14), # association Name -> apply class Name(4.2.a.13Name)
(10,31), # apply class Inst(4.2.a.9Inst) -> association channelNames
(31,15), # association Name -> apply class Name(4.2.a.14Name)
(10,32), # apply class Inst(4.2.a.9Inst) -> association channelNames
(32,16), # association Name -> apply class Name(4.2.a.15Name)
(10,33), # apply class Inst(4.2.a.9Inst) -> association channelNames
(33,17), # association Name -> apply class Name(4.2.a.16Name)
(1,34), # apply class ProcDef(State) -> backward_association
(34,0), # backward_associationnull -> match_class null(State)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr12(self, attr_value, this):
return True
def eval_attr13(self, attr_value, this):
return True
def eval_attr14(self, attr_value, this):
return True
def eval_attr15(self, attr_value, this):
return True
def eval_attr16(self, attr_value, this):
return True
def eval_attr17(self, attr_value, this):
return True
def eval_attr18(self, attr_value, this):
return True
def eval_attr19(self, attr_value, this):
return True
def eval_attr110(self, attr_value, this):
return True
def eval_attr111(self, attr_value, this):
return True
def eval_attr112(self, attr_value, this):
return True
def eval_attr113(self, attr_value, this):
return True
def eval_attr114(self, attr_value, this):
return True
def eval_attr115(self, attr_value, this):
return True
def eval_attr116(self, attr_value, this):
return True
def eval_attr117(self, attr_value, this):
return True
def eval_attr118(self, attr_value, this):
return True
# define evaluation methods for each match association.
# define evaluation methods for each apply association.
def eval_attr119(self, attr_value, this):
return attr_value == "p"
def eval_attr120(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr121(self, attr_value, this):
return attr_value == "p"
def eval_attr122(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr123(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr124(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr125(self, attr_value, this):
return attr_value == "p"
def eval_attr126(self, attr_value, this):
return attr_value == "p"
def eval_attr127(self, attr_value, this):
return attr_value == "p"
def eval_attr128(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr129(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr130(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr131(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr132(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr133(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr134(self, attr_value, this):
return attr_value == "channelNames"
def constraint(self, PreNode, graph):
return True
| StarcoderdataPython |
3257317 | """
Decorators to extend workflow functions
"""
from functools import wraps
from .util import find_on_path
from .provenance import BINARY_PROVENANCE as bin_provenance_registry
class requires(object):
"""Convenience wrapper for tracking binaries used to perform tasks
"""
def __init__(self, binaries=list(), version_methods=list()):
self.required_binaries = binaries
self.version_methods = version_methods
if len(binaries) == len(version_methods):
self._update_binary_provenance()
def _update_binary_provenance(self):
"""Add to the BINARY_PROVENANCE set the list of required binaries and
version commands in the form of (binary, command) tuples
"""
        for pair in zip(self.required_binaries, self.version_methods):
            bin_provenance_registry.add(pair)
def _maybe_halt(self):
missing = list()
for binary in self.required_binaries:
candidate = find_on_path(binary)
if not candidate:
missing.append(binary)
else:
yield candidate
if missing:
raise ValueError("Unable to continue, please install the "
"following binaries: %s" %(str(missing)))
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
binaries_with_paths = list(self._maybe_halt())
ret = fn(*args, **kwargs)
if ret:
if type(ret) is dict:
ret = [ret]
for t in ret:
deps = t.get('file_dep', [])
t['file_dep'] = list(set(list(deps)+binaries_with_paths))
yield t
wrapper.required_binaries = self.required_binaries
wrapper.version_methods = self.version_methods
return wrapper
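# Minimal usage sketch (hypothetical doit-style task generator; the names are
# illustrative only):
#
#     @requires(binaries=["samtools"], version_methods=["samtools --version"])
#     def task_index(fasta):
#         yield {"name": "index",
#                "actions": ["samtools faidx " + fasta],
#                "file_dep": [fasta]}
#
# The resolved binary paths get appended to each yielded task's file_dep.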
| StarcoderdataPython |
1602866 | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from cudf._lib.nvtx import annotate
from .minmax import MinMax
from .moments import Moments
from .operator import CONT
from .transform_operator import DFOperator
class Normalize(DFOperator):
"""
Standardizing the features around 0 with a standard deviation
of 1 is a common technique to compare measurements that have
different units. This operation can be added to the workflow
to standardize the features.
It performs Normalization using the mean std method.
Example usage::
# Initialize the workflow
proc = nvt.Workflow(
cat_names=CATEGORICAL_COLUMNS,
cont_names=CONTINUOUS_COLUMNS,
label_name=LABEL_COLUMNS
)
# Add Normalize to the workflow for continuous columns
proc.add_cont_feature(nvt.ops.Normalize())
Parameters
----------
columns : list of str, default None
Continous columns to target for this op. If None, the operation will target all known
continous columns.
replace : bool, default False
Whether to replace existing columns or create new ones.
"""
default_in = CONT
default_out = CONT
@property
def req_stats(self):
return [Moments(columns=self.columns)]
@annotate("Normalize_op", color="darkgreen", domain="nvt_python")
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
cont_names = target_columns
if not cont_names or not stats_context["stds"]:
return
gdf = self.apply_mean_std(gdf, stats_context, cont_names)
return gdf
def apply_mean_std(self, gdf, stats_context, cont_names):
new_gdf = cudf.DataFrame()
for name in cont_names:
if stats_context["stds"][name] > 0:
new_col = f"{name}_{self._id}"
new_gdf[new_col] = (gdf[name] - stats_context["means"][name]) / (
stats_context["stds"][name]
)
new_gdf[new_col] = new_gdf[new_col].astype("float32")
return new_gdf
class NormalizeMinMax(DFOperator):
"""
Standardizing the features around 0 with a standard deviation
of 1 is a common technique to compare measurements that have
different units. This operation can be added to the workflow
to standardize the features.
It performs Normalization using the min max method.
Example usage::
# Initialize the workflow
proc = nvt.Workflow(
cat_names=CATEGORICAL_COLUMNS,
cont_names=CONTINUOUS_COLUMNS,
label_name=LABEL_COLUMNS
)
# Add NormalizeMinMax to the workflow for continuous columns
proc.add_cont_feature(nvt.ops.NormalizeMinMax())
Parameters
----------
columns : list of str, default None
Continous columns to target for this op. If None, the operation will target all known
continous columns.
replace : bool, default False
Whether to replace existing columns or create new ones.
"""
default_in = CONT
default_out = CONT
@property
def req_stats(self):
return [MinMax(columns=self.columns)]
@annotate("NormalizeMinMax_op", color="darkgreen", domain="nvt_python")
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
cont_names = target_columns
if not cont_names or not stats_context["mins"]:
return
gdf = self.apply_min_max(gdf, stats_context, cont_names)
return gdf
def apply_min_max(self, gdf, stats_context, cont_names):
new_gdf = cudf.DataFrame()
for name in cont_names:
dif = stats_context["maxs"][name] - stats_context["mins"][name]
new_col = f"{name}_{self._id}"
if dif > 0:
new_gdf[new_col] = (gdf[name] - stats_context["mins"][name]) / dif
elif dif == 0:
new_gdf[new_col] = gdf[name] / (2 * gdf[name])
new_gdf[new_col] = new_gdf[new_col].astype("float32")
return new_gdf
| StarcoderdataPython |
1707795 | """
SGA.html
========
Code to generate HTML output for the various stages of the SGA analysis.
"""
import os
import multiprocessing
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def get_layer(onegal):
if onegal['DR'] == 'dr6':
layer = 'mzls+bass-dr6'
elif onegal['DR'] == 'dr7':
layer = 'decals-dr5'
else:
print('Unrecognized data release {}!'.format(onegal['DR']))
raise ValueError
return layer
def _get_cutouts_one(args):
"""Wrapper function for the multiprocessing."""
return get_cutouts_one(*args)
def get_cutouts_one(group, clobber=False):
"""Get viewer cutouts for a single galaxy."""
layer = get_layer(group)
groupname = get_groupname(group)
diam = group_diameter(group) # [arcmin]
size = np.ceil(diam * 60 / PIXSCALE).astype('int') # [pixels]
imageurl = '{}/?ra={:.8f}&dec={:.8f}&pixscale={:.3f}&size={:g}&layer={}'.format(
cutouturl, group['ra'], group['dec'], PIXSCALE, size, layer)
jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
cmd = 'wget --continue -O {:s} "{:s}"' .format(jpgfile, imageurl)
if os.path.isfile(jpgfile) and not clobber:
print('File {} exists...skipping.'.format(jpgfile))
else:
if os.path.isfile(jpgfile):
os.remove(jpgfile)
print(cmd)
os.system(cmd)
def get_cutouts(groupsample, use_nproc=nproc, clobber=False):
"""Get viewer cutouts of the whole sample."""
cutoutargs = list()
for gg in groupsample:
cutoutargs.append( (gg, clobber) )
if use_nproc > 1:
        p = multiprocessing.Pool(use_nproc)
p.map(_get_cutouts_one, cutoutargs)
p.close()
else:
for args in cutoutargs:
_get_cutouts_one(args)
return
def _add_labels_one(args):
"""Wrapper function for the multiprocessing."""
return add_labels_one(*args)
def add_labels_one(group, sample, clobber=False, nothumb=False):
jpgdir = os.path.join(SGAdir, 'cutouts', 'jpg')
pngdir = os.path.join(SGAdir, 'cutouts', 'png')
if not os.path.isdir(pngdir):
os.mkdir(pngdir)
groupname = get_groupname(group)
galaxy = get_galaxy(group, sample, html=True)
jpgfile = os.path.join(jpgdir, '{}.jpg'.format(groupname))
pngfile = os.path.join(pngdir, '{}.png'.format(groupname))
thumbfile = os.path.join(pngdir, 'thumb-{}.png'.format(groupname))
if os.path.isfile(jpgfile):
if os.path.isfile(pngfile) and not clobber:
print('File {} exists...skipping.'.format(pngfile))
else:
im = Image.open(jpgfile)
sz = im.size
fntsize = np.round(sz[0]/28).astype('int')
width = np.round(sz[0]/175).astype('int')
font = ImageFont.truetype(fonttype, size=fntsize)
draw = ImageDraw.Draw(im)
# Label the group--
draw.text((0+fntsize*2, 0+fntsize*2), galaxy, font=font)
# Add a scale bar--
x0, x1, yy = sz[1]-fntsize*2-barlen, sz[1]-fntsize*2, sz[0]-fntsize*2
draw.line((x0, yy, x1, yy), fill='white', width=width)
im.save(pngfile)
# Generate a thumbnail
if not nothumb:
cmd = 'convert -thumbnail 300x300 {} {}'.format(pngfile, thumbfile)
os.system(cmd)
def add_labels(groupsample, sample, clobber=False):
labelargs = list()
for group in groupsample:
labelargs.append((group, sample, clobber))
if nproc > 1:
p = multiprocessing.Pool(nproc)
res = p.map(_add_labels_one, labelargs)
p.close()
else:
for args in labelargs:
res = _add_labels_one(args)
def html_rows(html, _groupkeep, sample, nperrow=4):
# Not all objects may have been analyzed.
these = [os.path.isfile(os.path.join(SGAdir, 'cutouts', 'png', '{}.png'.format(
get_groupname(gg)))) for gg in _groupkeep]
groupkeep = _groupkeep[these]
nrow = np.ceil(len(groupkeep) / nperrow).astype('int')
groupsplit = list()
for ii in range(nrow):
i1 = nperrow*ii
i2 = nperrow*(ii+1)
if i2 > len(groupkeep):
i2 = len(groupkeep)
groupsplit.append(groupkeep[i1:i2])
print('Splitting the sample into {} rows with {} mosaics per row.'.format(nrow, nperrow))
html.write('<table class="ls-gallery">\n')
html.write('<tbody>\n')
for grouprow in groupsplit:
html.write('<tr>\n')
for group in grouprow:
groupname = get_groupname(group)
galaxy = get_galaxy(group, sample, html=True)
pngfile = os.path.join('cutouts', 'png', '{}.png'.format(groupname))
thumbfile = os.path.join('cutouts', 'png', 'thumb-{}.png'.format(groupname))
img = 'src="{}" alt="{}"'.format(thumbfile, galaxy)
#img = 'class="ls-gallery" src="{}" alt="{}"'.format(thumbfile, nicename)
html.write('<td><a href="{}"><img {}></a></td>\n'.format(pngfile, img))
html.write('</tr>\n')
html.write('<tr>\n')
for group in grouprow:
groupname = get_groupname(group)
galaxy = '{}: {}'.format(groupname.upper(), get_galaxy(group, sample, html=True))
layer = get_layer(group)
href = '{}/?layer={}&ra={:.8f}&dec={:.8f}&zoom=12'.format(viewerurl, layer, group['ra'], group['dec'])
html.write('<td><a href="{}" target="_blank">{}</a></td>\n'.format(href, galaxy))
html.write('</tr>\n')
html.write('</tbody>\n')
html.write('</table>\n')
def make_plots(sample, analysisdir=None, htmldir='.', refband='r',
band=('g', 'r', 'z'), clobber=False, verbose=True):
"""Make QA plots.
"""
sample_trends(sample, htmldir, analysisdir=analysisdir, verbose=verbose)
for gal in sample:
objid, objdir = get_objid(gal, analysisdir=analysisdir)
htmlobjdir = os.path.join(htmldir, '{}'.format(objid))
if not os.path.isdir(htmlobjdir):
os.makedirs(htmlobjdir, exist_ok=True)
# Build the ellipse plots.
qa_ellipse_results(objid, objdir, htmlobjdir, band=band,
clobber=clobber, verbose=verbose)
qa_sersic_results(objid, objdir, htmlobjdir, band=band,
clobber=clobber, verbose=verbose)
# Build the montage coadds.
qa_montage_coadds(objid, objdir, htmlobjdir, clobber=clobber, verbose=verbose)
# Build the MGE plots.
#qa_mge_results(objid, objdir, htmlobjdir, refband='r', band=band,
# clobber=clobber, verbose=verbose)
def _javastring():
"""Return a string that embeds a date in a webpage."""
import textwrap
js = textwrap.dedent("""
<SCRIPT LANGUAGE="JavaScript">
var months = new Array(13);
months[1] = "January";
months[2] = "February";
months[3] = "March";
months[4] = "April";
months[5] = "May";
months[6] = "June";
months[7] = "July";
months[8] = "August";
months[9] = "September";
months[10] = "October";
months[11] = "November";
months[12] = "December";
var dateObj = new Date(document.lastModified)
var lmonth = months[dateObj.getMonth() + 1]
var date = dateObj.getDate()
var fyear = dateObj.getYear()
if (fyear < 2000)
fyear = fyear + 1900
document.write(" " + fyear + " " + lmonth + " " + date)
</SCRIPT>
""")
return js
def make_html(sample=None, htmldir=None, dr='dr6-dr7', makeplots=True, clobber=False,
verbose=True):
"""Make the HTML pages.
"""
import SGA.io
if htmldir is None:
htmldir = SGA.io.html_dir()
sample = SGA.io.read_parent(dr=dr)
objid, objdir = legacyhalos.io.get_objid(sample)
reject = []
toss = np.zeros(len(groupsample), dtype=bool)
for ii, gg in enumerate(groupsample['groupid']):
for rej in np.atleast_1d(reject):
toss[ii] = rej in gg.lower()
if toss[ii]:
break
print('Rejecting {} groups.'.format(np.sum(toss)))
groupkeep = groupsample[~toss]
if np.sum(toss) > 0:
grouprej = groupsample[toss]
else:
grouprej = []
# Write the last-updated date to a webpage.
js = _javastring()
# Get the viewer link
def _viewer_link(gal, dr):
baseurl = 'http://legacysurvey.org/viewer/'
width = 2 * cutout_radius_150kpc(redshift=gal['z'], pixscale=0.262) # [pixels]
if width > 400:
zoom = 14
else:
zoom = 15
viewer = '{}?ra={:.6f}&dec={:.6f}&zoom={:g}&layer=decals-{}'.format(
baseurl, gal['ra'], gal['dec'], zoom, dr)
return viewer
homehtml = 'index.html'
# Build the home (index.html) page--
if not os.path.exists(htmldir):
os.makedirs(htmldir)
htmlfile = os.path.join(htmldir, homehtml)
with open(htmlfile, 'w') as html:
html.write('<html><head>\n')
html.write('<style type="text/css">\n')
html.write('table.ls-gallery {width: 90%;}\n')
html.write('p.ls-gallery {width: 80%;}\n')
html.write('</style>\n')
html.write('</head><body>\n')
html.write('<h1>Siena Galaxy Atlas 2020 (SGA-2020)</h1>\n')
html.write("""<p class="ls-gallery">Each thumbnail links to a larger image while the galaxy
name below each thumbnail links to the <a href="http://legacysurvey.org/viewer">Sky Viewer</a>.
For reference, the horizontal white bar in the lower-right corner of each image represents
one arcminute.</p>\n""")
        html_rows(html, groupkeep, sample)
html.write('<br /><br />\n')
html.write('<b><i>Last updated {}</b></i>\n'.format(js))
html.write('</body></html>\n')
if makeplots:
make_plots(sample, analysisdir=analysisdir, htmldir=htmldir, refband=refband,
band=band, clobber=clobber, verbose=verbose)
| StarcoderdataPython |
4823099 | <reponame>BattleCisco/AAshe
import traceback
import aiohttp
import sqlite3
import asyncio
regions = [
"BR1",
"EUN1",
"EUW1",
"JP1",
"KR",
"LA1",
"LA2",
"NA1",
"OC1",
"TR1",
"RU",
"PBE1"]
class Config:
api_key = None
calls = 0
sql_cache = True
conn = None
def __init__(self):
pass
@classmethod
def get_api_key(cls):
return cls.api_key
# Why this?
    # One unified place to have the request method and rate limit, in case it's found in the local db
@classmethod
def initiate(cls, api_key, conn: sqlite3.Connection=None):
        cls.api_key = api_key
        if conn:
            cls.sql_cache = True
            cls.conn = conn
            cls.cursor = conn.cursor()
def run_async(func: asyncio.coroutine, **kwargs)->object:
"""Allows testing within the docstrings"""
loop = asyncio.get_event_loop()
aiosession = aiohttp.ClientSession(loop=loop)
try:
kwargs["aiosession"] = aiosession
m = loop.run_until_complete(func(**kwargs))
# loop.run_forever()
    except Exception:
traceback.print_exc()
m = None
aiosession.close()
return m
| StarcoderdataPython |
1750889 | <gh_stars>10-100
#!/usr/bin/env python
"""
Unit tests for M2Crypto.BN.
Copyright (c) 2005 Open Source Applications Foundation. All rights reserved.
"""
import re
import warnings
from M2Crypto import BN, Rand
from tests import unittest
loops = 16
class BNTestCase(unittest.TestCase):
def test_rand(self):
# defaults
for _ in range(loops):
r8 = BN.rand(8)
# top
for _ in range(loops):
r8 = BN.rand(8, top=0)
assert r8 & 128
for _ in range(loops):
r8 = BN.rand(8, top=1)
assert r8 & 192
# bottom
for _ in range(loops):
r8 = BN.rand(8, bottom=1)
assert r8 % 2 == 1
# make sure we can get big numbers and work with them
for _ in range(loops):
r8 = BN.rand(8, top=0)
r16 = BN.rand(16, top=0)
r32 = BN.rand(32, top=0)
r64 = BN.rand(64, top=0)
r128 = BN.rand(128, top=0)
r256 = BN.rand(256, top=0)
r512 = BN.rand(512, top=0)
assert r8 < r16 < r32 < r64 < r128 < r256 < r512 < (r512 + 1)
def test_rand_range(self):
# small range
for _ in range(loops):
r = BN.rand_range(1)
self.assertEqual(r, 0)
for _ in range(loops):
r = BN.rand_range(4)
assert 0 <= r < 4
# large range
r512 = BN.rand(512, top=0)
for _ in range(loops):
r = BN.rand_range(r512)
assert 0 <= r < r512
def test_randfname(self):
m = re.compile('^[a-zA-Z0-9]{8}$')
for _ in range(loops):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
r = BN.randfname(8)
assert m.match(r)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(BNTestCase)
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
| StarcoderdataPython |
3267748 | <filename>model/models/feat.py
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
def conv3x3(in_channels, out_channels, stride=1):
return nn.Conv1d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
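# The channel/spatial attention blocks below follow the CBAM design
# (Woo et al., 2018), adapted here to 1-D feature maps.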
class ChannelAttention(nn.Module):
def __init__(self, in_planes, rotio=3):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.sharedMLP = nn.Sequential(
nn.Conv1d(in_planes, in_planes // rotio, 1, bias=False), nn.ReLU(),
nn.Conv1d(in_planes // rotio, in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return self.sigmoid(avgout + maxout)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), "kernel size must be 3 or 7"
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv1d(2, 1, kernel_size,
padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avgout, maxout], dim=1)
x = self.conv(x)
return self.sigmoid(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm1d(planes)
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x, y=0, z=0):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
        out = self.ca(out) * out  # channel attention, applied via broadcasting
        out = self.sa(out) * out  # spatial attention, applied via broadcasting
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
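        # scaled dot-product attention: softmax(q @ k^T / temperature) @ v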
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(
temperature=np.power(d_k, 0.5))
self.slf_attn = BasicBlock(3, 3)
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1,
len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1,
len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1,
len_v, d_v) # (n*b) x lv x dv
q, k, v = self.slf_attn(torch.cat([
            q.view(-1, 1, d_k),
            k.view(-1, 1, d_k),
            v.view(-1, 1, d_k)], dim=1)).chunk(3, dim=1)
q = q.contiguous().view(-1,
len_q, d_k) # (n*b) x lq x dk
k = k.contiguous().view(-1,
len_k, d_k) # (n*b) x lk x dk
v = v.contiguous().view(-1,
len_v, d_v) # (n*b) x lv x dv
output, attn, log_attn = self.attention(q, k, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(
sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output
class FEAT(FewShotModel):
def __init__(self, args):
super().__init__(args)
if args.backbone_class == 'ConvNet':
hdim = 64
elif args.backbone_class == 'Res12':
hdim = 640
elif args.backbone_class == 'Res18':
hdim = 512
elif args.backbone_class == 'WRN':
hdim = 640
else:
            raise ValueError('Unrecognized backbone_class: {}'.format(args.backbone_class))
self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
# self.slf_attn = BasicBlock(hdim, hdim)
# self.slf_attn2 = BasicBlock(20, 20)
def _forward(self, instance_embs, support_idx, query_idx):
emb_dim = instance_embs.size(-1)
# print("{}\n{}\n{}\n".format(instance_embs.shape,
# support_idx.shape, query_idx.shape))
# organize support/query data
support = instance_embs[support_idx.contiguous(
).view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
query = instance_embs[query_idx.contiguous(
).view(-1)].contiguous().view(*(query_idx.shape + (-1,)))
# print("support:{} query:{}".format(support.shape, query.shape))
# get mean of the support
proto = support.mean(dim=1) # Ntask x NK x d
num_batch = proto.shape[0]
num_proto = proto.shape[1]
num_query = np.prod(query_idx.shape[-2:])
# print("proto:")
# print(proto.shape)
# query: (num_batch, num_query, num_proto, num_emb)
# proto: (num_batch, num_proto, num_emb)
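        # FEAT: adapt the class prototypes to the current task with set-to-set self-attention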
proto = self.slf_attn(proto, proto, proto)
if self.args.use_euclidean:
# (Nbatch*Nq*Nw, 1, d)
query = query.view(-1, emb_dim).unsqueeze(1)
proto = proto.unsqueeze(1).expand(
num_batch, num_query, num_proto, emb_dim).contiguous()
proto = proto.view(num_batch*num_query, num_proto,
emb_dim) # (Nbatch x Nq, Nk, d)
logits = - torch.sum((proto - query) ** 2, 2) / \
self.args.temperature
else:
proto = F.normalize(proto, dim=-1) # normalize for cosine distance
query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
logits = torch.bmm(query, proto.permute(
[0, 2, 1])) / self.args.temperature
logits = logits.view(-1, num_proto)
# for regularization
if self.training:
aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
num_query = np.prod(aux_task.shape[1: 3])
aux_task = aux_task.permute([0, 2, 1, 3])
aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
# apply the transformation over the Aug Task
# print("{} {}".format(self.args.shot, self.args.query))
# print("aux_task")
# print(aux_task.shape)
aux_emb = self.slf_attn(
aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
# compute class mean
aux_emb = aux_emb.view(
num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
aux_center = torch.mean(aux_emb, 2) # T x N x d
if self.args.use_euclidean:
aux_task = aux_task.permute([1, 0, 2]).contiguous(
).view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
aux_center = aux_center.unsqueeze(1).expand(
num_batch, num_query, num_proto, emb_dim).contiguous()
aux_center = aux_center.view(
num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
logits_reg = - \
torch.sum((aux_center - aux_task) ** 2, 2) / \
self.args.temperature2
else:
# normalize for cosine distance
aux_center = F.normalize(aux_center, dim=-1)
aux_task = aux_task.permute([1, 0, 2]).contiguous().view(
num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
logits_reg = torch.bmm(aux_task, aux_center.permute(
[0, 2, 1])) / self.args.temperature2
logits_reg = logits_reg.view(-1, num_proto)
return logits, logits_reg
else:
return logits
| StarcoderdataPython |
3321366 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 - 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from calvin.requests.calvinresponse import RESPONSE_CODES, CalvinResponse
pytestmark = pytest.mark.unittest
def test_boolean_value():
success_list = range(200, 207)
for code in RESPONSE_CODES:
response = CalvinResponse(code)
if code in success_list:
assert response
else:
assert not response
def test_comparisons():
first = CalvinResponse(100)
second = CalvinResponse(200)
third = CalvinResponse(200)
assert first < second
assert second > first
assert second == third
assert first != second
assert second <= third
assert third <= second
def test_set_status():
response = CalvinResponse(100)
assert response.status == 100
response.set_status(400)
assert response.status == 400
response.set_status(True)
assert response.status == 200
response.set_status(False)
assert response.status == 500
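# ----------------------------------------------------------------------
# Hedged usage sketch (added for illustration): how the semantics pinned
# down by the tests above are typically consumed by calling code.  The
# handler below is hypothetical and not part of Calvin.
def _example_handler():
    response = CalvinResponse(200)
    if response:  # truthy only for the 2xx range, see test_boolean_value
        return "ok"
    return "failed"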
| StarcoderdataPython |
189081 | from test.unit.extraction.gaussian_parser_test import *
from test.unit.extraction.extraction_test import *
from test.unit.glassware_test import *
from test.unit.datawork_test import *
from test.unit.writer_test import *
from test.unit.tesliper_test import *
import unittest  # explicit import; do not rely on a wildcard import to supply it
if __name__ == '__main__':
    unittest.main()
184026 | <gh_stars>0
import numpy as np
import pandas as pd
def extract_table(input_file, cancer_type):
Cancer_type = cancer_type
data = pd.read_csv(input_file)
columns = data.columns
selected_col = ['Mutation type', 'Trinucleotide']
for c in columns:
if Cancer_type in c:
selected_col.append(c)
    sub_data = data[selected_col].copy()
    count_cols = selected_col[2:]  # numeric sample columns; the first two hold labels
    # Row statistics must be taken over the numeric columns only, otherwise
    # np.percentile trips over the two string label columns.
    sub_data['mean'] = sub_data[count_cols].mean(axis=1)
    sub_data['percentile'] = np.percentile(sub_data[count_cols], 20, axis=1)
    sub_data['cutoff'] = sub_data[["mean", "percentile"]].max(axis=1)
    for c in count_cols:
        #sub_data[c] = sub_data[c] >= sub_data['mean']
        sub_data[c] = sub_data[c] >= sub_data['cutoff']
    sub_data = sub_data.drop(columns=['mean', 'percentile', 'cutoff'])
    sub_data[count_cols] = sub_data[count_cols].astype(int)
    sub_data.to_csv('../data/' + Cancer_type + '_0_1_percentile.csv', index=False)
selected_col = ['Mutation type', 'Trinucleotide']
for c in columns:
if Cancer_type in c:
selected_col.append(c)
    sub_data = data[selected_col].copy()
    sub_data = sub_data.replace(0, np.NaN)  # zeros are ignored by the mean below
    sub_data['mean'] = sub_data[count_cols].mean(axis=1)  # NaN-aware row mean
    for c in count_cols:
        sub_data[c] = sub_data[c] >= sub_data['mean']  # all-NaN rows compare False
    sub_data = sub_data.drop(columns='mean')
    sub_data[count_cols] = sub_data[count_cols].astype(int)
    sub_data.to_csv('../data/' + Cancer_type + '_ignore_0.csv', index=False)
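# ----------------------------------------------------------------------
# Worked example of the first cutoff rule above (illustration only):
# for a row with sample counts [2, 0, 4, 10], mean = 4.0 and the 20th
# percentile = 1.2, so cutoff = max(4.0, 1.2) = 4.0 and the row
# binarises to [0, 0, 1, 1].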
if __name__ == "__main__":
cancer_type = 'Skin-Melanoma'
input = '../data/WES_TCGA.96.csv'
extract_table(input, cancer_type)
| StarcoderdataPython |
168255 | <gh_stars>1-10
class Player(object):
INIT_STATE = 0x00
ENTERED_STATE = 0x01
READY_STATE = 0x02
def __init__(self):
        # player lifecycle state: INIT -> ENTERED -> READY
        self.state = self.INIT_STATE
        # peer id (hex); -1 until one is assigned
        self.peerid = -1
self.character = None
self.qobject = None
self.active = False
self.aoilist = []
self.sendmsgs = []
| StarcoderdataPython |
1686373 | # coding=utf-8
# Module magneto_bold_16
# generated from Magneto 11.25pt
name = "<NAME>"
start_char = '!'
end_char = chr(127)
char_height = 16
space_width = 8
gap_width = 2
bitmaps = (
# @0 '!' (6 pixels wide)
0x00, #
0x1C, # OOO
0x38, # OOO
0x38, # OOO
0x30, # OO
0x70, # OOO
0x60, # OO
0x00, #
0xE0, # OOO
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @16 '"' (6 pixels wide)
0x00, #
0x6C, # OO OO
0xD8, # OO OO
0xD8, # OO OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @32 '#' (11 pixels wide)
0x00, 0x00, #
0x0C, 0xC0, # OO OO
0x1D, 0xC0, # OOO OOO
0x19, 0x80, # OO OO
0x7F, 0xE0, # OOOOOOOOOO
0x19, 0x80, # OO OO
0x33, 0x00, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0x33, 0x00, # OO OO
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @64 '$' (12 pixels wide)
0x01, 0x40, # O O
0x0F, 0xE0, # OOOOOOO
0x18, 0xF0, # OO OOOO
0x18, 0xE0, # OO OOO
0x1D, 0x00, # OOO O
0x0F, 0x00, # OOOO
0x0F, 0x80, # OOOOO
0x0B, 0x80, # O OOO
0x79, 0x80, # OOOO OO
0xF1, 0x80, # OOOO OO
0x7F, 0x00, # OOOOOOO
0x24, 0x00, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @96 '%' (9 pixels wide)
0x00, 0x00, #
0x71, 0x80, # OOO OO
0x73, 0x00, # OOO OO
0x76, 0x00, # OOO OO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x10, 0x00, # O
0x37, 0x00, # OO OOO
0x67, 0x00, # OO OOO
0xC7, 0x00, # OO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @128 '&' (11 pixels wide)
0x00, 0x00, #
0x0F, 0x80, # OOOOO
0x18, 0xC0, # OO OO
0x19, 0xC0, # OO OOO
0x1F, 0x80, # OOOOOO
0x1C, 0x00, # OOO
0x7C, 0x00, # OOOOO
0xEC, 0x00, # OOO OO
0xCE, 0x00, # OO OOO
0xC6, 0x00, # OO OO
0x7F, 0xE0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @160 ''' (3 pixels wide)
0x00, #
0x60, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @176 '(' (6 pixels wide)
0x0C, # OO
0x18, # OO
0x30, # OO
0x60, # OO
0x60, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x40, # O
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
# @192 ')' (6 pixels wide)
0x18, # OO
0x08, # O
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x18, # OO
0x18, # OO
0x30, # OO
0x60, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
# @208 '*' (5 pixels wide)
0xA8, # O O O
0x70, # OOO
0xF8, # OOOOO
0x70, # OOO
0xA8, # O O O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @224 '+' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0xFF, 0x80, # OOOOOOOOO
0xFF, 0x00, # OOOOOOOO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x30, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @256 ',' (3 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x60, # OO
0x40, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @272 '-' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x7C, # OOOOO
0xF8, # OOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @288 '.' (3 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xE0, # OOO
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @304 '/' (7 pixels wide)
0x00, #
0x02, # O
0x04, # O
0x08, # O
0x08, # O
0x10, # O
0x10, # O
0x20, # O
0x40, # O
0x40, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @320 '0' (10 pixels wide)
0x00, 0x00, #
0x1F, 0x00, # OOOOO
0x3F, 0x80, # OOOOOOO
0x70, 0xC0, # OOO OO
0xE0, 0xC0, # OOO OO
0xC0, 0xC0, # OO OO
0xC0, 0xC0, # OO OO
0xC1, 0xC0, # OO OOO
0xC3, 0x80, # OO OOO
0x7F, 0x00, # OOOOOOO
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @352 '1' (5 pixels wide)
0x00, #
0x70, # OOO
0x78, # OOOO
0x30, # OO
0x30, # OO
0x30, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @368 '2' (10 pixels wide)
0x00, 0x00, #
0x3F, 0x80, # OOOOOOO
0x7F, 0xC0, # OOOOOOOOO
0x71, 0xC0, # OOO OOO
0x00, 0xC0, # OO
0x01, 0x80, # OO
0x03, 0x80, # OOO
0x06, 0x00, # OO
0x3C, 0x00, # OOOO
0x7F, 0xC0, # OOOOOOOOO
0xFF, 0x80, # OOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @400 '3' (9 pixels wide)
0x00, 0x00, #
0x3F, 0x80, # OOOOOOO
0x7F, 0x00, # OOOOOOO
0x06, 0x00, # OO
0x0E, 0x00, # OOO
0x1F, 0x00, # OOOOO
0x03, 0x80, # OOO
0x01, 0x80, # OO
0xE1, 0x80, # OOO OO
0xE7, 0x00, # OOO OOO
0x7C, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @432 '4' (10 pixels wide)
0x00, 0x00, #
0x38, 0xC0, # OOO OO
0x39, 0x80, # OOO OO
0x39, 0x80, # OOO OO
0x11, 0x80, # O OO
0x63, 0x00, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0xFF, 0x80, # OOOOOOOOO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @464 '5' (9 pixels wide)
0x00, 0x00, #
0x1F, 0x80, # OOOOOO
0x3F, 0x00, # OOOOOO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x7F, 0x00, # OOOOOOO
0x03, 0x80, # OOO
0x01, 0x80, # OO
0xF1, 0x80, # OOOO OO
0xF3, 0x00, # OOOO OO
0x7C, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @496 '6' (9 pixels wide)
0x00, 0x00, #
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x30, 0x00, # OO
0x60, 0x00, # OO
0xC7, 0x00, # OO OOO
0xC7, 0x80, # OO OOOO
0xC3, 0x80, # OO OOO
0xE1, 0x80, # OOO OO
0x7F, 0x00, # OOOOOOO
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @528 '7' (8 pixels wide)
0x00, #
0x7F, # OOOOOOO
0xFE, # OOOOOOO
0x86, # O OO
0x0C, # OO
0x0C, # OO
0x7E, # OOOOOO
0x18, # OO
0x30, # OO
0x30, # OO
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @544 '8' (9 pixels wide)
0x00, 0x00, #
0x0F, 0x00, # OOOO
0x3F, 0x80, # OOOOOOO
0x31, 0x80, # OO OO
0x31, 0x80, # OO OO
0x7E, 0x00, # OOOOOO
0xE3, 0x00, # OOO OO
0xC3, 0x00, # OO OO
0xC7, 0x00, # OO OOO
0xC6, 0x00, # OO OO
0x3C, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @576 '9' (8 pixels wide)
0x00, #
0x3C, # OOOO
0x62, # OO O
0xC3, # OO OO
0xC3, # OO OO
0xC3, # OO OO
0x67, # OO OOO
0x3E, # OOOOO
0x0C, # OO
0x18, # OO
0x30, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @592 ':' (4 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x70, # OOO
0x70, # OOO
0x00, #
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @608 ';' (4 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x70, # OOO
0x70, # OOO
0x00, #
0x60, # OO
0x40, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @624 '<' (8 pixels wide)
0x00, #
0x00, #
0x03, # OO
0x0E, # OOO
0x3C, # OOOO
0xF0, # OOOO
0xC0, # OO
0xF0, # OOOO
0x38, # OOO
0x1C, # OOO
0x04, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @640 '=' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0xFC, # OOOOOO
0x00, #
0xFC, # OOOOOO
0xFC, # OOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @656 '>' (8 pixels wide)
0x00, #
0x00, #
0x20, # O
0x38, # OOO
0x1C, # OOO
0x0F, # OOOO
0x03, # OO
0x0F, # OOOO
0x3C, # OOOO
0x70, # OOO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @672 '?' (8 pixels wide)
0x00, #
0xFE, # OOOOOOO
0xE3, # OOO OO
0xE3, # OOO OO
0x07, # OOO
0x3C, # OOOO
0x70, # OOO
0x00, #
0x70, # OOO
0x70, # OOO
0x70, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @688 '@' (11 pixels wide)
0x00, 0x00, #
0x0F, 0x80, # OOOOO
0x30, 0xC0, # OO OO
0x40, 0x20, # O O
0x5F, 0xA0, # O OOOOOO O
0xB9, 0xA0, # O OOO OO O
0xB3, 0x20, # O OO OO O
0xBF, 0xC0, # O OOOOOOOO
0xC0, 0x00, # OO
0x61, 0x80, # OO OO
0x1E, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @720 'A' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x03, 0x80, # OOO
0x00, 0x07, 0x00, # OOO
0x00, 0x0B, 0x00, # O OO
0x00, 0x17, 0x00, # O OOO
0x00, 0x26, 0x00, # O OO
0x0F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOO
0x30, 0x8C, 0x00, # OO O OO
0x41, 0x8C, 0x00, # O OO OO
0xC7, 0x1C, 0x00, # OO OOO OOO
0xFC, 0x18, 0x00, # OOOOOO OO
0x78, 0x00, 0x00, # OOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @768 'B' (13 pixels wide)
0x00, 0x00, #
0x7F, 0xF0, # OOOOOOOOOOO
0xFF, 0xF8, # OOOOOOOOOOOOO
0x00, 0x18, # OO
0x0C, 0x10, # OO O
0x1C, 0x20, # OOO O
0x19, 0xC0, # OO OOO
0x38, 0xC0, # OOO OO
0x30, 0xC0, # OO OO
0x31, 0x80, # OO OO
0x7F, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @800 'C' (10 pixels wide)
0x00, 0x00, #
0x0F, 0xC0, # OOOOOO
0x19, 0xC0, # OO OOO
0x31, 0xC0, # OO OOO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x60, 0x00, # OO
0x3F, 0xC0, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @832 'D' (13 pixels wide)
0x00, 0x00, #
0x7F, 0xE0, # OOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x38, # OOO
0x18, 0x18, # OO OO
0x18, 0x18, # OO OO
0x38, 0x10, # OOO O
0x30, 0x30, # OO OO
0x30, 0x60, # OO OO
0x73, 0x80, # OOO OOO
0x7E, 0x00, # OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @864 'E' (11 pixels wide)
0x00, 0x00, #
0x07, 0xE0, # OOOOOO
0x18, 0xE0, # OO OOO
0x18, 0xE0, # OO OOO
0x1C, 0x00, # OOO
0x1C, 0x00, # OOO
0x70, 0x00, # OOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x7F, 0xE0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @896 'F' (14 pixels wide)
0x00, 0x00, #
0x3F, 0xFC, # OOOOOOOOOOOO
0x7F, 0xF8, # OOOOOOOOOOOO
0xC0, 0xC0, # OO OO
0x9C, 0xC0, # O OOO OO
0xFC, 0xC0, # OOOOOO OO
0x79, 0xF0, # OOOO OOOOO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x7B, 0x00, # OOOO OO
0xFE, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @928 'G' (11 pixels wide)
0x00, 0x00, #
0x07, 0xE0, # OOOOOO
0x18, 0xE0, # OO OOO
0x30, 0xE0, # OO OOO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0xC0, # OO OO
0xC0, 0xC0, # OO OO
0xE1, 0xC0, # OOO OOO
0x71, 0x80, # OOO OO
0x1F, 0x80, # OOOOOO
0x01, 0x80, # OO
0x01, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @960 'H' (21 pixels wide)
0x00, 0x60, 0x00, # OO
0x00, 0x61, 0x80, # OO OO
0x00, 0xE3, 0x00, # OOO OO
0x00, 0xC3, 0x00, # OO OO
0x00, 0xC7, 0x00, # OO OOO
0x00, 0xC6, 0x00, # OO OO
0x1F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOO
0x71, 0x8C, 0x00, # OOO OO OO
0xE3, 0x8C, 0x00, # OOO OOO OO
0xC3, 0x1C, 0x00, # OO OO OOO
0xC6, 0x18, 0x00, # OO OO OO
0x78, 0x00, 0x00, # OOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @1008 'I' (11 pixels wide)
0x00, 0x00, #
0x3F, 0xE0, # OOOOOOOOO
0x7F, 0xE0, # OOOOOOOOOO
0xC0, 0xC0, # OO OO
0x9C, 0xC0, # O OOO OO
0xFC, 0xC0, # OOOOOO OO
0x79, 0x80, # OOOO OO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x73, 0x00, # OOO OO
0xFE, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1040 'J' (12 pixels wide)
0x00, 0x00, #
0x3F, 0xF0, # OOOOOOOOOO
0x7F, 0xE0, # OOOOOOOOOO
0xC0, 0x60, # OO OO
0x9C, 0xC0, # O OOO OO
0xFC, 0xC0, # OOOOOO OO
0x79, 0x80, # OOOO OO
0x01, 0x80, # OO
0x03, 0x80, # OOO
0x03, 0x00, # OO
0xE6, 0x00, # OOO OO
0xFC, 0x00, # OOOOOO
0x78, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1072 'K' (13 pixels wide)
0x00, 0x00, #
0x7C, 0x18, # OOOOO OO
0xFC, 0x30, # OOOOOO OO
0x0C, 0x60, # OO OO
0x1C, 0xC0, # OOO OO
0x19, 0x80, # OO OO
0x1B, 0xC0, # OO OOOO
0x36, 0xC0, # OO OO OO
0x3D, 0xC0, # OOOO OOO
0x39, 0x80, # OOO OO
0x71, 0xF0, # OOO OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1104 'L' (16 pixels wide)
0x00, 0x00, #
0x00, 0x1E, # OOOO
0x00, 0x7F, # OOOOOOO
0x00, 0xE7, # OOO OOO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x01, 0x80, # OO
0x63, 0x00, # OO OO
0xFF, 0xFE, # OOOOOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1136 'M' (17 pixels wide)
0x00, 0x00, 0x00, #
0x7C, 0x00, 0x00, # OOOOO
0xFC, 0x60, 0x00, # OOOOOO OO
0x18, 0xE0, 0x00, # OO OOO
0x19, 0xCE, 0x00, # OO OOO OOO
0x1B, 0xDC, 0x00, # OO OOOO OOO
0x37, 0xBC, 0x00, # OO OOOO OOOO
0x3D, 0xEC, 0x00, # OOOO OOOO OO
0x39, 0xCC, 0x00, # OOO OOO OO
0x63, 0x98, 0x00, # OO OOO OO
0x63, 0x1F, 0x80, # OO OO OOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @1184 'N' (16 pixels wide)
0x00, 0x00, #
0x03, 0x87, # OOO OOO
0x03, 0x8F, # OOO OOOO
0x03, 0xCF, # OOOO OOOO
0x06, 0xCC, # OO OO OO
0x06, 0xD8, # OO OO OO
0x06, 0xF8, # OO OOOOO
0x0C, 0x78, # OO OOOO
0x0C, 0x70, # OO OOO
0x7C, 0x30, # OOOOO OO
0xF8, 0x30, # OOOOO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1216 'O' (13 pixels wide)
0x00, 0x00, #
0x7F, 0xE0, # OOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x38, # OOO
0x1C, 0x18, # OOO OO
0x38, 0x18, # OOO OO
0x30, 0x18, # OO OO
0x30, 0x30, # OO OO
0x30, 0x60, # OO OO
0x18, 0xC0, # OO OO
0x0F, 0x80, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1248 'P' (12 pixels wide)
0x00, 0x00, #
0x7F, 0xE0, # OOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x30, # OO
0x18, 0x30, # OO OO
0x18, 0xE0, # OO OOO
0x37, 0xC0, # OO OOOOO
0x3F, 0x00, # OOOOOO
0x30, 0x00, # OO
0x70, 0x00, # OOO
0x60, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1280 'Q' (12 pixels wide)
0x00, 0x00, #
0x0F, 0xC0, # OOOOOO
0x1F, 0xE0, # OOOOOOOO
0x30, 0x70, # OO OOO
0x3C, 0x30, # OOOO OO
0x3C, 0x30, # OOOO OO
0x1C, 0x30, # OOO OO
0x00, 0x60, # OO
0x00, 0xC0, # OO
0x61, 0x00, # OO O
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1312 'R' (13 pixels wide)
0x00, 0x00, #
0x7F, 0xF0, # OOOOOOOOOOO
0xFF, 0xF8, # OOOOOOOOOOOOO
0x00, 0x18, # OO
0x0C, 0x18, # OO OO
0x0C, 0x30, # OO OO
0x1C, 0xE0, # OOO OOO
0x1B, 0x80, # OO OOO
0x19, 0x80, # OO OO
0x38, 0xC0, # OOO OO
0x30, 0xF8, # OO OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1344 'S' (15 pixels wide)
0x00, 0x00, #
0x00, 0x3E, # OOOOO
0x00, 0x6E, # OO OOO
0x00, 0x6E, # OO OOO
0x00, 0x60, # OO
0x00, 0x70, # OOO
0x7E, 0x30, # OOOOOO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0x60, 0x60, # OO OO
0x3F, 0x80, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1376 'T' (15 pixels wide)
0x00, 0x00, #
0x3F, 0xFE, # OOOOOOOOOOOOO
0x7F, 0xFC, # OOOOOOOOOOOOO
0xC0, 0x60, # OO OO
0x9C, 0xE0, # O OOO OOO
0xFC, 0xC0, # OOOOOO OO
0x78, 0xC0, # OOOO OO
0x01, 0xC0, # OOO
0x01, 0x80, # OO
0x71, 0x80, # OOO OO
0xFF, 0x00, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1408 'U' (12 pixels wide)
0x00, 0x00, #
0x7C, 0x60, # OOOOO OO
0xFC, 0xF0, # OOOOOO OOOO
0x18, 0x70, # OO OOO
0x18, 0x30, # OO OO
0x18, 0x60, # OO OO
0x30, 0x60, # OO OO
0x30, 0xC0, # OO OO
0x31, 0xC0, # OO OOO
0x3F, 0x80, # OOOOOOO
0x1F, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1440 'V' (13 pixels wide)
0x00, 0x00, #
0x7C, 0x38, # OOOOO OOO
0xFC, 0x38, # OOOOOO OOO
0x0C, 0x38, # OO OOO
0x1C, 0x30, # OOO OO
0x1C, 0x60, # OOO OO
0x18, 0xC0, # OO OO
0x19, 0x80, # OO OO
0x1B, 0x00, # OO OO
0x1E, 0x00, # OOOO
0x1C, 0x00, # OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1472 'W' (17 pixels wide)
0x00, 0x00, 0x00, #
0x7C, 0x61, 0x80, # OOOOO OO OO
0xFC, 0xE3, 0x00, # OOOOOO OOO OO
0x0C, 0xE7, 0x00, # OO OOO OOO
0x0D, 0xE6, 0x00, # OO OOOO OO
0x0D, 0xEC, 0x00, # OO OOOO OO
0x0F, 0x6C, 0x00, # OOOO OO OO
0x1A, 0x78, 0x00, # OO O OOOO
0x1E, 0x70, 0x00, # OOOO OOO
0x1C, 0x70, 0x00, # OOO OOO
0x1C, 0x60, 0x00, # OOO OO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @1520 'X' (14 pixels wide)
0x00, 0x00, #
0x3E, 0x1C, # OOOOO OOO
0x7F, 0x3C, # OOOOOOO OOOO
0x03, 0x7C, # OO OOOOO
0x03, 0xC0, # OOOO
0x03, 0x80, # OOO
0x03, 0x80, # OOO
0x07, 0x80, # OOOO
0xFD, 0x80, # OOOOOO OO
0xF1, 0x80, # OOOO OO
0xE0, 0xF8, # OOO OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1552 'Y' (14 pixels wide)
0x00, 0x00, #
0xFF, 0x0C, # OOOOOOOO OO
0xE3, 0x1C, # OOO OO OOO
0xE6, 0x38, # OOO OO OOO
0x06, 0x78, # OO OOOO
0x06, 0xF8, # OO OOOOO
0x0F, 0xB0, # OOOOO OO
0x0F, 0x30, # OOOO OO
0x00, 0x30, # OO
0x00, 0x60, # OO
0x1F, 0xE0, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1584 'Z' (15 pixels wide)
0x00, 0x00, #
0x0F, 0xF0, # OOOOOOOO
0x1F, 0xF0, # OOOOOOOOO
0x01, 0xC0, # OOO
0x0E, 0x00, # OOO
0x03, 0x00, # OO
0x0F, 0xFE, # OOOOOOOOOOO
0x39, 0x80, # OOO OO
0x71, 0x80, # OOO OO
0xE3, 0x00, # OOO OO
0xC3, 0x00, # OO OO
0xCE, 0x00, # OO OOO
0x78, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1616 '[' (7 pixels wide)
0x0E, # OOO
0x18, # OO
0x18, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xC0, # OO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
# @1632 '\' (6 pixels wide)
0xC0, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x18, # OO
0x18, # OO
0x18, # OO
0x0C, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1648 ']' (7 pixels wide)
0x0E, # OOO
0x06, # OO
0x06, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x18, # OO
0x18, # OO
0x18, # OO
0x38, # OOO
0x30, # OO
0xF0, # OOOO
0x00, #
0x00, #
0x00, #
0x00, #
# @1664 '^' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x18, # OO
0x38, # OOO
0x68, # OO O
0xCC, # OO OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1680 '_' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x7F, 0xFF, 0x80, # OOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x00, # OOOOOOOOOOOOOOOO
# @1728 '`' (3 pixels wide)
0x00, #
0x00, #
0x00, #
0xC0, # OO
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1744 'a' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x7C, 0x00, # OOOOO
0xEC, 0x00, # OOO OO
0xCC, 0x00, # OO OO
0xDC, 0x00, # OO OOO
0x7F, 0x80, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1776 'b' (13 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x18, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x67, 0xF8, # OO OOOOOOOO
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0xEC, 0x00, # OOO OO
0xF8, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1808 'c' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x7E, 0x00, # OOOOOO
0xE6, 0x00, # OOO OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x7F, 0x80, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1840 'd' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x03, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x3C, 0x00, # OOOO
0xEC, 0x00, # OOO OO
0xCC, 0x00, # OO OO
0xCC, 0x00, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1872 'e' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x7C, 0x00, # OOOOO
0xE6, 0x00, # OOO OO
0xC6, 0x00, # OO OO
0xC0, 0x00, # OO
0x7F, 0x80, # OOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1904 'f' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x7F, 0xF0, # OOOOOOOOOOO
0x3C, 0x00, # OOOO
0x6C, 0x00, # OO OO
0x6C, 0x00, # OO OO
0x6C, 0x00, # OO OO
0xD8, 0x00, # OO OO
0xD0, 0x00, # OO O
0xE0, 0x00, # OOO
0x00, 0x00, #
0x00, 0x00, #
# @1936 'g' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x3E, 0x00, # OOOOO
0xE6, 0x00, # OOO OO
0xC6, 0x00, # OO OO
0xCC, 0x00, # OO OO
0x7F, 0x80, # OOOOOOOO
0x6C, 0x00, # OO OO
0x6C, 0x00, # OO OO
0x78, 0x00, # OOOO
0x70, 0x00, # OOO
0x00, 0x00, #
# @1968 'h' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x18, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x66, 0x00, # OO OO
0x6E, 0x00, # OO OOO
0x76, 0x00, # OOO OO
0xEE, 0x00, # OOO OOO
0xCF, 0xC0, # OO OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2000 'i' (6 pixels wide)
0x00, #
0x00, #
0x38, # OOO
0x38, # OOO
0x38, # OOO
0x00, #
0x60, # OO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xFC, # OOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2016 'j' (8 pixels wide)
0x00, #
0x00, #
0x07, # OOO
0x07, # OOO
0x07, # OOO
0x00, #
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x1C, # OOO
0x7F, # OOOOOOO
0xD8, # OO OO
0xD8, # OO OO
0xF0, # OOOO
0x00, #
0x00, #
# @2032 'k' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x18, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x34, 0x00, # OO O
0x6E, 0x00, # OO OOO
0x6E, 0x00, # OO OOO
0x76, 0x00, # OOO OO
0xEE, 0x00, # OOO OOO
0xCF, 0xC0, # OO OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2064 'l' (6 pixels wide)
0x00, #
0x00, #
0x18, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xFC, # OOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2080 'm' (14 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x67, 0x40, # OO OOO O
0x6E, 0xC0, # OO OOO OO
0x77, 0xC0, # OOO OOOOO
0xEE, 0xC0, # OOO OOO OO
0xCC, 0xFC, # OO OO OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2112 'n' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x66, 0x00, # OO OO
0x6E, 0x00, # OO OOO
0x76, 0x00, # OOO OO
0xEE, 0x00, # OOO OOO
0xCF, 0xC0, # OO OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2144 'o' (13 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x7F, 0xF8, # OOOOOOOOOOOO
0xE6, 0x00, # OOO OO
0xCE, 0x00, # OO OOO
0xCE, 0x00, # OO OOO
0x7C, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2176 'p' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x18, 0x00, # OO
0x18, 0x00, # OO
0x3F, 0x00, # OOOOOO
0x33, 0x80, # OO OOO
0x31, 0x80, # OO OO
0x73, 0x00, # OOO OO
0x67, 0xE0, # OO OOOOOO
0x60, 0x00, # OO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
# @2208 'q' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x7C, 0x00, # OOOOO
0xEC, 0x00, # OOO OO
0xCC, 0x00, # OO OO
0xDC, 0x00, # OO OOO
0xFF, 0x80, # OOOOOOOOO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x10, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
# @2240 'r' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xF8, 0x00, # OOOOO
0x58, 0x00, # O OO
0x98, 0x00, # O OO
0xB8, 0x00, # O OOO
0x3F, 0x80, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2272 's' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x08, 0x00, # O
0x18, 0x00, # OO
0x18, 0x00, # OO
0x1C, 0x00, # OOO
0x2C, 0x00, # O OO
0xEC, 0x00, # OOO OO
0xFF, 0x80, # OOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2304 't' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x10, # O
0x30, # OO
0x70, # OOO
0xF8, # OOOOO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2320 'u' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0xCC, 0x00, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2352 'v' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xC7, 0xE0, # OO OOOOOO
0xC8, 0x00, # OO O
0xD0, 0x00, # OO O
0xE0, 0x00, # OOO
0xC0, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2384 'w' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x66, 0x60, # OO OO OO
0x66, 0x60, # OO OO OO
0x66, 0x60, # OO OO OO
0xCC, 0xC0, # OO OO OO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2416 'x' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x76, # OOO OO
0x3C, # OOOO
0x18, # OO
0x6C, # OO OO
0xCE, # OO OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2432 'y' (10 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0x66, 0x00, # OO OO
0xCC, 0x00, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0x6C, 0x00, # OO OO
0x6C, 0x00, # OO OO
0x78, 0x00, # OOOO
0x70, 0x00, # OOO
0x00, 0x00, #
# @2464 'z' (8 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xFC, # OOOOOO
0x18, # OO
0x30, # OO
0xE0, # OOO
0xFF, # OOOOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2480 '{' (7 pixels wide)
0x0E, # OOO
0x18, # OO
0x18, # OO
0x30, # OO
0x30, # OO
0x40, # O
0x60, # OO
0x60, # OO
0x60, # OO
0xC0, # OO
0xC0, # OO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
# @2496 '|' (1 pixels wide)
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @2512 '}' (7 pixels wide)
0x0E, # OOO
0x06, # OO
0x06, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x18, # OO
0x18, # OO
0x38, # OOO
0x30, # OO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
# @2528 '~' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x38, 0x80, # OOO O
0x7F, 0x00, # OOOOOOO
0x8E, 0x00, # O OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2560 '°' (4 pixels wide)
0x60, # OO
0x90, # O O
0x90, # O O
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
)
descriptors = (
(6,0),# !
(6,16),# "
(11,32),# #
(12,64),# $
(9,96),# %
(11,128),# &
(3,160),# '
(6,176),# (
(6,192),# )
(5,208),# *
(9,224),# +
(3,256),# ,
(6,272),# -
(3,288),# .
(7,304),# /
(10,320),# 0
(5,352),# 1
(10,368),# 2
(9,400),# 3
(10,432),# 4
(9,464),# 5
(9,496),# 6
(8,528),# 7
(9,544),# 8
(8,576),# 9
(4,592),# :
(4,608),# ;
(8,624),# <
(7,640),# =
(8,656),# >
(8,672),# ?
(11,688),# @
(21,720),# A
(13,768),# B
(10,800),# C
(13,832),# D
(11,864),# E
(14,896),# F
(11,928),# G
(21,960),# H
(11,1008),# I
(12,1040),# J
(13,1072),# K
(16,1104),# L
(17,1136),# M
(16,1184),# N
(13,1216),# O
(12,1248),# P
(12,1280),# Q
(13,1312),# R
(15,1344),# S
(15,1376),# T
(12,1408),# U
(13,1440),# V
(17,1472),# W
(14,1520),# X
(14,1552),# Y
(15,1584),# Z
(7,1616),# [
(6,1632),# \
(7,1648),# ]
(6,1664),# ^
(17,1680),# _
(3,1728),# `
(9,1744),# a
(13,1776),# b
(9,1808),# c
(10,1840),# d
(9,1872),# e
(12,1904),# f
(9,1936),# g
(10,1968),# h
(6,2000),# i
(8,2016),# j
(10,2032),# k
(6,2064),# l
(14,2080),# m
(10,2112),# n
(13,2144),# o
(11,2176),# p
(9,2208),# q
(9,2240),# r
(9,2272),# s
(7,2304),# t
(10,2320),# u
(11,2352),# v
(11,2384),# w
(7,2416),# x
(10,2432),# y
(8,2464),# z
(7,2480),# {
(1,2496),# |
(7,2512),# }
(9,2528),# ~
(4,2560),# °
)
kerning = (
(3,5,3,3,5,3,5,4,2,5,4,3,3,3,3,4,5,4,4,4,3,4,5,3,5,3,2,4,4,3,6,4,3,5,4,5,3,5,4,3,5,5,5,3,5,3,5,5,3,5,3,5,5,5,5,4,6,3,3,5,1,2,0,5,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,2,3,4,3,3,3,3,3,3,3,3,3,6,1,3,6,),
(3,5,2,2,5,2,5,4,2,5,1,3,0,3,1,4,5,4,4,4,3,3,5,3,5,2,2,1,0,3,6,4,0,5,3,5,2,5,3,0,5,5,5,0,5,0,5,5,3,5,0,5,5,5,5,4,6,2,3,5,1,0,0,5,0,3,0,0,0,2,0,3,3,0,3,3,0,0,0,0,0,0,0,2,0,0,0,0,0,0,3,6,1,0,6,),
(9,10,10,8,9,10,10,10,7,11,9,8,9,8,8,11,9,9,9,10,9,10,10,10,11,9,9,10,9,8,10,10,8,10,10,10,10,11,10,9,11,11,10,7,10,7,10,10,9,10,10,11,10,10,10,9,10,8,9,9,7,10,0,10,10,9,10,10,10,8,10,9,9,6,9,9,9,9,10,8,10,10,7,9,9,10,9,8,9,8,9,11,7,9,10,),
(10,12,9,9,11,9,12,10,8,12,9,9,9,9,8,10,11,11,11,10,10,9,12,10,11,9,8,9,9,10,12,10,9,12,9,12,9,11,9,9,11,11,12,8,12,8,12,12,9,12,9,11,12,12,12,11,12,9,9,11,7,9,0,11,9,9,9,9,9,8,9,9,10,7,9,9,9,9,9,8,9,9,9,9,9,9,9,8,9,9,9,12,7,9,12,),
(8,8,7,8,8,8,8,8,7,8,6,7,8,8,8,8,8,8,8,7,8,8,8,8,7,8,7,6,8,8,9,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,8,8,8,6,4,0,7,8,8,8,8,8,7,8,8,8,7,8,8,8,8,8,7,8,8,8,8,8,8,8,8,8,8,8,9,6,8,9,),
(11,10,10,10,11,10,10,10,10,10,6,10,7,11,11,9,11,11,10,8,10,9,10,9,10,11,10,7,7,11,10,9,11,10,9,10,10,11,8,11,11,11,10,11,10,11,10,10,11,10,9,11,10,10,10,11,10,11,11,9,9,6,0,10,10,11,10,11,10,10,10,11,11,10,11,11,11,11,10,10,11,9,11,11,11,11,11,11,11,11,11,11,9,7,10,),
(0,2,0,0,2,0,2,1,0,2,0,0,0,0,0,1,2,1,1,1,0,0,2,0,2,0,0,0,0,0,3,1,0,2,0,2,0,2,0,0,2,2,2,0,2,0,2,2,0,2,0,2,2,2,2,1,3,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,3,),
(2,4,2,2,4,2,4,2,3,6,2,3,2,3,2,3,4,3,3,3,2,2,4,2,3,2,3,2,2,2,5,2,2,4,2,4,2,3,2,2,3,3,4,2,4,2,4,4,2,4,2,3,4,4,4,3,5,3,3,6,3,2,0,3,2,2,2,2,2,3,2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,6,3,2,5,),
(5,6,5,4,5,5,6,6,2,6,6,3,5,5,4,6,5,5,5,6,5,6,6,6,6,5,5,6,6,4,6,6,4,6,6,6,5,6,6,5,6,6,6,3,6,3,6,6,4,6,5,6,6,6,6,5,6,4,5,5,3,5,0,6,5,5,5,5,5,5,5,5,5,2,5,5,5,5,5,4,5,6,4,6,5,6,5,5,5,6,5,6,2,5,6,),
(3,5,4,2,4,2,5,4,2,5,1,2,0,2,1,5,4,4,4,3,3,4,5,3,5,1,1,3,0,3,5,4,0,5,4,5,2,5,4,0,5,5,5,0,5,0,5,5,3,5,0,5,5,5,5,4,5,2,3,5,1,2,0,4,0,3,0,0,0,2,0,3,3,0,3,3,0,0,0,2,0,4,1,3,0,0,0,0,0,0,3,5,1,0,5,),
(8,7,6,5,6,7,7,9,5,7,9,6,5,6,6,9,7,6,6,8,8,9,7,8,9,7,7,9,9,5,7,9,4,7,9,7,7,9,9,5,9,9,7,3,7,4,7,7,7,7,7,9,7,7,7,6,7,4,7,7,5,7,0,6,7,7,7,6,7,7,6,7,7,4,7,7,7,7,7,6,7,9,6,8,7,8,7,7,7,8,8,9,5,6,7,),
(3,0,2,3,2,3,0,3,1,0,1,2,0,3,2,2,3,2,3,0,3,2,1,3,0,3,2,0,3,2,2,2,3,1,2,2,3,2,2,3,2,2,1,2,2,2,0,2,2,1,2,2,1,0,0,3,0,3,3,0,1,0,0,0,3,3,3,3,3,2,3,3,3,1,3,3,3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,3,1,0,0,),
(5,0,6,4,3,6,3,6,3,1,3,3,5,5,4,6,5,3,5,6,5,6,3,6,4,5,5,6,5,3,4,6,4,4,6,4,6,0,6,5,0,0,4,0,4,2,4,4,0,3,6,0,4,3,3,5,2,4,5,3,3,6,0,3,6,5,6,6,6,4,6,5,5,2,5,5,5,5,6,4,6,5,3,5,5,6,5,4,5,3,5,6,3,5,2,),
(3,0,2,3,3,3,0,3,2,0,1,2,3,3,3,3,3,3,3,0,3,3,2,3,1,3,2,1,3,3,2,3,3,2,3,2,3,3,3,3,3,3,2,3,2,3,1,2,3,1,3,3,1,0,0,3,0,3,3,0,1,0,0,0,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,0,),
(4,6,4,3,6,3,6,4,3,6,4,4,2,4,1,5,6,5,5,5,4,4,6,4,5,3,3,4,4,4,7,4,2,6,4,6,3,5,4,2,5,5,6,1,6,1,6,6,3,6,3,5,6,6,6,5,7,3,4,6,2,3,0,5,3,3,3,3,3,3,3,3,4,1,3,3,3,3,3,2,3,4,2,4,3,4,3,3,3,4,4,7,2,2,7,),
(9,10,10,8,9,10,10,10,7,10,10,7,9,9,8,10,9,9,9,10,9,10,10,10,10,9,9,10,10,8,10,10,8,9,10,9,10,10,10,9,10,10,9,7,9,7,9,9,8,9,10,10,9,9,9,9,10,8,9,9,7,10,0,10,10,9,10,10,10,9,10,9,9,6,9,9,9,9,10,8,10,10,8,10,9,10,9,9,9,10,9,10,7,9,9,),
(3,5,3,3,4,3,5,4,1,5,4,2,3,3,2,4,4,4,4,3,3,4,5,3,4,3,2,4,4,3,5,4,3,5,4,5,3,4,4,3,4,4,5,2,5,2,5,5,2,5,3,4,5,5,5,4,5,3,3,4,1,3,0,4,3,3,3,3,3,2,3,3,3,1,3,3,3,3,3,2,3,4,3,3,3,3,3,2,3,3,3,5,1,3,5,),
(10,10,9,10,9,10,10,10,8,10,9,9,6,10,9,10,10,9,10,9,10,9,10,10,10,10,9,9,10,9,10,9,10,10,9,10,10,10,9,10,10,10,10,9,10,9,10,10,9,10,9,10,10,10,10,10,10,10,10,9,8,8,0,10,10,10,10,10,10,9,10,10,10,8,10,10,10,10,10,9,10,10,10,10,10,10,10,9,10,10,10,10,8,7,10,),
(9,8,9,8,8,9,8,9,6,8,9,7,9,9,8,9,8,7,9,9,9,9,8,9,8,8,8,9,9,7,9,9,8,8,9,8,9,8,9,9,8,8,8,7,8,7,8,8,7,8,9,8,8,8,8,9,9,8,8,8,6,9,0,7,9,8,9,9,9,8,9,8,8,5,8,8,8,8,9,7,9,9,8,9,8,9,8,8,8,9,8,9,6,9,9,),
(9,9,9,7,9,9,9,10,6,9,10,7,8,7,7,10,9,8,8,10,7,10,9,10,9,9,9,10,10,7,10,10,7,9,10,9,9,9,10,8,9,9,9,6,9,6,9,9,7,9,9,9,9,9,9,8,10,7,9,9,7,9,0,9,9,9,9,9,9,9,9,9,9,6,9,9,9,9,9,8,9,10,7,10,9,10,9,9,9,10,9,10,6,8,10,),
(9,8,9,8,8,9,8,9,6,8,9,7,9,9,8,9,8,7,9,9,9,9,8,9,8,8,8,9,9,7,9,9,8,8,9,8,9,8,9,9,8,8,8,7,8,7,8,8,7,8,9,8,8,8,8,9,9,8,8,8,6,9,0,6,9,8,9,9,9,8,9,8,8,5,8,8,8,8,9,7,9,9,8,9,8,9,8,8,8,9,8,9,6,9,9,),
(9,5,9,8,7,9,6,9,6,5,9,7,9,9,8,9,8,7,9,9,9,9,8,9,8,8,8,9,9,7,8,9,8,7,9,7,9,8,9,9,8,8,7,7,7,7,7,7,7,6,9,8,7,6,6,9,6,8,8,7,6,9,0,6,9,8,9,9,9,8,9,8,8,6,8,8,8,8,9,7,9,9,8,9,8,9,8,8,8,9,8,9,6,9,6,),
(6,7,5,4,7,6,7,7,4,7,7,5,4,5,4,7,7,6,6,7,5,7,7,7,7,6,6,7,7,5,8,7,4,7,7,7,6,7,7,4,7,7,7,3,7,3,7,7,5,7,6,7,7,7,7,6,8,4,6,7,4,6,0,7,6,6,6,5,6,6,5,6,6,3,6,6,6,6,6,5,6,7,4,7,6,7,6,6,6,7,6,8,3,5,8,),
(8,9,8,7,8,8,9,8,5,9,8,6,8,8,7,9,8,8,8,8,8,8,9,8,9,7,7,8,8,7,9,8,7,9,8,9,8,9,8,8,9,9,9,6,9,6,9,9,7,9,8,9,9,9,9,8,9,7,7,8,5,8,0,9,8,7,8,8,8,7,8,7,7,5,7,7,7,7,8,6,8,8,7,8,7,8,7,7,7,8,7,9,5,8,9,),
(7,8,7,5,7,7,8,8,4,8,8,5,6,6,5,8,7,7,6,8,7,8,8,8,8,7,7,8,8,6,8,8,5,7,8,7,7,8,8,6,8,8,7,4,7,4,7,7,6,7,7,8,7,7,7,6,8,5,7,7,5,7,0,8,7,7,7,7,7,7,7,7,7,4,7,7,7,7,7,6,7,8,5,8,7,8,7,7,7,8,7,8,4,6,7,),
(3,0,4,3,3,4,1,4,2,0,4,2,3,3,3,4,3,3,3,4,3,4,3,4,3,3,3,4,4,3,3,4,3,2,4,2,4,3,4,3,3,3,2,3,2,3,2,2,3,1,4,3,2,1,1,3,0,3,3,2,1,4,0,1,4,3,4,4,4,3,4,3,3,2,3,3,3,3,4,2,4,4,3,4,3,4,3,3,3,4,3,4,1,3,0,),
(3,0,4,3,2,4,1,4,1,0,4,2,3,3,2,4,3,2,3,4,3,4,3,4,3,3,3,4,4,2,3,4,3,2,4,2,4,3,4,3,3,3,2,2,2,2,2,2,2,1,4,3,2,1,1,3,0,3,3,2,1,4,0,1,4,3,4,4,4,3,4,3,3,1,3,3,3,3,4,2,4,4,3,4,3,4,3,3,3,4,3,4,1,3,0,),
(6,8,5,6,7,6,8,6,5,8,4,5,5,6,6,6,7,7,7,6,6,5,8,6,7,6,5,4,6,6,8,6,6,8,5,8,6,7,5,6,7,7,8,6,8,6,8,8,6,8,5,7,8,8,8,7,8,6,6,7,4,4,0,7,6,6,6,6,6,5,6,6,6,5,6,6,6,6,6,5,6,6,6,6,6,6,6,6,6,6,6,8,4,5,8,),
(6,1,5,6,5,6,4,7,4,2,7,5,6,6,5,7,6,5,6,6,6,7,5,6,7,6,5,7,7,5,5,7,6,4,7,5,6,7,7,6,7,7,4,5,5,5,5,5,5,4,6,7,4,4,3,6,2,6,6,5,4,5,0,4,6,6,6,6,6,5,6,6,6,3,6,6,6,6,6,5,6,7,6,6,6,6,6,5,6,6,6,7,4,6,3,),
(7,5,8,5,5,8,5,8,5,6,8,5,7,6,6,8,7,4,6,8,7,8,7,8,8,7,7,8,8,4,7,8,6,6,8,6,8,8,8,7,8,8,6,3,6,4,6,6,6,5,8,8,6,5,5,6,5,6,7,6,5,8,0,5,8,7,8,8,8,7,8,7,7,4,7,7,7,7,8,6,8,8,5,8,7,8,7,7,7,8,7,8,5,7,4,),
(6,8,7,5,7,5,8,7,4,8,6,5,4,5,4,8,7,7,7,6,6,7,8,6,8,4,4,6,6,6,8,7,4,8,7,8,5,8,7,4,8,8,8,4,8,4,8,8,6,8,4,8,8,8,8,7,8,5,6,7,4,5,0,8,4,6,4,4,4,5,4,6,6,3,6,6,4,4,4,5,4,7,4,6,4,4,4,4,4,4,6,8,4,4,8,),
(10,11,10,9,10,10,11,11,7,11,11,8,9,9,8,11,10,10,9,11,10,11,11,11,11,10,10,11,11,9,11,11,9,10,11,10,10,11,11,9,11,11,10,8,10,8,10,10,9,10,10,11,10,10,10,9,11,9,10,10,8,10,0,11,10,10,10,10,10,10,10,10,10,7,10,10,10,10,10,9,10,11,9,11,10,11,10,10,10,11,10,11,7,9,10,),
(20,16,19,17,18,20,18,21,17,16,21,18,15,18,18,21,20,15,15,21,15,21,20,21,20,20,20,21,21,15,20,21,17,18,21,19,20,20,21,18,20,20,18,13,19,16,19,19,18,18,20,20,19,18,17,15,17,17,20,19,18,20,4,18,20,20,20,19,20,20,19,20,20,17,20,20,20,20,20,19,20,21,18,21,20,21,20,20,20,21,20,21,17,19,17,),
(11,13,11,10,12,10,13,12,9,13,11,10,10,10,9,12,12,12,12,11,11,11,13,11,13,9,9,11,11,11,13,12,9,13,11,13,10,13,11,10,13,13,13,8,13,8,13,13,11,13,10,13,13,13,13,12,13,10,11,12,9,10,0,13,10,11,10,10,10,10,10,11,11,8,11,11,9,9,10,9,10,11,9,10,9,10,9,9,9,10,11,13,9,10,13,),
(10,10,9,9,10,9,10,9,9,10,6,9,4,10,10,9,10,10,9,8,9,8,10,8,10,10,9,6,3,10,10,9,10,10,8,10,9,10,8,10,10,10,10,10,10,10,10,10,10,10,8,10,10,10,10,10,10,10,10,9,8,4,0,10,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,2,10,),
(12,13,12,10,12,12,13,13,9,13,13,10,11,11,10,13,11,12,11,12,12,13,13,12,13,11,11,13,13,11,13,13,10,12,13,12,12,13,13,11,13,13,12,8,12,8,12,12,11,12,12,13,12,12,12,11,13,10,11,12,9,12,0,13,12,11,12,12,12,11,12,11,11,8,11,11,11,11,12,10,12,13,10,12,11,12,11,11,11,12,12,13,9,11,12,),
(11,11,10,10,11,10,11,10,10,11,7,10,5,11,11,10,11,11,10,9,10,9,11,9,11,11,10,7,6,11,11,10,11,11,9,11,10,11,9,11,11,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,10,9,5,0,11,10,11,10,11,10,10,10,11,11,10,11,11,11,11,10,10,11,9,11,11,11,11,11,11,11,11,11,11,9,2,11,),
(11,13,10,10,13,11,13,12,10,13,12,11,9,11,9,12,13,12,12,12,11,12,13,12,12,11,11,12,12,11,14,12,8,13,12,13,11,12,12,9,12,12,13,7,13,8,13,13,10,13,11,12,13,13,13,12,14,10,11,13,9,11,0,11,11,11,11,10,11,11,10,11,11,8,11,11,11,11,11,10,11,12,9,12,11,12,11,11,11,12,11,14,9,10,14,),
(10,11,10,9,10,10,11,10,9,11,10,9,10,10,9,10,10,10,10,10,10,10,11,10,11,9,9,10,10,9,11,10,9,11,10,11,10,11,10,10,11,11,11,9,11,9,11,11,9,11,10,11,11,11,11,10,11,9,9,10,9,10,0,11,10,9,10,10,10,9,10,9,9,9,9,9,9,9,10,8,10,10,9,10,9,10,9,9,9,10,9,11,9,10,11,),
(20,16,19,17,18,20,18,21,17,16,21,18,15,18,18,21,20,15,15,21,15,21,20,21,20,20,20,21,21,15,20,21,17,18,21,19,20,20,21,18,20,20,18,13,19,16,19,19,18,18,20,20,19,18,17,15,17,17,20,19,18,20,4,18,20,20,20,19,20,20,19,20,20,17,20,20,20,20,20,19,20,21,18,21,20,21,20,20,20,21,20,21,17,19,17,),
(9,11,9,8,10,9,11,10,7,11,10,8,9,9,8,10,10,10,10,9,9,10,11,9,10,8,8,10,10,9,11,10,8,11,10,11,9,10,10,9,10,10,11,7,11,7,11,11,8,11,9,10,11,11,11,10,11,8,8,10,6,9,0,10,9,8,9,9,9,8,9,8,9,6,8,8,8,8,9,7,9,10,8,9,8,9,8,8,8,9,9,11,6,9,11,),
(9,11,9,8,11,9,11,10,8,11,10,9,9,9,8,10,11,10,10,10,9,10,11,9,11,8,8,10,10,9,12,10,8,11,10,11,9,11,10,9,11,11,11,7,11,7,11,11,9,11,9,11,11,11,11,10,12,8,9,11,7,9,0,11,9,9,9,9,9,8,9,9,9,6,9,9,8,8,9,7,9,10,8,9,8,9,8,8,8,9,9,12,7,9,12,),
(12,12,11,11,12,11,12,11,11,12,10,11,10,12,12,10,12,12,11,11,11,10,12,10,11,12,11,10,10,12,13,10,12,12,10,12,11,12,10,12,12,12,12,12,12,12,12,12,12,12,10,12,12,12,12,12,13,12,12,12,10,10,0,11,11,12,11,12,11,11,11,12,12,11,12,12,12,12,11,11,12,10,12,12,12,12,12,12,12,12,12,13,10,10,13,),
(15,16,14,14,15,14,16,15,14,16,12,14,10,15,15,15,15,15,15,14,14,14,16,14,16,15,14,12,10,15,16,15,15,16,14,16,14,16,14,15,16,16,16,15,16,15,16,16,15,16,13,16,16,16,16,15,16,15,15,15,13,10,0,16,14,15,14,15,14,14,14,15,15,14,15,15,15,15,14,14,15,13,15,15,15,15,15,15,15,15,15,16,13,9,16,),
(17,11,16,16,17,16,14,16,16,15,14,16,14,17,17,15,17,17,16,14,16,15,16,15,15,17,16,14,14,17,16,14,17,16,15,16,16,17,14,17,17,17,16,17,16,17,13,16,17,15,15,17,14,14,14,17,14,17,17,13,15,14,0,14,16,17,16,17,16,16,16,17,17,16,17,17,17,17,16,16,17,15,17,17,17,17,17,17,17,17,17,17,15,14,13,),
(14,16,13,13,15,13,16,15,12,16,13,13,12,13,12,15,15,15,15,14,14,14,16,14,16,12,12,13,13,14,16,15,12,16,14,16,13,16,14,12,16,16,16,12,16,12,16,16,14,16,13,16,16,16,16,15,16,13,14,15,12,13,0,16,13,14,13,13,13,13,13,14,14,11,14,14,12,12,13,11,13,13,12,13,12,13,12,12,12,13,14,16,12,12,16,),
(12,13,12,10,12,12,13,13,9,13,13,10,11,11,10,13,12,12,11,13,12,13,13,13,13,12,12,13,13,11,13,13,10,12,13,12,12,13,13,11,13,13,12,9,12,9,12,12,11,12,12,13,12,12,12,11,13,10,12,12,10,12,0,13,12,12,12,12,12,12,12,12,12,9,12,12,12,12,12,11,12,13,10,13,12,13,12,12,12,13,12,13,9,11,12,),
(10,12,11,9,11,9,12,11,8,12,11,9,7,9,8,12,11,11,11,10,10,11,12,10,12,9,9,11,11,10,12,11,6,12,11,12,9,12,11,7,12,12,12,4,12,7,12,12,10,12,9,12,12,12,12,11,12,9,10,11,8,9,0,12,9,10,9,8,9,9,8,10,10,7,10,10,9,9,9,9,9,11,8,10,9,10,9,9,9,10,10,12,8,8,12,),
(12,12,11,11,12,11,12,12,11,12,12,11,10,12,12,12,12,12,11,12,11,12,12,12,12,12,11,12,12,12,12,12,12,11,12,11,11,12,12,12,12,12,11,12,11,12,11,11,12,11,11,12,11,11,11,12,12,12,12,11,10,11,0,12,11,12,11,12,11,11,11,12,12,11,12,12,12,12,11,11,12,12,12,12,12,12,12,12,12,12,12,12,10,10,11,),
(13,13,12,12,13,12,13,12,12,13,12,12,9,13,13,13,13,13,12,11,12,12,13,11,13,13,12,12,12,13,13,12,13,13,12,13,12,13,12,13,13,13,13,13,13,13,13,13,13,13,11,13,13,13,13,13,13,13,13,12,11,10,0,13,12,13,12,13,12,12,12,13,13,12,13,13,13,13,12,12,13,12,13,13,13,13,13,13,13,13,13,13,11,9,13,),
(13,15,12,12,14,12,15,14,11,15,12,12,12,12,11,14,14,14,14,13,13,13,15,13,15,11,11,12,12,13,15,14,11,15,13,15,12,15,13,12,15,15,15,10,15,10,15,15,13,15,12,15,15,15,15,14,15,12,13,14,11,12,0,15,12,13,12,12,12,12,12,13,13,10,13,13,11,11,12,10,12,12,11,12,11,12,11,11,11,12,13,15,11,12,15,),
(12,14,11,11,14,11,14,12,11,14,10,12,9,12,9,12,14,13,13,13,12,11,14,12,13,11,11,10,10,12,15,12,9,14,11,14,11,13,11,9,13,13,14,8,14,9,14,14,11,14,10,13,14,14,14,13,15,11,12,14,10,10,0,12,10,11,10,10,10,10,10,11,12,9,11,11,9,9,10,8,10,10,9,10,9,10,9,9,9,10,12,15,10,9,15,),
(10,12,11,9,11,10,12,11,8,12,11,9,10,10,9,12,11,11,11,11,10,11,12,11,12,10,10,11,11,10,12,11,9,12,11,12,10,12,11,10,12,12,12,8,12,8,12,12,10,12,10,12,12,12,12,11,12,9,10,11,8,10,0,12,10,10,10,10,10,10,10,10,10,7,10,10,10,10,10,9,10,11,9,11,10,11,10,10,10,11,10,12,8,10,12,),
(11,13,11,10,12,10,13,12,9,13,11,10,8,10,9,12,12,12,12,11,11,11,13,11,13,9,9,11,11,11,13,12,7,13,11,13,10,13,11,8,13,13,13,6,13,7,13,13,11,13,9,13,13,13,13,12,13,10,11,12,9,9,0,13,9,11,9,9,9,10,9,11,11,8,11,11,9,9,9,9,9,11,8,10,9,10,9,9,9,10,11,13,9,8,13,),
(14,16,14,13,16,13,16,15,13,16,14,14,12,14,12,15,16,15,15,15,14,14,16,14,16,13,13,14,14,14,17,15,12,16,14,16,13,16,14,12,16,16,16,11,16,11,16,16,14,16,13,16,16,16,16,15,17,13,14,16,12,13,0,16,13,14,13,13,13,13,13,14,14,11,14,14,13,13,13,12,13,14,12,14,13,14,13,13,13,14,14,17,12,12,17,),
(13,14,12,12,13,12,14,13,12,14,10,12,9,13,13,13,13,13,13,12,12,12,14,12,14,13,12,10,9,13,14,13,13,14,12,14,12,14,12,13,14,14,14,13,14,13,14,14,13,14,11,14,14,14,14,13,14,13,13,13,11,9,0,14,12,13,12,13,12,12,12,13,13,12,13,13,13,13,12,12,13,11,13,13,13,13,13,13,13,13,13,14,11,9,14,),
(12,14,12,11,13,12,14,13,10,14,13,11,12,12,11,13,13,13,13,12,12,13,14,12,13,11,11,13,13,12,14,13,11,14,13,14,12,13,13,12,13,13,14,11,14,11,14,14,11,14,12,13,14,14,14,13,14,11,11,13,9,12,0,13,12,11,12,12,12,11,12,11,12,10,11,11,11,11,12,10,12,13,11,12,11,12,11,11,11,12,12,14,9,12,14,),
(14,12,13,11,12,14,12,15,11,12,15,12,9,12,12,15,14,11,11,15,10,15,14,15,14,14,14,15,15,10,14,15,11,12,15,13,14,14,15,12,14,14,12,8,13,10,13,13,12,12,14,14,13,12,12,11,12,11,14,13,12,14,0,12,14,14,14,13,14,14,13,14,14,11,14,14,14,14,14,13,14,15,12,15,14,15,14,14,14,15,14,15,11,13,12,),
(3,5,3,3,4,3,5,4,4,7,4,4,3,4,2,4,4,4,4,3,3,4,5,3,4,3,3,4,4,3,5,4,3,5,4,5,3,4,4,3,4,4,5,2,5,2,5,5,2,5,3,4,5,5,5,4,5,3,3,7,3,3,0,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,4,3,3,3,3,3,2,3,3,3,7,3,3,6,),
(6,3,5,5,6,5,3,5,5,4,4,5,5,6,6,5,6,6,5,5,5,5,5,5,4,6,5,5,5,6,5,5,6,5,5,5,5,6,5,6,6,6,5,6,5,6,3,5,6,4,5,6,3,3,3,6,3,6,6,2,4,5,0,3,5,6,5,6,5,5,5,6,6,5,6,6,6,6,5,5,6,5,6,6,6,6,6,6,6,6,6,6,4,5,3,),
(5,7,5,5,6,5,7,6,4,7,6,4,5,5,4,6,6,6,6,5,5,6,7,5,6,5,4,6,6,5,7,6,5,7,6,7,5,6,6,5,6,6,7,4,7,4,7,7,4,7,5,6,7,7,7,6,7,5,5,7,4,5,0,6,5,5,5,5,5,4,5,5,5,4,5,5,5,5,5,4,5,6,5,5,5,5,5,4,5,5,5,7,4,5,7,),
(4,0,6,2,3,6,3,6,3,5,5,3,5,3,4,6,5,1,2,6,4,6,4,6,5,5,5,6,5,2,4,6,4,4,6,4,6,5,6,5,5,5,4,0,4,2,4,4,3,3,6,5,4,3,3,1,2,4,5,3,3,6,0,4,6,5,6,6,6,4,6,5,5,2,5,5,5,5,6,4,6,5,3,5,5,6,5,4,5,5,5,6,3,5,2,),
(11,11,6,5,8,6,14,11,11,12,8,14,11,14,10,7,12,7,8,7,8,8,9,8,9,13,13,9,10,9,9,6,0,4,7,4,6,3,6,0,6,5,4,1,0,1,4,5,5,4,2,2,5,4,0,3,3,2,10,11,10,11,16,14,8,4,8,7,8,5,16,7,11,9,7,11,3,7,4,6,8,8,8,10,7,6,6,10,16,9,10,16,10,8,13,),
(1,2,2,0,1,0,2,2,0,3,0,0,0,0,0,3,1,1,0,0,1,2,2,1,3,0,0,1,0,0,2,2,0,0,2,0,0,3,2,0,3,3,0,0,0,0,0,0,1,0,0,3,0,0,0,0,2,0,1,1,0,0,0,2,0,1,0,0,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,1,0,0,0,0,0,0,1,3,0,0,1,),
(9,3,8,8,9,8,6,8,8,4,6,8,6,9,9,7,9,9,8,6,8,7,8,7,7,9,8,6,6,9,8,6,9,8,7,8,8,9,6,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,6,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,6,5,),
(12,7,11,9,10,12,10,13,9,8,13,10,7,10,10,13,12,7,7,13,7,13,12,13,12,12,12,13,13,7,12,13,9,10,13,11,12,12,13,10,12,12,10,5,11,8,11,11,10,10,12,12,11,10,9,7,9,9,12,11,10,12,0,10,12,12,12,11,12,12,11,12,12,9,12,12,12,12,12,11,12,13,10,13,12,13,12,12,12,13,12,13,9,11,9,),
(9,3,8,8,9,8,6,8,8,4,7,8,6,9,9,7,9,9,8,7,8,7,8,7,7,9,8,7,7,9,8,7,9,8,7,8,8,9,7,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,7,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,6,5,),
(10,8,9,9,10,9,8,9,9,8,7,9,6,10,10,8,10,10,9,6,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,8,9,10,8,8,10,8,8,8,10,8,10,10,7,8,6,0,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,6,8,),
(9,3,8,8,9,8,6,8,8,4,6,8,7,9,9,7,9,9,8,7,8,7,8,7,7,9,8,7,7,9,8,7,9,8,7,8,8,9,7,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,7,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,7,5,),
(11,6,10,8,9,11,9,12,8,7,12,9,6,9,9,12,11,6,6,12,6,12,11,12,11,11,11,12,12,6,11,12,8,9,12,10,11,11,12,9,11,11,9,6,10,7,10,10,9,9,11,11,10,9,8,6,8,8,11,10,9,11,0,9,11,11,11,10,11,11,10,11,11,8,11,11,11,11,11,10,11,12,9,12,11,12,11,11,11,12,11,12,8,10,8,),
(9,3,8,8,9,8,6,8,8,4,7,8,7,9,9,7,9,9,8,7,8,7,8,7,7,9,8,7,7,9,8,7,9,8,7,8,8,9,7,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,7,3,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,7,5,),
(10,5,9,9,10,9,7,9,9,5,7,9,7,10,10,8,10,10,9,7,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,6,9,10,8,8,10,7,7,7,10,7,10,10,6,8,7,0,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,7,6,),
(6,5,5,5,6,5,5,5,5,5,3,5,3,6,6,5,6,6,5,3,5,4,5,4,5,6,5,3,3,6,5,4,6,5,4,5,5,6,4,6,6,6,5,6,5,6,5,5,6,5,4,6,5,5,5,6,5,6,6,4,4,3,0,5,5,6,5,6,5,5,5,6,6,5,6,6,6,6,5,5,6,4,6,6,6,6,6,6,6,6,6,6,4,3,5,),
(8,8,7,7,8,7,8,7,7,8,6,7,6,8,8,8,8,8,7,6,7,7,8,6,8,8,7,6,6,8,8,7,8,8,7,8,7,8,7,8,8,8,8,8,8,8,8,8,8,8,6,8,8,8,8,8,8,8,8,7,6,6,0,8,7,8,7,8,7,7,7,8,8,7,8,8,8,8,7,7,8,7,8,8,8,8,8,8,8,8,8,8,6,6,8,),
(10,5,9,9,10,9,7,9,9,5,7,9,7,10,10,8,10,10,9,7,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,6,9,10,8,8,10,7,7,7,10,7,10,10,6,8,7,0,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,7,6,),
(6,5,5,5,6,5,5,5,5,5,4,5,3,6,6,4,6,6,5,3,5,4,5,4,4,6,5,4,4,6,5,4,6,5,4,5,5,6,4,6,6,6,5,6,5,6,5,5,6,5,4,6,5,5,5,6,5,6,6,4,4,3,0,4,5,6,5,6,5,5,5,6,6,5,6,6,6,6,5,5,6,4,6,6,6,6,6,6,6,6,6,6,4,3,5,),
(14,8,13,13,14,13,11,13,13,9,10,13,10,14,14,12,14,14,13,10,13,12,13,12,12,14,13,10,10,14,13,11,14,13,12,13,13,14,11,14,14,14,13,14,13,14,10,13,14,12,12,14,11,11,11,14,11,14,14,10,12,10,0,11,13,14,13,14,13,13,13,14,14,13,14,14,14,14,13,13,14,12,14,14,14,14,14,14,14,14,14,14,12,10,10,),
(10,4,9,9,10,9,7,9,9,5,7,9,7,10,10,8,10,10,9,7,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,6,9,10,8,8,10,7,7,7,10,7,10,10,6,8,7,0,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,7,6,),
(12,7,11,9,10,12,10,13,9,8,13,10,7,10,10,13,12,7,7,13,7,13,12,13,12,12,12,13,13,7,12,13,9,10,13,11,12,12,13,10,12,12,10,6,11,8,11,11,10,10,12,12,11,10,9,7,9,9,12,11,10,12,0,10,12,12,12,11,12,12,11,12,12,9,12,12,12,12,12,11,12,13,10,13,12,13,12,12,12,13,12,13,9,11,9,),
(11,5,10,10,11,10,8,10,10,6,8,10,9,11,11,9,11,11,10,9,10,9,10,9,9,11,10,9,9,11,10,9,11,10,9,10,10,11,9,11,11,11,10,11,10,11,7,10,11,9,9,11,8,8,8,11,8,11,11,7,9,9,0,8,10,11,10,11,10,10,10,11,11,10,11,11,11,11,10,10,11,9,11,11,11,11,11,11,11,11,11,11,9,9,7,),
(9,3,8,8,9,8,6,8,8,4,6,8,6,9,9,7,9,9,8,6,8,7,8,7,7,9,8,6,6,9,8,6,9,8,7,8,8,9,6,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,6,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,6,5,),
(9,3,8,8,9,8,6,8,8,4,5,8,5,9,9,7,9,9,8,5,8,7,8,7,7,9,8,5,5,9,8,6,9,8,7,8,8,9,6,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,5,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,5,5,),
(9,3,8,8,9,8,6,8,8,5,5,8,6,9,9,7,9,9,8,6,8,7,8,7,7,9,8,6,6,9,8,6,9,8,7,8,8,9,6,9,9,9,8,9,8,9,5,8,9,7,7,9,6,6,6,9,6,9,9,5,7,6,0,6,8,9,8,9,8,8,8,9,9,8,9,9,9,9,8,8,9,7,9,9,9,9,9,9,9,9,9,9,7,6,5,),
(7,4,6,6,7,6,4,6,6,4,5,6,3,7,7,5,7,7,6,5,6,5,6,5,5,7,6,5,5,7,6,5,7,6,5,6,6,7,5,7,7,7,6,7,6,7,3,6,7,5,5,7,4,4,4,7,4,7,7,3,5,4,0,4,6,7,6,7,6,6,6,7,7,6,7,7,7,7,6,6,7,5,7,7,7,7,7,7,7,7,7,7,5,3,3,),
(10,4,9,9,10,9,7,9,9,5,7,9,7,10,10,8,10,10,9,7,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,6,9,10,8,8,10,7,7,7,10,7,10,10,6,8,7,0,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,7,6,),
(10,5,9,7,8,10,8,11,7,6,11,8,5,8,8,11,10,5,5,11,5,11,10,11,10,10,10,11,11,5,10,11,7,8,11,9,10,10,11,8,10,10,8,3,9,6,9,9,8,8,10,10,9,8,7,5,7,7,10,9,8,10,0,8,10,10,10,9,10,10,9,10,10,7,10,10,10,10,10,9,10,11,8,11,10,11,10,10,10,11,10,11,7,9,7,),
(11,5,11,10,10,11,8,11,9,6,11,9,11,11,10,11,10,10,11,11,11,11,10,11,10,10,10,11,11,10,10,11,10,9,11,9,11,10,11,11,10,10,9,10,9,10,9,9,10,8,11,10,9,8,8,11,7,10,10,9,8,11,0,8,11,10,11,11,11,10,11,10,10,9,10,10,10,10,11,9,11,11,10,11,10,11,10,10,10,11,10,11,8,11,7,),
(7,1,6,6,7,6,4,7,6,2,7,6,5,7,7,7,7,7,6,7,6,7,6,7,6,7,6,7,7,7,6,7,7,6,7,6,6,7,7,7,7,7,6,7,6,7,5,6,7,5,6,7,5,4,4,7,4,7,7,5,5,6,0,4,6,7,6,7,6,6,6,7,7,6,7,7,7,7,6,6,7,7,7,7,7,7,7,7,7,7,7,7,5,5,3,),
(10,4,9,9,10,9,7,9,9,5,7,9,7,10,10,8,10,10,9,7,9,8,9,8,8,10,9,7,7,10,9,7,10,9,8,9,9,10,7,10,10,10,9,10,9,10,6,9,10,8,8,10,7,7,7,10,7,10,10,6,8,7,3,7,9,10,9,10,9,9,9,10,10,9,10,10,10,10,9,9,10,8,10,10,10,10,10,10,10,10,10,10,8,7,6,),
(8,2,7,7,8,7,5,7,7,3,6,7,4,8,8,6,8,8,7,6,7,6,7,6,6,8,7,6,6,8,7,6,8,7,6,7,7,8,6,8,8,8,7,8,7,8,4,7,8,6,6,8,5,5,5,8,5,8,8,4,6,5,0,5,7,8,7,8,7,7,7,8,8,7,8,8,8,8,7,7,8,6,8,8,8,8,8,8,8,8,8,8,6,4,4,),
(3,5,3,2,4,3,5,3,4,7,3,4,3,4,2,4,4,4,4,3,3,3,5,3,4,3,3,3,3,3,5,3,2,5,3,5,3,4,3,3,4,4,5,2,5,2,5,5,2,5,3,4,5,5,5,4,5,3,3,7,3,3,0,4,3,2,3,3,3,3,3,2,3,3,2,2,2,2,3,2,3,3,2,3,2,3,2,2,2,3,3,7,3,3,6,),
(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,),
(5,7,5,5,6,5,7,6,4,7,6,4,5,5,4,6,6,6,6,6,5,6,7,6,6,5,5,6,6,5,7,6,5,7,6,7,5,6,6,5,6,6,7,4,7,4,7,7,4,7,5,6,7,7,7,6,7,5,5,7,3,5,0,6,5,5,5,5,5,5,5,5,5,3,5,5,5,5,5,4,5,6,5,6,5,6,5,5,5,6,5,7,3,5,7,),
(8,3,8,6,6,8,6,9,5,4,9,6,7,7,6,9,8,5,7,9,7,9,8,9,8,8,8,9,9,5,8,9,6,6,9,7,8,8,9,7,8,8,6,1,7,4,7,7,6,6,8,8,7,6,5,7,5,6,8,7,6,8,0,6,8,8,8,8,8,8,8,8,8,5,8,8,8,8,8,7,8,9,6,9,8,9,8,8,8,9,8,9,5,7,5,),
(2,4,1,1,3,1,4,2,0,4,0,1,0,1,0,2,3,3,3,2,2,1,4,2,3,0,0,0,0,2,4,2,0,4,1,4,1,3,1,0,3,3,4,0,4,0,4,4,1,4,0,3,4,4,4,3,4,1,1,3,0,0,0,3,0,1,0,0,0,0,0,1,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,0,0,4,),
)
# End of font
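# ----------------------------------------------------------------------
# Hedged rendering sketch (added for illustration; not emitted by the
# font generator).  Each descriptors[i] entry is (width_px, byte_offset)
# for the i-th character starting at start_char, and every glyph is
# stored as char_height rows of ceil(width/8) bytes, most significant
# bit first -- e.g. a 6-pixel-wide glyph occupies 16 bytes and a
# 21-pixel-wide one 48, matching the offsets above.  The kerning table's
# exact semantics are not documented here, so this sketch ignores it.
def render_char(ch):
    width, offset = descriptors[ord(ch) - ord(start_char)]
    bytes_per_row = (width + 7) // 8
    rows = []
    for row in range(char_height):
        bits = "".join(format(bitmaps[offset + row * bytes_per_row + b], "08b")
                       for b in range(bytes_per_row))
        rows.append(bits[:width].replace("0", " ").replace("1", "O"))
    return "\n".join(rows)

if __name__ == "__main__":
    print(render_char("A"))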
| StarcoderdataPython |
1757564 | <filename>amfeti/preconditioners/k_preconditioners.py<gh_stars>10-100
#
# Copyright (c) 2020 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Preconditioning module for AMfeti and linearized systems, that have a K-matrix-like structure similar to the linear
static local problem.
"""
from scipy.sparse import csr_matrix
from amfeti.linalg import cal_schur_complement
from .preconditioner_base import PreconditionerBase
import numpy as np
__all__ = ['LumpedPreconditioner',
           'SuperLumpedPreconditioner',
           'DirichletPreconditioner',
           'LumpedDirichletPreconditioner']
class LumpedPreconditioner(PreconditionerBase):
"""
Lumped preconditioner, that uses the stiffnesses on the boundary only
"""
def __init__(self):
super().__init__()
@property
def K_bb(self):
"""
Part of the operator matrix on the interface
"""
if self.interior_dofs is None:
self.interior_dofs = self._identify_interior_dofs(self.interface_dofs)
return self.K.matrix[np.ix_(self.interface_dofs, self.interface_dofs)]
def _set_Q(self):
self.Q = csr_matrix(np.zeros(self.K.shape))
self.Q[np.ix_(self.interface_dofs, self.interface_dofs)] = self.K_bb
class SuperLumpedPreconditioner(LumpedPreconditioner):
"""
Similar preconditioner as the lumped preconditioner, but it utilizes the diagonal entries of the lumped
preconditioner only.
"""
def __init__(self):
super().__init__()
def _set_Q(self):
self.Q = csr_matrix(np.zeros(self.K.shape))
self.Q[np.ix_(self.interface_dofs, self.interface_dofs)] = np.diag(self.K_bb)
class DirichletPreconditioner(LumpedPreconditioner):
"""
Dirichlet preconditioner, that uses a Schur-complement to estimate interface forces. Hence the most accurate,
but expensive preconditioner
"""
def __init__(self):
super().__init__()
@property
def K_ii(self):
"""
Interior part of the operator matrix, which is not related to any boundary-dof
"""
if self.interior_dofs is None:
self.interior_dofs = self._identify_interior_dofs(self.interface_dofs)
return self.K.matrix[np.ix_(self.interior_dofs, self.interior_dofs)]
@property
def K_ib(self):
"""
Connecting part of the operator matrix between interior and interface-dofs
"""
if self.interior_dofs is None:
self.interior_dofs = self._identify_interior_dofs(self.interface_dofs)
return self.K.matrix[np.ix_(self.interior_dofs, self.interface_dofs)]
@property
def K_bi(self):
"""
Connecting part of the operator matrix between interface and interior-dofs
"""
if self.interior_dofs is None:
self.interior_dofs = self._identify_interior_dofs(self.interface_dofs)
return self.K.matrix[np.ix_(self.interface_dofs, self.interior_dofs)]
def schur_complement(self):
"""
Caller for a Schur-complement calculation
"""
return cal_schur_complement(self.K_bi, self.K_ii, self.K_ib, self.K_bb)
def _set_Q(self):
self.Q = csr_matrix(np.zeros(self.K.shape))
S = self.schur_complement()
self.Q[np.ix_(self.interface_dofs, self.interface_dofs)] = S
class LumpedDirichletPreconditioner(DirichletPreconditioner):
"""
Lumped Dirichlet preconditioner, that uses a Schur-complement, but with die diagonal entries of the interior
stiffnesses.
"""
def __init__(self):
super().__init__()
def schur_complement(self):
"""
Caller for a Schur-complement calculation with an extraction of the lumped interior operator matrix
"""
K_ii_diag = np.diag(self.K_ii.matrix)
return cal_schur_complement(self.K_ib.T, K_ii_diag, self.K_ib, self.K_bb)
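# ----------------------------------------------------------------------
# Hedged reference sketch (added for illustration): a dense NumPy version
# of what cal_schur_complement presumably computes for the argument order
# used above.  The imported AMfeti routine may well use sparse
# factorisations instead; this only spells out the formula.
def _dense_schur_complement(K_bi, K_ii, K_ib, K_bb):
    # Static condensation of the interior dofs onto the interface:
    # S = K_bb - K_bi @ inv(K_ii) @ K_ib
    return K_bb - K_bi @ np.linalg.solve(K_ii, K_ib)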
| StarcoderdataPython |
134397 | <filename>scihub2pdf/libgen.py
from __future__ import unicode_literals, print_function, absolute_import
import logging
import requests
from lxml import html
from lxml.etree import ParserError
from tool import norm_url, download_pdf
logger = logging.getLogger("scihub")
class LibGen(object):
def __init__(self,
headers={},
libgen_url="http://libgen.io/scimag/ads.php",
xpath_pdf_url="/html/body/table/tr/td[3]/a"):
self.libgen_url = libgen_url
self.xpath_pdf_url = xpath_pdf_url
self.headers = headers
self.s = None
self.doi = None
self.pdf_file = None
self.pdf_url = None
self.page_url = None
self.html_tree = None
self.html_content = None
def start(self):
self.s = requests.Session()
def exit(self):
if self.s is not None:
self.s.close()
def download(self):
found, r = download_pdf(
self.s,
self.pdf_file,
self.pdf_url,
self.headers,
filetype="application/octet-stream")
return found, r
def navigate_to(self, doi, pdf_file):
params = {"doi": doi, "downloadname": ""}
r = self.s.get(
self.libgen_url,
params=params,
headers=self.headers
)
self.page_url = r.url
self.pdf_file = pdf_file
logger.info("DOI: %s, LibGen Link: %s", doi, self.page_url)
found = r.status_code == 200
if not found:
logger.error("Maybe libgen is down. Try to use sci-hub instead.")
return found, r
self.html_content = r.content
return found, r
def generate_tree(self):
try:
self.html_tree = html.fromstring(self.html_content)
success = True
except ParserError:
logger.error("The LibGen page has a strange behaviour. Please try open in browser to check: %s",
self.page_url)
success = False
return success, self.html_tree
def get_pdf_url(self):
html_a = self.html_tree.xpath(self.xpath_pdf_url)
if len(html_a) == 0:
logger.error("PDF link for not found: %s", self.page_url)
found = False
self.pdf_url = None
else:
self.pdf_url = norm_url(html_a[0].attrib["href"])
found = True
return found, self.pdf_url
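# Minimal usage sketch (the DOI and output file name below are hypothetical):
#     lg = LibGen()
#     lg.start()
#     found, _ = lg.navigate_to("10.1000/xyz123", "paper.pdf")
#     if found and lg.generate_tree()[0] and lg.get_pdf_url()[0]:
#         lg.download()
#     lg.exit()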
| StarcoderdataPython |
1760217 | ## Generate the dataset, save the data and a plot of it as well
import numpy as np
import skfuzzy as fuzz
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from skfuzzy import control as ctrl
import scipy.io
# Set for reproducibility
np.random.seed(seed=123)
loc_risk = ctrl.Antecedent(np.arange(0, 11, 1), 'loc_risk')
maintenance = ctrl.Antecedent(np.arange(0, 11, 1), 'maintenance')
downtime = ctrl.Consequent(np.arange(0, 101, 1), 'downtime')
# Membership functions with parameters pre-specified.
# Data would be generated from this fuzzy system
# MCMC would be used later to identify whether the parameters of the fuzzy system can be recovered from the data.
loc_risk['low'] = fuzz.trimf(loc_risk.universe, [0, 0, 5])
loc_risk['medium'] = fuzz.trimf(loc_risk.universe, [0, 5, 10])
loc_risk['high'] = fuzz.trimf(loc_risk.universe, [5, 10, 10])
maintenance['poor'] = fuzz.trimf(maintenance.universe, [0, 0, 5])
maintenance['average'] = fuzz.trimf(maintenance.universe, [0, 5, 10])
maintenance['good'] = fuzz.trimf(maintenance.universe, [5, 10, 10])
downtime['low'] = fuzz.trimf(downtime.universe, [0, 0, 50])
downtime['medium'] = fuzz.trimf(downtime.universe, [0, 50, 100])
downtime['high'] = fuzz.trimf(downtime.universe, [50, 100, 100])
# Rule objects connect one or more antecedent membership functions with
# one or more consequent membership functions, using 'or' or 'and' to combine the antecedents.
rule1 = ctrl.Rule(loc_risk['high'] | maintenance['poor'], downtime['high'])
rule2 = ctrl.Rule(maintenance['average'] | loc_risk['medium'], downtime['medium'])
rule3 = ctrl.Rule(maintenance['good'] & loc_risk['low'], downtime['low'])
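# Read rule1, for example, as: IF loc_risk IS high OR maintenance IS poor THEN downtime IS high.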
outputs=[[],[],[]]
loc_inp=np.random.random(500)
maint_inp=np.random.random(500)
# Build the control system and simulation once; the same simulation object can be reused for each input pair.
tipping_ctrl = ctrl.ControlSystem([rule1, rule2, rule3])
tipping = ctrl.ControlSystemSimulation(tipping_ctrl)
for x,y in zip(loc_inp,maint_inp):
tipping.input['loc_risk'] = 10.0*x
tipping.input['maintenance'] = 10.0*y
tipping.compute()
outputs[0].append(x)
outputs[1].append(y)
outputs[2].append(tipping.output['downtime'])
## Plot the outputs on a graph for visualization
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=27, azim=139)
ax.plot_trisurf(outputs[0],outputs[1],outputs[2], cmap=cm.jet, linewidth=0.2)
ax.set_xlabel('Location risk')
ax.set_ylabel('Maintenance')
ax.set_zlabel('System downtime')
plt.savefig('data_plot.png')
## Save the data
scipy.io.savemat('data_gen.mat',{'outputs': outputs})
| StarcoderdataPython |
188802 | <filename>pdf_merge.py
# pdf_merging.py
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
def merge_pdfs(dir_path, output):
pdf_writer = PdfFileWriter()
paths = os.listdir(dir_path)
#print("Looking in",paths,"for PDFs")
#checks if existing merged.pdf exists, if so deletes it
    if os.path.isfile(output):  # `output` is already a full path (see the __main__ call below)
        os.remove(output)
        print('Deleted a previous', output)
for i in paths:
if i.endswith('.pdf'):
pdf_reader = PdfFileReader(dir_path+i)
for page in range(pdf_reader.getNumPages()):
# Add each page to the writer object
pdf_writer.addPage(pdf_reader.getPage(page))
# Write out the merged PDF
with open(output, 'wb') as out:
pdf_writer.write(out)
print("Merged PDFs into",output)
if __name__ == '__main__':
merge_pdfs(dir_path='pdfs/', output='pdfs/merged.pdf') | StarcoderdataPython |
1609530 | <reponame>angelaaaateng/awesome-panel
"""This module implements the Page Model"""
import param
# from awesome_panel.application.models.author import Author
from package.awesome_panel.application.models.author import Author
from package.awesome_panel.utils import OrderByNameMixin
class Page(OrderByNameMixin, param.Parameterized):
"""This Page Model contains the
- page component (For example a PageComponent)
- meta parameters like author and description"""
author = param.ClassSelector(class_=Author, allow_None=True)
description = param.String()
tags = param.List()
source_code_url = param.String()
thumbnail_png_url = param.String()
component = param.Parameter()
show_loading_page = param.Boolean(default=False)
restrict_max_width = param.Boolean(default=True)
url = param.String()
def __hash__(
self,
):
return hash(self.name)
| StarcoderdataPython |
3396585 | <reponame>ChuanleiGuo/AlgorithmsPlayground
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
buckets = [[] for _ in range(len(nums) + 1)]
frequency_dict = {}
for num in nums:
frequency_dict[num] = frequency_dict.get(num, 0) + 1
for key in frequency_dict.keys():
frequency = frequency_dict[key]
buckets[frequency].append(key)
res = []
pos = len(buckets) - 1
while len(res) < k and pos >= 0:
if len(buckets[pos]) > 0:
res += buckets[pos]
pos -= 1
        return res[:k]  # a single bucket can push len(res) past k, so trim to exactly k
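# Example: Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2) -> [1, 2]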
| StarcoderdataPython |
27900 | #!/usr/bin/python3
import numpy as np
def meeting_planes(a1, b1, c1, a2, b2, c2, a3, b3, c3):
return []
def main():
a1=1
b1=4
c1=5
a2=3
b2=2
c2=1
a3=2
b3=4
c3=1
x, y, z = meeting_planes(a1, b1, c1, a2, b2, c2, a3, b3, c3)
print(f"Planes meet at x={x}, y={y} and z={z}")
if __name__ == "__main__":
main()
| StarcoderdataPython |
4807481 | <gh_stars>0
import os
import os.path
import subprocess
import yaml
from crontab import CronTab
import wakeup.config as config
import wakeup.util as util
from wakeup.schedule import Schedule, parse_schedule
from wakeup.constants import CONFIG_FILE_NAME, ALARM_SCRIPT_NAME
def schedule(args):
alarm_schedule = parse_schedule(" ".join([args.time, args.days_of_week]))
_add_cron_job(alarm_schedule)
def _add_cron_job(schedule: Schedule):
cron = CronTab(user=True)
command = _create_cron_command()
job = cron.new(command=command)
_set_job_time(job, schedule)
cron.write()
def _create_cron_command():
pipenv_bin = _find_pipenv_bin()
# On Linux, pynput uses the X window system to interact with the keyboard.
# Therefore, the DISPLAY variable must be set, which is not the case when
# this script is run as a cron job.
command = f"(cd '{os.getcwd()}' && env DISPLAY=':0' {pipenv_bin} run python {ALARM_SCRIPT_NAME})"
return command
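# Illustrative value of `command` (the paths below are hypothetical):
#   (cd '/home/user/wakeup' && env DISPLAY=':0' /usr/local/bin/pipenv run python alarm.py)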
def _find_pipenv_bin():
which_result = subprocess.run(
["which", "pipenv"], capture_output=True, encoding="utf-8"
)
return which_result.stdout.strip()
def _set_job_time(job, schedule: Schedule):
job.hour.on(schedule.hours)
    job.minute.on(schedule.minutes)  # python-crontab exposes singular slice names (minute, hour, dow)
job.dow.on(*schedule.days_of_week)
def status(args):
cron = CronTab(user=True)
job_descriptions = []
for job in cron.find_command(ALARM_SCRIPT_NAME):
        desc = f"\t* {job.hour}:{job.minute} on {job.dow}"
job_descriptions.append(desc)
if job_descriptions:
first_line = "The following alarms are scheduled:"
job_lines = "\n".join(job_descriptions)
msg = "\n".join([first_line, job_lines])
else:
msg = "No alarms scheduled."
print(msg)
def clear(args):
cron = CronTab(user=True)
num_jobs = cron.remove_all(command=ALARM_SCRIPT_NAME)
cron.write()
print(f"Cleared {num_jobs} scheduled alarms.")
def init(args):
skeleton_config = {
config.CONSUMER_KEY: "<not set>",
config.CONSUMER_SECRET: "<not set>",
config.ACCESS_TOKEN_KEY: "<not set>",
config.ACCESS_TOKEN_SECRET: "<not set>",
}
config.load_defaults(skeleton_config)
user_home = util.get_user_home()
config_path = os.path.join(user_home, CONFIG_FILE_NAME)
with open(config_path, "w") as config_file:
yaml.dump(skeleton_config, config_file)
msg = f"""\
Created a config file at {config_path}.
You still need to set all the Twitter API keys."""
print(msg)
| StarcoderdataPython |
3380810 | #!/usr/bin/env python3
try:
from pip.req import parse_requirements # pip 9.x
except ImportError:
from pip._internal.req import parse_requirements # pip 10.x
from setuptools import find_packages, setup
from app.util import autoversioning
version = autoversioning.get_version()
# bdist_pex runs in a temp dir, therefore requirements.txt must be added to data_files.
requirements = [str(r.req) for r in parse_requirements('requirements.txt', session=False)]
name = 'clusterrunner'
setup(
name=name,
version=version,
description="ClusterRunner makes it easy to execute test suites across your "
"infrastructure in the fastest and most efficient way possible.",
maintainer="Box",
maintainer_email="<EMAIL>",
url="https://github.com/box/ClusterRunner",
license="ASL 2.0",
python_requires='>=3.4',
packages=find_packages(exclude=('test', 'test.*')),
# Data files are packaged into the wheel using the following defines.
data_files=[
('', ['requirements.txt']),
('conf', ['conf/default_clusterrunner.conf']),
],
install_requires=requirements,
entry_points={
'console_scripts': ['{} = app.__main__:main'.format(name)],
},
)
| StarcoderdataPython |
1675893 | from django.shortcuts import render
from rest_framework import viewsets, filters
import django_filters
from . import models
from . import serializers
class TeamViewSet(viewsets.ModelViewSet):
queryset = models.Team.objects.all()
serializer_class = serializers.TeamSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('name',)
class ServiceProviderViewSet(viewsets.ModelViewSet):
queryset = models.ServiceProvider.objects.all()
serializer_class = serializers.ServiceProviderSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('name',)
class ContactTypeViewSet(viewsets.ModelViewSet):
queryset = models.ContactType.objects.all()
serializer_class = serializers.ContactTypeSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('name',)
class PointOfContactViewSet(viewsets.ModelViewSet):
queryset = models.PointOfContact.objects.all()
serializer_class = serializers.PointOfContactSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = (
'team',
'team__name',
'contact_type',
'contact_type__name',
'service_provider',
'service_provider__name'
)
| StarcoderdataPython |
9355 | <filename>code/send.py
import sys
import time
from datetime import datetime
from bot import FbMessengerBot
if __name__ == "__main__":
if len(sys.argv) < 3:
print("No email or password provided")
else:
bot = FbMessengerBot(sys.argv[1], sys.argv[2])
with open("users.txt", "r") as file:
users = dict.fromkeys(file.read().split("\n"))
for user in users:
users[user] = bot.uid(user)
with open("message.txt", "r") as file:
message = file.read()
time_now = datetime.now()
send_time = datetime(time_now.year + 1, 1, 1)
wait_time = (send_time - time_now).total_seconds()
print("Waiting...")
time.sleep(wait_time)
for uid in users.values():
bot.send_message(message, uid)
bot.logout()
| StarcoderdataPython |
133882 | # coding: utf-8
"""Test the /files/ handler."""
import io
import os
from unicodedata import normalize
pjoin = os.path.join
import requests
import json
from nbformat import write
from nbformat.v4 import (new_notebook,
new_markdown_cell, new_code_cell,
new_output)
from notebook.utils import url_path_join
from .launchnotebook import NotebookTestBase
from ipython_genutils import py3compat
class FilesTest(NotebookTestBase):
def test_hidden_files(self):
not_hidden = [
u'å b',
u'å b/ç. d',
]
hidden = [
u'.å b',
u'å b/.ç d',
]
dirs = not_hidden + hidden
nbdir = self.notebook_dir.name
for d in dirs:
path = pjoin(nbdir, d.replace('/', os.sep))
if not os.path.exists(path):
os.mkdir(path)
with open(pjoin(path, 'foo'), 'w') as f:
f.write('foo')
with open(pjoin(path, '.foo'), 'w') as f:
f.write('.foo')
url = self.base_url()
for d in not_hidden:
path = pjoin(nbdir, d.replace('/', os.sep))
r = requests.get(url_path_join(url, 'files', d, 'foo'))
r.raise_for_status()
self.assertEqual(r.text, 'foo')
r = requests.get(url_path_join(url, 'files', d, '.foo'))
self.assertEqual(r.status_code, 404)
for d in hidden:
path = pjoin(nbdir, d.replace('/', os.sep))
for foo in ('foo', '.foo'):
r = requests.get(url_path_join(url, 'files', d, foo))
self.assertEqual(r.status_code, 404)
def test_contents_manager(self):
"make sure ContentsManager returns right files (ipynb, bin, txt)."
nbdir = self.notebook_dir.name
base = self.base_url()
nb = new_notebook(
cells=[
new_markdown_cell(u'Created by test ³'),
new_code_cell("print(2*6)", outputs=[
new_output("stream", text="12"),
])
]
)
with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w',
encoding='utf-8') as f:
write(nb, f, version=4)
with io.open(pjoin(nbdir, 'test.bin'), 'wb') as f:
f.write(b'\xff' + os.urandom(5))
f.close()
with io.open(pjoin(nbdir, 'test.txt'), 'w') as f:
f.write(u'foobar')
f.close()
r = requests.get(url_path_join(base, 'files', 'testnb.ipynb'))
self.assertEqual(r.status_code, 200)
self.assertIn('print(2*6)', r.text)
json.loads(r.text)
r = requests.get(url_path_join(base, 'files', 'test.bin'))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.headers['content-type'], 'application/octet-stream')
self.assertEqual(r.content[:1], b'\xff')
self.assertEqual(len(r.content), 6)
r = requests.get(url_path_join(base, 'files', 'test.txt'))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.headers['content-type'], 'text/plain')
self.assertEqual(r.text, 'foobar')
def test_download(self):
nbdir = self.notebook_dir.name
base = self.base_url()
text = 'hello'
with open(pjoin(nbdir, 'test.txt'), 'w') as f:
f.write(text)
r = requests.get(url_path_join(base, 'files', 'test.txt'))
disposition = r.headers.get('Content-Disposition', '')
self.assertNotIn('attachment', disposition)
r = requests.get(url_path_join(base, 'files', 'test.txt') + '?download=1')
disposition = r.headers.get('Content-Disposition', '')
self.assertIn('attachment', disposition)
self.assertIn('filename="test.txt"', disposition)
def test_old_files_redirect(self):
"""pre-2.0 'files/' prefixed links are properly redirected"""
nbdir = self.notebook_dir.name
base = self.base_url()
os.mkdir(pjoin(nbdir, 'files'))
os.makedirs(pjoin(nbdir, 'sub', 'files'))
for prefix in ('', 'sub'):
with open(pjoin(nbdir, prefix, 'files', 'f1.txt'), 'w') as f:
f.write(prefix + '/files/f1')
with open(pjoin(nbdir, prefix, 'files', 'f2.txt'), 'w') as f:
f.write(prefix + '/files/f2')
with open(pjoin(nbdir, prefix, 'f2.txt'), 'w') as f:
f.write(prefix + '/f2')
with open(pjoin(nbdir, prefix, 'f3.txt'), 'w') as f:
f.write(prefix + '/f3')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f1.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/files/f1')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f2.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/files/f2')
url = url_path_join(base, 'notebooks', prefix, 'files', 'f3.txt')
r = requests.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.text, prefix + '/f3')
| StarcoderdataPython |
3278997 | <gh_stars>0
#!/usr/bin/python
"""
File: unstable_ODE.py
Copyright (c) 2016 <NAME>
License: MIT
Exercise C.4:
Implements the derivative of an unstable function u_k
to test how well the Euler method approximates it given
different initial conditions for alpha and delta t
"""
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from Euler import Euler
def u_prime(x, u, a = -1):
"""
For backwards compatibility, the input variable x is included,
but it does not contribute to the derivative
"""
return a * u
def plot_Euler_approx(dfdx, y_a, delta_x, a, b):
"""
Plots the Euler approximation of y(x) given function dfdx and
the initial point y_a from a to b with stepsize delta_x
"""
xList = np.arange(a, b, delta_x)
yList = Euler(dfdx, y_a, delta_x, a, b)
plt.plot(xList, yList)
plt.show()
def plot_approx_and_exact(exact, resolution, dfdx, y_a, delta_x, a, b):
"""
Plots the Euler approximation y(x) in red alongside its
given exact value in green from a to b
"""
xList_a = np.arange(a, b, delta_x)
yList_a = Euler(dfdx, y_a, delta_x, a, b)
xList_e = np.linspace(a, b, resolution)
yList_e = np.vectorize(exact)(xList_e)
plt.plot(xList_a, yList_a, 'r')
plt.plot(xList_e, yList_e, 'g')
plt.show()
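# Minimal usage sketch (assumes the local `Euler` module imported above; for
# a = -1 and u(0) = 1 the exact solution is u(x) = exp(-x)):
#     plot_approx_and_exact(lambda x: np.exp(-x), 100, u_prime, 1.0, 0.1, 0.0, 5.0)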
| StarcoderdataPython |
3375445 | import torch.nn.functional as F
import torch.nn as nn
import torch
"""
这个是把PICK对应论文中的GraphModel中的Graph Learning的代码,
看起来忒费劲,我就笨方法,一行行肢解后,打印出来看、理解:
我理解就是
- 降维
- 做softmax算每个节点和彼此之间的关系权重
- 然后计算loss:
'''
\mathcal{L}_{GL}=\frac{1}{N}\sum_{i,j=1}^N exp(A_{ij}+\eta \Vert v_i - v_j \Vert^2_2) + \gamma \Vert A \Vert_F^2
'''
"""
N = 8 # number of bounding boxes in one image
in_dim = 10 # hidden-state dimension of each bbox
learning_dim = 5 # presumably the dimension after the learned projection
eta = 1
gamma = 1
box_num = torch.tensor([1,8])
x = torch.randn(1, N, in_dim)
print("我是bbox的feature: x = ", x.shape)
learn_w = nn.Parameter(torch.empty(learning_dim))
projection = nn.Linear(in_dim, learning_dim, bias=False)
adj = torch.ones((1, N, N))
print("我是整个图的邻接矩阵:A = ", adj)
print("我是整个图的邻接矩阵:A.shape ", adj.shape)
B, N, D = x.shape # todo 难道是N,T,D?
# (B, N, D)
x_hat = projection(x) # todo: is this a dimensionality reduction? 3 dims -> 1, or just the last dim?
print("after a linear projection: x -> x_hat %r=>%r" % (x.shape, x_hat.shape))
_, _, learning_dim = x_hat.shape # judging from this, the last dim is reduced: self.projection = nn.Linear(in_dim, learning_dim, bias=False)
# (B, N, N, learning_dim)
x_i = x_hat.unsqueeze(2).expand(B, N, N, learning_dim)# todo <--- ???
x_j = x_hat.unsqueeze(1).expand(B, N, N, learning_dim)
print("骚操作之后的x_i:", x_i.shape)
print("骚操作之后的x_j:", x_j.shape)
distance = torch.abs(x_i - x_j) # <--- 感觉是两两做差
print("| x_i - x_j |: ", distance.shape)
distance = torch.einsum('bijd, d->bij', distance, learn_w) # todo <--- ???
print("| x_i - x_j | 啥爱因斯坦sum后 : ", distance.shape)
out = F.leaky_relu(distance)
print(" Relu(| x_i - x_j |) : ", distance.shape)
# for numerical stability, since the softmax operation may produce large values
max_out_v, _ = out.max(dim=-1, keepdim=True) # todo <--- ???
print(" Max(Relu(| x_i - x_j |)) : ", max_out_v.shape)
out = out - max_out_v
soft_adj = torch.exp(out)
print(" Exp(Max(Relu(| x_i - x_j |))) : soft邻接矩阵 ", soft_adj.shape)
soft_adj = adj * soft_adj # <-- ??? 为何要用新的邻接矩阵,和就的邻接矩阵相乘
print("两个邻接矩阵相乘:adj * soft_adj:adj.shape=%r, soft_adj.shape=%r" % (adj.shape, soft_adj.shape))
sum_out = soft_adj.sum(dim=-1, keepdim=True)
soft_adj = soft_adj / sum_out + 1e-10
print("终于算出了新的邻接矩阵了:")
print(soft_adj)
print("新的邻接矩阵的shape:", soft_adj.shape)
print("=======================================================")
print("开始计算loss了!")
B, N, D = x_hat.shape
x_i = x_hat.unsqueeze(2).expand(B, N, N, D)
x_j = x_hat.unsqueeze(1).expand(B, N, N, D)
box_num_div = 1 / torch.pow(box_num.float(), 2)
# (B, N, N)
dist_loss = adj + eta * torch.norm(x_i - x_j, dim=3) # square operation removed because it can cause a NaN loss.
dist_loss = torch.exp(dist_loss)
# (B,)
dist_loss = torch.sum(dist_loss, dim=(1, 2)) * box_num_div.squeeze(-1)
# (B,)
f_norm = torch.norm(adj, dim=(1, 2)) # square operation removed because it can cause a NaN loss.
gl_loss = dist_loss + gamma * f_norm
print("loss结果为:", gl_loss)
| StarcoderdataPython |
42621 | <filename>main.py
import wikipedia
import webbrowser
def getPage():
# 1 means number of random articles
random_article = wikipedia.random(1)
# print to the user the choice of random article
print("The random generated wikipedia article is " + random_article)
# User input to view the page or not
choice = input("Continue Loading more info about "
+ random_article + " (y/n/q) ")
if(choice == 'y' or choice == 'Y'):
# Prints the summary of the random picked article
print("")
print("##############################")
print(wikipedia.summary(random_article, 10, 0
, auto_suggest=True, redirect=True))
# user choice to open in webbrowser
web_browser = input("Do you want to open this in your web browser? (y/n) ")
if (web_browser == 'y' or web_browser == 'Y'):
wiki_load = wikipedia.page(random_article)
webbrowser.open(wiki_load.url)
elif (web_browser == 'n' or web_browser == 'N'):
print("")
print("##############################")
getPage()
elif(choice == 'n' or choice == 'N'):
getPage()
else:
exit(0)
getPage()
| StarcoderdataPython |
3289035 | <reponame>Ida-Ida/hecktor-2020
import torch
from torch import nn
from torch.nn import functional as F
class BasicConv3d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv3d, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)
self.norm = nn.InstanceNorm3d(out_channels, affine=True)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class FastSmoothSENorm(nn.Module):
class SEWeights(nn.Module):
def __init__(self, in_channels, reduction=2):
super().__init__()
self.conv1 = nn.Conv3d(in_channels, in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True)
self.conv2 = nn.Conv3d(in_channels // reduction, in_channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
b, c, d, h, w = x.size()
out = torch.mean(x.view(b, c, -1), dim=-1).view(b, c, 1, 1, 1) # output_shape: in_channels x (1, 1, 1)
out = F.relu(self.conv1(out))
out = self.conv2(out)
return out
def __init__(self, in_channels, reduction=2):
super(FastSmoothSENorm, self).__init__()
self.norm = nn.InstanceNorm3d(in_channels, affine=False)
self.gamma = self.SEWeights(in_channels, reduction)
self.beta = self.SEWeights(in_channels, reduction)
def forward(self, x):
gamma = torch.sigmoid(self.gamma(x))
beta = torch.tanh(self.beta(x))
x = self.norm(x)
return gamma * x + beta
class FastSmoothSeNormConv3d(nn.Module):
def __init__(self, in_channels, out_channels, reduction=2, **kwargs):
super(FastSmoothSeNormConv3d, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, bias=True, **kwargs)
self.norm = FastSmoothSENorm(out_channels, reduction)
def forward(self, x):
x = self.conv(x)
x = F.relu(x, inplace=True)
x = self.norm(x)
return x
class RESseNormConv3d(nn.Module):
def __init__(self, in_channels, out_channels, reduction=2, **kwargs):
super().__init__()
self.conv1 = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, **kwargs)
if in_channels != out_channels:
self.res_conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)
else:
self.res_conv = None
def forward(self, x):
residual = self.res_conv(x) if self.res_conv else x
x = self.conv1(x)
x += residual
return x
class UpConv(nn.Module):
def __init__(self, in_channels, out_channels, reduction=2, scale=2):
super().__init__()
self.scale = scale
self.conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x = self.conv(x)
x = F.interpolate(x, scale_factor=self.scale, mode='trilinear', align_corners=False)
return x
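# Minimal usage sketch (the shapes below are illustrative assumptions):
#     x = torch.randn(2, 8, 16, 16, 16)  # (batch, channels, D, H, W)
#     y = FastSmoothSeNormConv3d(8, 16, reduction=2, kernel_size=3, stride=1, padding=1)(x)
#     # y.shape -> torch.Size([2, 16, 16, 16, 16])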
| StarcoderdataPython |
1679652 | '''
Created on Dec, 2016
@author: hugo
'''
from __future__ import absolute_import
import os
import re
from random import shuffle
import numpy as np
from collections import Counter, defaultdict
from ..preprocessing.preprocessing import build_vocab, generate_bow, count_words
from ..utils.io_utils import dump_json
class CorpusIterReuters(object):
def __init__(self, path_list, train_docs, with_docname=False):
self.path_list = path_list
self.train_docs = train_docs
self.with_docname = with_docname
def __iter__(self):
shuffle(self.path_list)
count = 0
for path in self.path_list:
with open(path, 'r') as f:
                texts = re.split(r'\n\s*\n', f.read())[:-1]
for block in texts:
tmp = block.split('\n')
did = tmp[0].split(' ')[-1]
if not did in self.train_docs:
continue
words = (' '.join(tmp[2:])).split()
count += 1
if self.with_docname:
yield [words, [did]]
else:
yield words
        print(count)
def load_data(path_list, test_split, seed=666):
'''Loads the Reuters RCV1-v2 newswire dataset.
@Params
path_list : a list of file paths
test_split : fraction of the dataset to be used as test data.
seed : random seed for sample shuffling.
'''
# count the number of times a word appears in a doc
corpus = {}
for path in path_list:
with open(path, 'r') as f:
            texts = re.split(r'\n\s*\n', f.read())[:-1]
for block in texts:
tmp = block.split('\n')
did = tmp[0].split(' ')[-1]
count = Counter((' '.join(tmp[2:])).split())
corpus[did] = dict(count) # doc-word frequency
corpus = corpus.items()
np.random.seed(seed)
np.random.shuffle(corpus)
n_docs = len(corpus)
train_data = dict(corpus[:-int(n_docs * test_split)])
test_data = dict(corpus[-int(n_docs * test_split):])
return train_data, test_data
def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None):
if not (training_phase or isinstance(vocab_dict, dict)):
raise ValueError('vocab_dict must be provided if training_phase is set False')
if training_phase:
vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn)
docs = generate_bow(doc_word_freq, vocab_dict)
    new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.items() if word in vocab_dict])
return docs, vocab_dict, new_word_freq
def construct_train_test_corpus(path_list, test_split, output, threshold=10, topn=20000):
train_data, test_data = load_data(path_list, test_split)
train_word_freq = count_words(train_data.values())
train_docs, vocab_dict, train_word_freq = construct_corpus(train_data, train_word_freq, True, threshold=threshold, topn=topn)
train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq}
dump_json(train_corpus, os.path.join(output, 'train.corpus'))
    print('Generated training corpus')
test_word_freq = count_words(test_data.values())
test_docs, _, _ = construct_corpus(test_data, test_word_freq, False, vocab_dict=vocab_dict)
test_corpus = {'docs': test_docs, 'vocab': vocab_dict}
dump_json(test_corpus, os.path.join(output, 'test.corpus'))
    print('Generated test corpus')
def extract_labels(docs, path, output):
# it will be fast if docs is a dict instead of a list
doc_labels = defaultdict(set)
with open(path, 'r') as f:
for line in f:
label, did, _ = line.strip('\n').split()
if did in docs:
doc_labels[did].add(label)
    doc_labels = dict([(x, list(y)) for x, y in doc_labels.items()])
dump_json(doc_labels, output)
return doc_labels
| StarcoderdataPython |
3281630 | from game import i2c
import gevent
from gevent.wsgi import WSGIServer
from threading import Thread
from game.game import Game
go = Game("piet","pol","normal","subtile")
gameRunning = False
import webBack
import MySQLdb
import time
def startServer():
server = WSGIServer(("", 5000), webBack.app)
server.serve_forever()
def whiletrue():
while(True):
try:
i2c.setinputs()
#print(i2c.sensor1)
#print(i2c.readSensor(1))
time.sleep(0.25)
        except Exception:
print("ERROR READING")
time.sleep(0.25)
if __name__ == "__main__":
print("start Game")
go = Game("jan","jaap","normal","subtile")
#go.startGame()
print("game running")
db = MySQLdb.connect("localhost","root","root","MindGame" )
dbc = db.cursor()
sql = "DELETE FROM Game;"
print(sql)
dbc.execute(sql)
db.commit()
print("start server")
thread = Thread(target = startServer, args = ())
thread.start()
whiletruethread = Thread(target = whiletrue, args = ())
whiletruethread.start()
print("server running")
| StarcoderdataPython |
3377083 | from django.urls import path
from . import views
urlpatterns = [
path('signup/', views.signup),
path('check_duplication/', views.check_duplication),
path('delete/', views.delete),
]
| StarcoderdataPython |
1723408 | #!/usr/bin/env python3
# python3 -m pip install PyPDF2
import os
from PyPDF2 import PdfFileWriter, PdfFileReader
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--in', type=str, nargs='+', dest='pdfs_to_merge', required=True, help='List of pdf fragments to merge')
parser.add_argument('--out', type=str, dest='output_name', required=True, help='Output pdf name')
return parser.parse_args()
def merge_pdfs(pdfs_to_merge, output_name):
print('FROM:', pdfs_to_merge)
print('TO :', output_name)
output = PdfFileWriter()
inputs = []
total_pages = 0
for input_pdf_path in pdfs_to_merge:
input_file = open(input_pdf_path, "rb")
input_pdf = PdfFileReader(input_file)
print('Reading', input_pdf_path, 'with', input_pdf.numPages, 'pages...')
for i in range(0, input_pdf.numPages):
output.addPage(input_pdf.getPage(i))
total_pages += 1
inputs.append(input_file)
with open(output_name, "wb") as outputStream:
output.write(outputStream)
for input_file in inputs:
input_file.close()
print('Merge completed:', output_name, 'with', total_pages, 'pages.')
def main():
args = parse_args()
merge_pdfs(args.pdfs_to_merge, args.output_name)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3203785 | <gh_stars>0
## @defgroup Methods-Weights-Correlations-Propulsion Propulsion
# Contains some methods for calculating different propulsion system weights
# @ingroup Methods-Weights-Correlations
from .air_cooled_motor import air_cooled_motor
from .engine_jet import engine_jet
from .engine_piston import engine_piston
from .hts_motor import hts_motor
from .integrated_propulsion import integrated_propulsion
from .integrated_propulsion_general_aviation import integrated_propulsion_general_aviation
from .nasa_motor import nasa_motor | StarcoderdataPython |
1648333 | <filename>Python/ILoveYou.py
# Language: Python
# Level: 8kyu
# Name of Problem: I love you, a little , a lot, passionately ... not at all
# Instructions: Who remembers back to their time in the schoolyard, when girls would take a flower and tear its petals, saying each of the following phrases each time a petal was torn:
# I love you, a little, a lot, passionately, madly, not at all
# Your goal in this kata is to determine which phrase the girls would say for a flower of a given number of petals, where nb_petals > 0.
# Solution:
# subtract one from array b/c it is 0 based; girl starts counting at 1 but array starts at 0
def how_much_i_love_you(petals):
return ['I love you', 'a little', 'a lot', 'passionately', 'madly', 'not at all'][petals % 6 - 1]
# Sample Tests Passed:
# def testing(actual, expected):
# Test.assert_equals(actual, expected)
# Test.describe("decode")
# Test.it("Basic tests")
# testing(how_much_i_love_you(7),"I love you")
# testing(how_much_i_love_you(3),"a lot")
# testing(how_much_i_love_you(6),"not at all") | StarcoderdataPython |
3387983 | import math
def funcion_factorial(n: int ):
"""El factorial de un numero.
Parameters
----------
n : int
Numero entero `n`.
Returns
-------
int
Retorna el factorial del numero `n`
"""
facto= 1
for i in range(1,n+1):
facto = facto * i
return facto | StarcoderdataPython |
3358510 | <filename>reader.py<gh_stars>0
#Based on script by <NAME> @alexram1313
#https://github.com/alexram1313/text-to-speech-sample
import simpleaudio as sa
import re
import _thread
import time
from pydub import AudioSegment
from pydub.playback import play
class TextToSpeech:
CHUNK = 1024
#apparently you can both indicate type (see type hints) and instantiate a variable in the def statement
#here we point to the txt file that breaks words in to phonemes
# l = the phoneme strings dict
# _load_words is the method that parses the CMU dictionary
def __init__(self, CMUdict:str = 'CMUdict.txt'):
self._l = {}
self._load_words(CMUdict)
def _load_words(self, CMUdict:str):
with open(CMUdict, 'r') as file:
print("opened CMU dict!")
for line in file:
#ignore comments in txt file
if not line.startswith(';;;'):
#key is the word, val is the string of phonemes, 1st arg = delimiter, 2nd is how many times to split max
                    key, val = line.split(' ',1)  # split only once: the word, then the whole phoneme string
#use regex to parse the phonemes in val
self._l[key] = re.findall(r"[A-Z]+",val)
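    # Assuming the standard CMU dictionary format, a line such as
    # "HELLO  HH AH0 L OW1" yields self._l["HELLO"] == ['HH', 'AH', 'L', 'OW']
    # (the [A-Z]+ regex drops the stress digits).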
def get_pronunciation(self, str_input):
phoneme_list = []
#the following parses the user input (typed words to be spoken)
#\w is any alphanumeric character. see https://docs.python.org/3/howto/regex.html
for word in re.findall(r"[\w']+",str_input.upper()):
if word in self._l:
phoneme_list += self._l[word]
print(phoneme_list)
return phoneme_list
    #plays wav files using anything but pyaudio
def make_audio(self, phoneme_list):
full_audio = AudioSegment.empty()
for phoneme in phoneme_list:
segment = AudioSegment.from_wav("sounds/"+phoneme+".wav")
full_audio += segment
return full_audio
    def export_audio(self, audio, path="mashup.mp3"):
        audio.export(path, format="mp3")
if __name__ == '__main__':
tts = TextToSpeech()
while True:
user_input = input('Enter a word or phrase: ')
phoneme_list = tts.get_pronunciation(user_input)
if len(phoneme_list) > 0:
audio = tts.make_audio(phoneme_list)
#audio.export("<FILE PATH HERE FOR EXPORT".wav", format ="wav")
play(audio)
else:
print("I couldn't pronounce \"" + user_input + "\"")
| StarcoderdataPython |
105432 | """This module implements uploading videos on YouTube via Selenium using metadata JSON file
to extract its title, description etc."""
from typing import DefaultDict, Optional
from selenium_firefox.firefox import Firefox, By, Keys
from collections import defaultdict
import json
import time
from youtube_uploader_selenium import const
from pathlib import Path
import logging
import random
logging.basicConfig()
def load_metadata(metadata_json_path: Optional[str] = None) -> DefaultDict[str, str]:
if metadata_json_path is None:
return defaultdict(str)
with open(metadata_json_path) as metadata_json_file:
return defaultdict(str, json.load(metadata_json_file))
class YouTubeUploader:
"""A class for uploading videos on YouTube via Selenium using metadata JSON file
to extract its title, description etc"""
def __init__(self, video_path: str, metadata: dict, browser: Firefox) -> None:
self.video_path = video_path
self.metadata_dict = metadata
self.browser = browser
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self._validate_inputs()
def _validate_inputs(self):
if not self.metadata_dict[const.VIDEO_TITLE]:
self.logger.warning("The video title was not found in a metadata file")
self.metadata_dict[const.VIDEO_TITLE] = Path(self.video_path).stem
self.logger.warning("The video title was set to {}".format(Path(self.video_path).stem))
if not self.metadata_dict[const.VIDEO_DESCRIPTION]:
self.logger.warning("The video description was not found in a metadata file")
def upload(self, use_monetization=True):
try:
self._login()
return self._upload(use_monetization)
except Exception as e:
print(e)
self._quit()
raise
def _wait(self):
time.sleep(const.USER_WAITING_TIME + random.uniform(0, 2))
def _short_wait(self):
time.sleep(random.uniform(0, 1))
def _login(self):
self.browser.get(const.YOUTUBE_URL)
self._wait()
if self.browser.has_cookies_for_current_website():
self.browser.load_cookies()
self._wait()
self.browser.refresh()
else:
self.logger.info('Please sign in and then press enter')
input()
self.browser.get(const.YOUTUBE_URL)
self._wait()
self.browser.save_cookies()
def _upload(self, use_monetization: bool) -> (bool, Optional[str]):
self._go_to_upload()
self._send_video()
self._wait()
self._set_title()
self._set_description()
self._set_kids_section()
self._set_tags()
self._click_next()
# Monetization
if use_monetization:
self._set_monetization_on()
self._click_next()
self._set_monetization_suitability()
self._click_next()
self._click_next()
else:
self._click_next()
try:
self._set_video_public()
except Exception:
# Deals with copyright 'checks'
self._click_next()
self._set_video_public()
video_id = self._get_video_id()
self._wait_while_upload()
done_button = self.browser.find(By.ID, const.DONE_BUTTON)
# Catch such error as
# "File is a duplicate of a video you have already uploaded"
if done_button.get_attribute('aria-disabled') == 'true':
error_message = self.browser.find(By.XPATH,
const.ERROR_CONTAINER).text
self.logger.error(error_message)
return False, None
done_button.click()
self._wait()
# Monetization
if use_monetization:
self._publish_anyway()
self.logger.debug("Published the video with video_id = {}".format(video_id))
self._wait()
self.browser.get(const.YOUTUBE_URL)
self._quit()
return True, video_id
def _go_to_upload(self):
self.browser.get(const.YOUTUBE_URL)
self._wait()
self.browser.get(const.YOUTUBE_UPLOAD_URL)
self._wait()
def _send_video(self):
absolute_video_path = str(Path.cwd() / self.video_path)
self.browser.find(By.XPATH, const.INPUT_FILE_VIDEO).send_keys(absolute_video_path)
self.logger.debug('Attached video {}'.format(self.video_path))
def _set_title(self):
title_field = self.browser.find(By.ID, const.TEXTBOX, timeout=10)
title_field.click()
self._short_wait()
title_field.clear()
self._short_wait()
title_field.send_keys(Keys.COMMAND + 'a')
self._short_wait()
title_field.send_keys(self.metadata_dict[const.VIDEO_TITLE])
self.logger.debug('The video title was set to \"{}\"'.format(self.metadata_dict[const.VIDEO_TITLE]))
self._wait()
def _set_description(self):
video_description = self.metadata_dict[const.VIDEO_DESCRIPTION]
if video_description:
description_container = self.browser.find(By.XPATH,
const.DESCRIPTION_CONTAINER)
description_field = self.browser.find(By.ID, const.TEXTBOX, element=description_container)
description_field.click()
self._wait()
description_field.clear()
self._wait()
description_field.send_keys(self.metadata_dict[const.VIDEO_DESCRIPTION].replace('\\n', u'\ue007'))
self.logger.debug(
'The video description was set to \"{}\"'.format(self.metadata_dict[const.VIDEO_DESCRIPTION]))
def _set_kids_section(self):
kids_section = self.browser.find(By.NAME, const.NOT_MADE_FOR_KIDS_LABEL)
self.browser.find(By.ID, const.RADIO_LABEL, kids_section).click()
self.logger.debug('Selected \"{}\"'.format(const.NOT_MADE_FOR_KIDS_LABEL))
def _set_tags(self):
more_options = self.browser.find(By.CLASS_NAME, const.ADVANCED_BUTTON)
more_options.click()
self._wait()
self._wait()
tags_container = self.browser.find(By.ID, const.TAGS_CONTAINER)
tags_container.click()
tags_input = tags_container.find_element(By.ID, const.TEXT_INPUT)
tags_input.click()
self._wait()
tags_input.send_keys(self.metadata_dict[const.VIDEO_TAGS])
self._wait()
def _set_monetization_on(self):
monetization_bar = self.browser.find(By.ID, const.MONETIZATION_LABEL)
monetization_bar.click()
self._wait()
on_label = self.browser.find(By.ID, const.MONETIZATION_ON_LABEL)
on_label.click()
self._wait()
done_button = self.browser.find(By.ID, const.MONETIZATION_DONE)
done_button.click()
self._wait()
self.logger.debug(
'The video monetization was set to on')
def _set_monetization_suitability(self):
button = self.browser.find(By.CLASS_NAME, const.MONETIZATION_SUITABILITY_LABEL)
button.click()
self._wait()
self.logger.debug(
'The video monetization was set to none of the above')
def _publish_anyway(self):
button = self.browser.find(By.ID, const.PUBLISH_ANYWAY_LABEL)
button.click()
self._wait()
self.logger.debug(
'The video published publicly')
def _click_next(self):
self.browser.find(By.ID, const.NEXT_BUTTON).click()
self.logger.debug('Clicked {}'.format(const.NEXT_BUTTON))
self._wait()
def _set_video_public(self):
public_main_button = self.browser.find(By.NAME, const.PUBLIC_BUTTON)
self.browser.find(By.ID, const.RADIO_LABEL, public_main_button).click()
self._wait()
self.logger.debug('Made the video {}'.format(const.PUBLIC_BUTTON))
def _wait_while_upload(self):
status_container = self.browser.find(By.XPATH,
const.STATUS_CONTAINER)
while True:
in_process = status_container.text.find(const.UPLOADED) != -1
if in_process:
self._wait()
else:
break
def _get_video_id(self) -> Optional[str]:
video_id = None
try:
video_url_container = self.browser.find(By.XPATH, const.VIDEO_URL_CONTAINER)
video_url_element = self.browser.find(By.XPATH, const.VIDEO_URL_ELEMENT,
element=video_url_container)
video_id = video_url_element.get_attribute(const.HREF).split('/')[-1]
except:
self.logger.warning(const.VIDEO_NOT_FOUND_ERROR)
pass
return video_id
def _quit(self):
self.browser.driver.quit()
| StarcoderdataPython |
1662258 | # V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/82820204
# IDEA : GREEDY
class MyCalendarTwo(object):
def __init__(self):
# every booked interval
self.booked = list()
# every overlap interval
self.overlaped = list()
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: bool
"""
for os, oe in self.overlaped:
if max(os, start) < min(oe, end):
return False
for bs, be in self.booked:
ss = max(bs, start)
ee = min(be, end)
if ss < ee:
self.overlaped.append((ss, ee))
self.booked.append((start, end))
return True
# Your MyCalendarTwo object will be instantiated and called as such:
# obj = MyCalendarTwo()
# param_1 = obj.book(start,end)
# V2
# Time: O(n^2)
# Space: O(n)
class MyCalendarTwo(object):
def __init__(self):
self.__overlaps = []
self.__calendar = []
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: bool
"""
for i, j in self.__overlaps:
if start < j and end > i:
return False
for i, j in self.__calendar:
if start < j and end > i:
self.__overlaps.append((max(start, i), min(end, j)))
self.__calendar.append((start, end))
return True | StarcoderdataPython |
126991 | from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence, Tuple, Optional, List, Dict, Any, Iterable
import logging
from pathlib import Path
import os
from concurrent.futures import ThreadPoolExecutor
from requests import HTTPError
from catpy.applications import CatmaidClientApplication
from catpy.applications.morphology import lol_to_df
import pandas as pd
import numpy as np
from .bbox import Bbox
from .utils import CoordZYX
logger = logging.getLogger(__name__)
DEFAULT_WORKERS = 10
def treenode_table(response):
edit_time_dtype = None
df = lol_to_df(
response,
[
"treenode_id",
"parent_id",
"x",
"y",
"z",
"confidence",
"radius",
"skeleton_id",
"edit_time",
"user_id",
],
[
np.uint64,
pd.UInt64Dtype(),
np.float64,
np.float64,
np.float64,
np.int8,
np.float64,
np.uint64,
edit_time_dtype,
np.uint64,
],
)
# df.index = df.treenode_id
return df
def connector_node_table(response):
edit_time_dtype = None
df = lol_to_df(
response,
["connector_id", "x", "y", "z", "confidence", "edit_time", "user_id"],
[
np.uint64,
np.float64,
np.float64,
np.float64,
np.int8,
edit_time_dtype,
np.uint64,
],
)
return df
def merge_node_tables(dfs: Sequence[pd.DataFrame], drop_subset=None):
merged = pd.concat(dfs, ignore_index=True)
deduped = merged.drop_duplicates(subset=drop_subset)
return deduped
def merge_treenode_tables(dfs: Sequence[pd.DataFrame]):
df = merge_node_tables(dfs, ["treenode_id", "skeleton_id"])
# if len(df.treenode_id) == len(np.unique(df.treenode_id)):
# df.index = df.treenode_id
# else:
# raise ValueError("Resulting treenode table does not have unique rows")
return df
def merge_connector_tables(dfs: Sequence[pd.DataFrame]):
return merge_node_tables(dfs, ["connector_id"])
@dataclass
class ConnectorPartner:
link_id: int
partner_id: int
confidence: int
skeleton_id: int
relation_id: int
relation_name: str
@dataclass
class ConnectorDetail:
# 'connector_id': detail[0],
# 'x': detail[1],
# 'y': detail[2],
# 'z': detail[3],
# 'confidence': detail[4],
# 'partners': [p for p in detail[5]]
connector_id: int
location: CoordZYX
confidence: int
partners: List[ConnectorPartner]
@classmethod
def from_response(cls, response):
return cls(
response["connector_id"],
CoordZYX(response["z"], response["y"], response["x"]),
response["confidence"],
[ConnectorPartner(**p) for p in response["partners"]],
)
@staticmethod
def to_connector_partners_df(details: Iterable[ConnectorDetail]):
dims = ["x", "y", "z"]
conn_ids = []
locs = []
partners_dfs = []
for det in details:
conn_ids.append(det.connector_id)
locs.append([det.location[d] for d in dims])
partners_dfs.append(det.to_partners_df())
connectors = pd.DataFrame(
np.array(conn_ids, dtype=np.uint64), columns=["connector_id"]
)
connectors[dims] = pd.DataFrame(np.array(locs), columns=dims)
first, *others = partners_dfs
partners = first.append(list(others))
return connectors, partners
def to_partners_df(self):
headers = ("skeleton_id", "treenode_id", "connector_id", "is_presynaptic")
is_presyn = []
ids = []
for p in self.partners:
is_presyn.append(p.relation_name.startswith("pre"))
ids.append([p.skeleton_id, p.partner_id, self.connector_id])
df = pd.DataFrame(np.array(ids, dtype=np.uint64), columns=headers[:-1])
df[headers[-1]] = np.array(is_presyn, bool)
return df
class Catmaid(CatmaidClientApplication):
def nodes_in_bbox(
self,
bbox: Bbox,
treenodes=True,
connectors=True,
splits: Sequence[int] = (2, 2, 2),
) -> Tuple[Optional[pd.DataFrame], Optional[pd.DataFrame]]:
logger.debug("Getting nodes in bbox %s", bbox)
data = bbox.to_catmaid()
try:
response = self.post((self.project_id, "/node/list"), data)
except HTTPError as e:
            if e.response is not None and e.response.status_code == 504:
logger.warning("Server timeout; splitting Bbox")
response = {3: True}
else:
raise e
if not response[3]:
tn_df = treenode_table(response[0]) if treenodes else None
conn_df = connector_node_table(response[1]) if connectors else None
logger.debug("Got %s treenodes, %s connectors", len(tn_df), len(conn_df))
return tn_df, conn_df
# node limit reached
logger.info("Splitting bbox into %s", splits)
tn_dfs = []
conn_dfs: List[pd.DataFrame] = []
for sub_bb in bbox.split(*splits):
tn_df, conn_df = self.nodes_in_bbox(sub_bb, treenodes, connectors, splits)
if treenodes and tn_df is not None:
tn_dfs.append(tn_df)
if connectors and conn_df is not None:
conn_dfs.append(conn_df)
return (
merge_treenode_tables(tn_dfs) if treenodes else None,
merge_connector_tables(conn_dfs) if connectors else None,
)
def connector_detail(self, conn_id: int):
return ConnectorDetail.from_response(
self.get(f"{self.project_id}/connectors/{conn_id}")
)
def connector_detail_many(self, conn_ids, threads=DEFAULT_WORKERS):
yield from batch(
self.connector_detail, [to_args_kwargs(c) for c in conn_ids], threads
)
ArgsKwargs = Tuple[Sequence, Dict[str, Any]]
def to_args_kwargs(*args, **kwargs):
return args, kwargs
def batch(fn, args_kwargs: Iterable[ArgsKwargs], workers=DEFAULT_WORKERS):
with ThreadPoolExecutor(workers) as exe:
futs = [exe.submit(fn, *args, **kwargs) for args, kwargs in args_kwargs]
for f in futs:
yield f.result()
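# Example of the batch helper with a plain Python function (no CATMAID server needed):
#     list(batch(pow, [to_args_kwargs(i, 2) for i in range(5)])) == [0, 1, 4, 9, 16]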
def get_creds() -> Path:
try:
return Path(os.environ["CATMAID_CREDENTIALS"])
except KeyError:
raise RuntimeError(
"Use CATMAID_CREDENTIALS env var to give location of catmaid credentials file"
)
def get_catmaid() -> Catmaid:
return Catmaid.from_json(get_creds())
| StarcoderdataPython |
167107 | """
Author: michealowen
Last edited: 2019.11.1,Friday
LASSO回归算法,使用波士顿房价数据集
在损失函数中加入L1正则项,后验概率的符合拉普拉斯分布
"""
#encoding=UTF-8
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
class ridgeRegression:
'''
    LASSO regression model class
'''
def __init__(self,X,x_test,Y,y_test,k=1.0):
'''
Params:
            X: samples, shape=(m,n)
            Y: labels, shape=(1,m)
            x_test: test-set data
            y_test: test-set labels
            k: regularization coefficient
'''
self.X=X
self.Y=Y
n = self.X.shape[1]
self.w = np.array([0 for i in range(n+1)],dtype='float64')
self.x_test = x_test
self.y_test = y_test
self.k=k
def preProcess(self,x):
'''
        Append x0 = 1 and standardize each feature of the samples
'''
x = np.c_[x,np.array([1 for i in range(x.shape[0])])]
for i in range(x.shape[1]-1):
x[:,i] = (x[:,i] - np.mean(x[:,i]))/np.std(x[:,i])
return x
    def fit(self,method='BGD',alpha=None,iterNums=None,batchSize=None):
        '''
        Select the fitting method according to `method`:
        BGD: batch gradient descent, SGD: stochastic gradient descent,
        SBGD: mini-batch gradient descent, MT: closed-form matrix solution
        '''
        self.X = self.preProcess(self.X) # preprocess the data
        if method == 'BGD':
            self.BGD(alpha,iterNums)
        elif method == 'SGD':
            self.SGD(alpha)
        elif method == 'SBGD':
            self.SBGD(alpha)
        elif method == 'MT':
            self.MT()
        return None
    def MT(self):
        """
        Closed-form (matrix) solution. Note: this is the ridge (L2) closed
        form; the L1 (LASSO) penalty has no closed-form solution.
        """
        self.w = np.dot(np.linalg.pinv(np.dot(self.X.T,self.X)+self.k*np.eye(len(self.w))),np.dot(self.X.T,self.Y.T))
        m,n = self.X.shape # m samples, n dimensions
        J = 1/m * np.dot( (np.dot(self.X,self.w.T) - self.Y),(np.dot(self.X,self.w.T) - self.Y).T) #MSE
        print(J)
        return None
    def BGD(self,alpha,iterNums):
        '''
        Batch gradient descent using all samples
        '''
        if alpha == None:
            print('Missing parameter: learning rate')
        if iterNums == None:
            print('Missing parameter: number of iterations')
        m,n = self.X.shape # m samples, n dimensions
        i = 0
        MinCost = float("inf")
        while i<iterNums:
            J = 1/m * (np.dot( (np.dot(self.X,self.w.T) - self.Y),(np.dot(self.X,self.w.T) - self.Y).T)+self.k*np.sum(np.abs(self.w)))
            if J < MinCost:
                MinCost = J
                print(J," ",i)
                self.w -= 2/m * alpha * ((np.dot(self.X.T ,(np.dot( self.X ,self.w.T) - self.Y.T ))).T+self.k*np.sign(self.w))  # L1 subgradient: k*sign(w)
                i += 1
            else:
                break
        return None
def SGD(self,alpha):
'''
        Stochastic gradient descent
'''
if alpha == None:
            print('Missing parameter: learning rate')
        m,n = self.X.shape # m samples, n dimensions
i = 0
MinCost = float("inf")
while True:
partIndex = np.random.randint(len(self.X))
X_part = self.X[partIndex]
Y_part = self.Y[partIndex]
J = 1/m * (np.dot( (np.dot(X_part,self.w) - Y_part),(np.dot(X_part,self.w) - Y_part).T)+self.k*np.sum(np.abs(self.w)))
if abs(J - MinCost) < 0.0001:
break
else:
print("J:",J," ",i)
                self.w -= 2/m * alpha * ((np.dot(X_part.T ,(np.dot( X_part ,self.w.T) - Y_part.T ))).T+self.k*np.sign(self.w))  # L1 subgradient: k*sign(w)
i = i+1
MinCost = J
return None
def SBGD(self,alpha):
'''
        Mini-batch gradient descent
'''
if alpha == None:
            print('Missing parameter: learning rate')
        m,n = self.X.shape # m samples, n dimensions
i = 0
MinCost = float("inf")
while True:
partIndex = np.random.choice(range(m),int(m/10))
X_part = self.X[partIndex]
Y_part = self.Y[partIndex]
J = 1/m * (np.dot( (np.dot(X_part,self.w) - Y_part),(np.dot(X_part,self.w) - Y_part).T)+self.k*np.sum(np.abs(self.w)))
if abs(J - MinCost) < 0.0001:
break
else:
print("J:",J," ",i)
                self.w -= 2/m * alpha * ((np.dot(X_part.T ,(np.dot( X_part ,self.w.T) - Y_part.T ))).T + self.k*np.sign(self.w))  # L1 subgradient: k*sign(w)
i = i+1
MinCost = J
return None
def predict(self,data):
'''
        Predict the outputs for the given input data
'''
data = self.preProcess(data)
y = np.dot(data,self.w)
print(y)
return None
def evaluate(self):
'''
        Evaluate the model on the test set by computing the RSS (residual sum of squares)
'''
        print('Evaluation')
print(np.sum(np.square((np.dot(self.preProcess(self.x_test),self.w.T)-y_test))))
return None
if __name__ == '__main__':
boston = load_boston()
#print(type(boston))
x_train,x_test,y_train,y_test= train_test_split(boston.data,boston.target,test_size=0.1,random_state=0)
model = ridgeRegression(x_train,x_test,y_train,y_test,k=1.0)
model.fit('SGD',alpha=0.1,iterNums=10000)
#model.fit('MT',alpha=0.1,iterNums=10000)
model.evaluate() | StarcoderdataPython |
1681828 | <reponame>kozhevnikov-peter/curve-dao-contracts
import brownie
WEEK = 86400 * 7
YEAR = 365 * 86400
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
def test_burn(accounts, token):
balance = token.balanceOf(accounts[0])
initial_supply = token.totalSupply()
token.burn(31337, {'from': accounts[0]})
assert token.balanceOf(accounts[0]) == balance - 31337
assert token.totalSupply() == initial_supply - 31337
def test_burn_not_admin(accounts, token):
initial_supply = token.totalSupply()
token.transfer(accounts[1], 1000000, {'from': accounts[0]})
token.burn(31337, {'from': accounts[1]})
assert token.balanceOf(accounts[1]) == 1000000 - 31337
assert token.totalSupply() == initial_supply - 31337
def test_burn_all(accounts, token):
initial_supply = token.totalSupply()
token.burn(initial_supply, {'from': accounts[0]})
assert token.balanceOf(accounts[0]) == 0
assert token.totalSupply() == 0
def test_overburn(accounts, token):
initial_supply = token.totalSupply()
with brownie.reverts("Integer underflow"):
token.burn(initial_supply + 1, {'from': accounts[0]})
| StarcoderdataPython |
1716067 | <gh_stars>1-10
#! python3
"""
Generate JSON configuration files for the Tethys simulation code.
This script generates experimental settings with a constant p and m, as
well as a fixed number of iterations. Other parameters, such as the generation
method of the list (more precisely the lists' length), the initial orientation
of the edges in the graph, and the choice of the starting and ending edges, are
also fixed and can be customized.
The values for epsilon are given as a list (see other comments in the code) and
the values for n are derived from these.
"""
import json
import math
import copy
import argparse
m = 2**10
p = 512
iterations = 6000000
# the different values for epsilon
epsilons = [-0.35, -0.3, -0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14,
            0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.4, 0.42, 0.44, 0.46, 0.48, 0.5]
# values of n computed from the above list and parameters
n_values = [math.ceil(m * p / (2 + e)) for e in epsilons]
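# e.g. for epsilon = 0: n = ceil(2**10 * 512 / 2) = 262144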
print("Values for epsilon:")
print(epsilons)
print("Values for n:")
print(n_values)
base_params_dict = dict()
base_params_dict["m"] = m
base_params_dict["bucket_capacity"] = p
base_params_dict["list_max_len"] = p
base_params_dict["generation_method"] = "WorstCaseGeneration"
base_params_dict["edge_orientation"] = "RandomOrientation"
base_params_dict["location_generation"] = "HalfRandom"
base_dict = dict()
base_dict["exp_params"] = base_params_dict
base_dict["iterations"] = iterations
data = list()
for n in n_values:
exp_dict = copy.deepcopy(base_dict)
exp_dict["exp_params"]["n"] = n
data.append(exp_dict)
parser = argparse.ArgumentParser(
description='Maxflow configuration generator (variable epsilon)')
parser.add_argument('filename', metavar='path',
help='Path of the output JSON file')
args = parser.parse_args()
with open(args.filename, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
| StarcoderdataPython |
155605 | <reponame>Vinson-sheep/DRL-Algorithms-with-Pytorch-for-Beginners-<filename>Char10 TD3/TD3.py
import os
import numpy as np
import copy
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from buffer import ReplayBuffer
'''
Twin Delayed Deep Deterministic Policy Gradients (TD3)
Original paper:
Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3) https://arxiv.org/abs/1802.09477
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# parameters
env_name = "Pendulum-v1"
tau = 0.01
epsilon = 0.8
epsilon_decay = 0.9999
actor_lr = 3e-4
critic_lr = 3e-4
discount = 0.99
buffer_size = 10000
batch_size = 512
policy_freq = 2
policy_noise = 0.2
noise_clip = 0.5
max_episode = 40000
max_step_size = 500
seed = 1
render = True
load = False
env = gym.make(env_name)
def envAction(action):
low = env.action_space.low
high = env.action_space.high
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
# Set seeds
env.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, init_w=3e-3):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.l3.weight.data.uniform_(-init_w, init_w)
self.l3.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
        a = torch.tanh(self.l3(a))
return a
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, init_w=3e-3):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
self.l3.weight.data.uniform_(-init_w, init_w)
self.l3.bias.data.uniform_(-init_w, init_w)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
self.l6.weight.data.uniform_(-init_w, init_w)
self.l6.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
sa = torch.cat((state, action), 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat((state, action), 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class TD3:
def __init__(self):
super(TD3, self).__init__()
self.actor = Actor(state_dim, action_dim).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
self.buffer = ReplayBuffer(buffer_size, batch_size)
self.num_training = 1
self.writer = SummaryWriter('./log')
os.makedirs('./model/', exist_ok=True)
def act(self, state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.actor(state).cpu().data.numpy().flatten()
def put(self, *transition):
state, action, reward, next_state, done = transition
state = torch.FloatTensor(state).to(device).unsqueeze(0)
action = torch.FloatTensor(action).to(device).unsqueeze(0)
Q = self.critic.Q1(state, action).detach()
self.buffer.add(transition)
return Q.cpu().item()
def update(self):
if not self.buffer.sample_available():
return
state, action, reward, next_state, done = self.buffer.sample()
# state = (state - self.buffer.state_mean())/(self.buffer.state_std() + 1e-7)
# next_state = (next_state - self.buffer.state_mean())/(self.buffer.state_std() + 1e-6)
# reward = reward / (self.buffer.reward_std() + 1e-6)
state = torch.tensor(state, dtype=torch.float).to(device)
action = torch.tensor(action, dtype=torch.float).to(device)
reward = torch.tensor(reward, dtype=torch.float).view(batch_size, -1).to(device)
next_state = torch.tensor(next_state, dtype=torch.float).to(device)
done = torch.tensor(done, dtype=torch.float).to(device).view(batch_size, -1).to(device)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * policy_noise
).clamp(-noise_clip, noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-1, 1)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + (1 - done) * discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
self.writer.add_scalar('Loss/critic_loss', critic_loss, global_step=self.num_training)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.num_training % policy_freq == 0:
            # Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
self.writer.add_scalar('Loss/actor_loss', actor_loss, global_step=self.num_training)
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
self.num_training += 1
def save(self):
torch.save(self.actor.state_dict(), './model/actor.pth')
torch.save(self.critic.state_dict(), './model/critic.pth')
print("====================================")
print("Model has been saved...")
print("====================================")
    def load(self):
        # load_state_dict(torch.load(path)) is the correct call; the original
        # passed the arguments to torch.load backwards, which fails at runtime
        self.actor.load_state_dict(torch.load('./model/actor.pth'))
        self.critic.load_state_dict(torch.load('./model/critic.pth'))
print("====================================")
print("Model has been loaded...")
print("====================================")
if __name__ == '__main__':
agent = TD3()
state = env.reset()
if load:
agent.load()
if render:
env.render()
print("====================================")
print("Collection Experience...")
print("====================================")
total_step = 0
for episode in range(max_episode):
total_reward = 0
state = env.reset()
for step in range(max_step_size):
total_step += 1
action = agent.act(state)
if epsilon > np.random.random():
action = (action + np.random.normal(0, 0.2, size=action_dim)).clip(-1, 1)
next_state, reward, done, _ = env.step(envAction(action))
# reward trick of BipedalWalker-v3
# if reward == -100:
# reward = -1
if render:
env.render()
agent.put(state, action, reward, next_state, done)
agent.update()
total_reward += reward
state = next_state
epsilon = max(epsilon_decay*epsilon, 0.10)
agent.writer.add_scalar('Other/epsilon', epsilon, global_step=total_step)
if done:
break
if episode % 10 == 0:
agent.save()
agent.writer.add_scalar('Other/total_reward', total_reward, global_step=episode)
| StarcoderdataPython |
4813534 | <reponame>yehuohan/ln-ss<filename>test/fourier-FT-test.py
#!/usr/bin/env python3
import sys,os
sys.path.append(os.getcwd() + '/../')
import lnss.fourier as fourier
import numpy as np
import scipy as sp
import sympy as sy
from sympy import Piecewise, integrate, fourier_series, symbols, DiracDelta
from sympy import Sum, exp, cos, sin, pi, I, Abs, oo
from sympy.plotting import plot
from sympy.abc import t, w, W, n, k
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['font.family'] = 'Consolas'
mpl.rcParams['font.size'] = 11
def on_key(event:mpl.backend_bases.KeyEvent):
if event.key == 'escape':
plt.close()
#%% FT
# 三角波函数,三角波宽T=2,高度=1
T = 2
f = Piecewise((0, (t < -T/2) | (t > T/2)), (t/(T/2)+1, t < 0), (1-t/(T/2), t >= 0))
fw = fourier.ft(f)
# fw = sy.fourier_transform(f, t, w)  # sympy uses a different FT convention: the exponent carries an extra 2*pi
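# Convention note (added): sympy.fourier_transform computes
#   F(k) = Integral f(t) * exp(-2*pi*I*k*t) dt,
# while lnss.fourier.ft presumably uses
#   F(w) = Integral f(t) * exp(-I*w*t) dt,
# hence the extra 2*pi mentioned above. (The lnss module itself is not shown
# here, so its exact convention is inferred from this comment.)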
print('fw : ', [Abs(fw.subs(w, j).evalf()) for j in range(5)])
fig = plt.figure('Fourier transform')
fig.canvas.mpl_connect('key_press_event', on_key)
ax = fig.add_subplot(1, 2, 1)
fx_range = np.linspace(-T, T, 100)
ax.plot(fx_range, [f.subs(t, j) for j in fx_range])
ax = fig.add_subplot(1, 2, 2)
fw_range = np.linspace(-T*8, T*8, 100)
ax.plot(fw_range, [Abs(fw.subs(w, j).evalf()) for j in fw_range])
ax.set_xlabel(r'$|F(\omega)|-\omega$')
plt.show()
sys.exit()
| StarcoderdataPython |
3337566 | from urllib import parse
from ..utils.collections import flatten
from ..exceptions import RouteNotFoundException, MethodNotAllowedException
class Router:
http_methods = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"]
def __init__(self, *routes, module_location=None):
self.routes = flatten(routes)
def find(self, path, request_method, subdomain=None):
from .HTTPRoute import HTTPRoute
for route in self.routes:
if route.match(path, request_method, subdomain=subdomain):
return route
# we did not find a route matching the given path and method.
# we will try to find a route matching other methods
other_methods = [
method for method in self.http_methods if method != request_method
]
matched_methods = []
for other_method in other_methods:
for route in self.routes:
if route.match(path, other_method, subdomain=subdomain):
matched_methods.append(other_method)
break
# we really did not find a route
if not matched_methods:
return None
# if alternative methods have been found, check if current request method is OPTIONS
        # to build a proper preflight response, otherwise raise a method-not-allowed error
if request_method == "OPTIONS":
def preflight_response(app):
return (
app.make("response")
.with_headers({"Allow": ", ".join(matched_methods)})
.status(204)
)
preflight_route = HTTPRoute(path, request_method=["options"])
preflight_route.get_response = preflight_response
return preflight_route
else:
raise MethodNotAllowedException(matched_methods, request_method)
def matches(self, path):
for route in self.routes:
if route.matches(path):
return route
def find_by_name(self, name):
for route in self.routes:
if route.match_name(name):
return route
def route(self, name: str, parameters: dict = {}, query_params: dict = {}) -> str:
"""Return URL string from given route name and parameters."""
route = self.find_by_name(name)
if route:
return route.to_url(parameters, query_params)
raise RouteNotFoundException(f"Could not find route with the name '{name}'")
def set_controller_locations(self, location):
self.controller_locations = location
return self
def add(self, *routes):
self.routes.append(*routes)
self.routes = flatten(self.routes)
def set(self, *routes):
self.routes = []
self.routes.append(*routes)
self.routes = flatten(self.routes)
@classmethod
def compile_to_url(cls, uncompiled_route, params={}, query_params={}):
"""Compile the route url into a usable url: converts /url/@id into /url/1.
Used for redirection
Arguments:
route {string} -- An uncompiled route like (/dashboard/@user:string/@id:int)
Keyword Arguments:
params {dict} -- Dictionary of parameters to pass to the route (default: {{}})
query_params {dict} -- Dictionary of query parameters to pass to the route (default: {{}})
Returns:
string -- Returns a compiled string (/dashboard/joseph/1)
"""
if "http" in uncompiled_route:
return uncompiled_route
# Split the url into a list
split_url = uncompiled_route.split("/")
# Start beginning of the new compiled url
compiled_url = "/"
# Iterate over the list
for url in split_url:
if url:
# if the url contains a parameter variable like @id:int
if "@" in url:
url = url.replace("@", "").split(":")[0]
if isinstance(params, dict):
compiled_url += str(params[url]) + "/"
elif isinstance(params, list):
compiled_url += str(params.pop(0)) + "/"
elif "?" in url:
url = url.replace("?", "").split(":")[0]
if isinstance(params, dict):
compiled_url += str(params.get(url, "/")) + "/"
elif isinstance(params, list):
compiled_url += str(params.pop(0)) + "/"
else:
compiled_url += url + "/"
compiled_url = compiled_url.replace("//", "")
# The loop isn't perfect and may have an unwanted trailing slash
if compiled_url.endswith("/") and not uncompiled_route.endswith("/"):
compiled_url = compiled_url[:-1]
        # The loop isn't perfect and may leave 2 slashes next to each other
if "//" in compiled_url:
compiled_url = compiled_url.replace("//", "/")
# Add eventual query parameters
if query_params:
compiled_url += "?" + parse.urlencode(query_params)
return compiled_url
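# Usage sketch (added for illustration; the route shape below is hypothetical):
#
#   Router.compile_to_url(
#       "/dashboard/@user:string/@id:int",
#       params={"user": "joseph", "id": 1},
#       query_params={"page": 2},
#   )
#   # -> "/dashboard/joseph/1?page=2"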
| StarcoderdataPython |
3271028 | <gh_stars>0
"""
Given the root of a binary search tree whose node values are all distinct,
convert it to a Greater Sum Tree, so that every node's new value equals the
sum of all values in the original tree that are greater than or equal to node.val.
Reminder — a binary search tree satisfies the following constraints:
    The left subtree of a node contains only nodes with keys less than the node's key.
    The right subtree of a node contains only nodes with keys greater than the node's key.
    The left and right subtrees must also be binary search trees.
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
from typing import Optional
class Solution:
"""
    Reverse in-order traversal: right, node, left.
"""
def convertBST(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
res = 0
def dfs(root: TreeNode):
if root:
                nonlocal res  # a closure in a nested function needs nonlocal; global would not work here
dfs(root.right)
res += root.val
root.val = res
dfs(root.left)
dfs(root)
return root | StarcoderdataPython |
3212107 | <filename>Db_connection.py
import sqlite3
from datetime import datetime as datetime
# This file contains helper functions for inserting sensor data into an SQLite
# database; import this module from another file to use them.
# Running this file will create the database and the table for storing the sensor data.
conn=sqlite3.connect('sensordata')
c = conn.cursor()
table_create_string = """CREATE TABLE IF NOT EXISTS sensorreadings(
id integer PRIMARY KEY,
temperature NUMERIC,
humidity NUMERIC,
pressure NUMERIC,
datetime datetime
);"""
#tables_insert_string = """INSERT INTO sensorreadings(temperature, humidity, pressure)VALUES(1,1,1)"""
try:
c.execute(table_create_string)
except sqlite3.Error as e:
print(e)
# try:
# c.execute(tables_insert_string)
# except sqlite3.IntegrityError as e:
# print(e)
# conn.commit()
def create_connection():
try:
conn=sqlite3.connect('sensordata')
except sqlite3.Error as e:
print(e)
#c = conn.cursor()
return conn
def insert_dbvalues(connection, temperature, humidity, pressure):
    # Store the timestamp as well: the original computed datetime.now() but never
    # bound it, leaving the datetime column empty on every insert.
    data_tuple = (temperature, humidity, pressure, datetime.now())
    tables_insert_string = """INSERT INTO sensorreadings(temperature, humidity, pressure, datetime)VALUES(?,?,?,?)"""
#print(tables_insert_string)
c = connection.cursor()
try:
c.execute(tables_insert_string, data_tuple)
except sqlite3.IntegrityError as e:
print(e)
connection.commit()
c.close()
# insert_dbvalues(create_connection(),1,2,3)
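# Read-back sketch (added; assumes at least one row was inserted first):
# conn2 = create_connection()
# for row in conn2.execute("SELECT id, temperature, humidity, pressure, datetime FROM sensorreadings"):
#     print(row)
# conn2.close()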
# try:
# c.execute("INSERT INTO {tn} ({cn1}, {cn2}, {cn3}) VALUES (123456, 123456, 123456)".\
# format(tn=table_name, cn1=temperature_column, cn2=humidity_column, cn3=pressure_column))
# except sqlite3.IntegrityError:
# print('ERROR: integerityError')
conn.close() | StarcoderdataPython |
3286980 | <reponame>RideGreg/LeetCode
# Time: O(n)
# Space: O(h)
# 951
# For a binary tree T, we define a flip operation as follows: choose any node, swap the left and right child subtrees.
#
# A binary tree X is flip equivalent to a binary tree Y iff we can make X equal to Y after some number of flip operations.
#
# Write a function that determines whether two binary trees are flip equivalent.
# The trees are given by root nodes root1 and root2.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import collections
# bfs solution
class Solution(object):
def flipEquiv(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
dq1, dq2 = collections.deque([root1]), collections.deque([root2])
while dq1 and dq2:
node1, node2 = dq1.pop(), dq2.pop()
if not node1 and not node2:
continue
if not node1 or not node2 or node1.val != node2.val:
return False
if (not node1.left and not node2.right) or \
(node1.left and node2.right and node1.left.val == node2.right.val):
dq1.extend([node1.right, node1.left])
else:
dq1.extend([node1.left, node1.right])
dq2.extend([node2.left, node2.right])
return not dq1 and not dq2
# Time: O(n)
# Space: O(h)
# iterative dfs solution
class Solution2(object):
def flipEquiv(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
stk1, stk2 = [root1], [root2]
while stk1 and stk2:
node1, node2 = stk1.pop(), stk2.pop()
if not node1 and not node2:
continue
if not node1 or not node2 or node1.val != node2.val:
return False
if (not node1.left and not node2.right) or \
(node1.left and node2.right and node1.left.val == node2.right.val):
stk1.extend([node1.right, node1.left])
else:
stk1.extend([node1.left, node1.right])
stk2.extend([node2.left, node2.right])
return not stk1 and not stk2
# Time: O(n)
# Space: O(h)
# recursive dfs solution
class Solution3(object):
def flipEquiv(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
if not root1 and not root2:
return True
if not root1 or not root2 or root1.val != root2.val:
return False
return (self.flipEquiv(root1.left, root2.left) and
self.flipEquiv(root1.right, root2.right) or
self.flipEquiv(root1.left, root2.right) and
self.flipEquiv(root1.right, root2.left))
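# Quick sanity check (added; not part of the original file): the two trees below
# differ by a single flip at the root, so every solution should report True.
if __name__ == '__main__':
    a = TreeNode(1); a.left = TreeNode(2); a.right = TreeNode(3)
    b = TreeNode(1); b.left = TreeNode(3); b.right = TreeNode(2)
    print(Solution().flipEquiv(a, b))   # True (BFS)
    print(Solution2().flipEquiv(a, b))  # True (iterative DFS)
    print(Solution3().flipEquiv(a, b))  # True (recursive DFS)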
| StarcoderdataPython |
1668802 | <reponame>xram64/AdventOfCode2021<filename>day03/day03.py<gh_stars>1-10
## Advent of Code 2021: Day 3
## https://adventofcode.com/2021/day/3
## <NAME> | github.com/xram64
## Answers: [Part 1]: 3633500, [Part 2]: 4550283
import sys
# Return most commonly-found bit, breaking ties in favor of '1'
def get_most_common_bit(numbers, pos):
frequency_of = [0, 0] # frequencies for each bit [<0's>, <1's>]
for num in numbers:
if num[pos] == 0: frequency_of[0] += 1
elif num[pos] == 1: frequency_of[1] += 1
most_common_bit = 0 if (frequency_of[0] > frequency_of[1]) else 1
return most_common_bit
# Return least commonly-found bit, breaking ties in favor of '0'
def get_least_common_bit(numbers, pos):
return (get_most_common_bit(numbers, pos) ^ 1)
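# Worked example: for numbers = [[1, 0], [1, 1], [0, 1]] and pos = 0, bit '1'
# occurs twice and bit '0' once, so get_most_common_bit(...) == 1 and
# get_least_common_bit(...) == 0.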
if __name__ == '__main__':
with open('day03_input.txt', 'r') as f:
def parse(line): return [int(bit) for bit in list(line) if (bit != '\n')]
diag_numbers = list( map(parse, f.readlines()) )
diag_numbers_len = len(diag_numbers[0])
##############
### Part 1 ###
most_common_bits = ''
for pos in range(diag_numbers_len):
most_common_bits += str( get_most_common_bit(diag_numbers, pos) )
    # Gamma rate: most common bits from input
    # (the puzzle defines gamma as the most common bits and epsilon as their
    # complement; the two labels were swapped in the original)
    γ = int(most_common_bits, 2)
    # Epsilon rate: least common bits from input
    ε = γ ^ int('1'*diag_numbers_len, 2)  # invert bits in γ
    print(f"[Part 1] Gamma rate: γ = {γ}. Epsilon rate: ε = {ε}. Power consumption: {ε*γ}.")
##############
### Part 2 ###
oxy_rating_filtered_nums = diag_numbers
co2_rating_filtered_nums = diag_numbers
## Oxygen generator rating
for pos in range(diag_numbers_len):
# Find most common bit (MCB) and filter numbers not matching bit criteria
mcb = get_most_common_bit(oxy_rating_filtered_nums, pos)
oxy_rating_filtered_nums = list( filter(lambda num: num[pos] == mcb, oxy_rating_filtered_nums) )
if len(oxy_rating_filtered_nums) == 1:
# Convert final element matching bit criteria to a decimal integer
oxy_rating = int(''.join( [str(n) for n in oxy_rating_filtered_nums[0]] ), 2)
break
elif len(oxy_rating_filtered_nums) <= 0:
print('Error: Oxygen rating list empty.')
sys.exit()
else:
print('Error: Multiple numbers found matching bit criteria for oxygen rating.')
sys.exit()
## CO2 scrubber rating
for pos in range(diag_numbers_len):
# Find least common bit (LCB) and filter numbers not matching bit criteria
lcb = get_least_common_bit(co2_rating_filtered_nums, pos)
co2_rating_filtered_nums = list( filter(lambda num: num[pos] == lcb, co2_rating_filtered_nums) )
if len(co2_rating_filtered_nums) == 1:
# Convert final element matching bit criteria to a decimal integer
co2_rating = int(''.join( [str(n) for n in co2_rating_filtered_nums[0]] ), 2)
break
elif len(co2_rating_filtered_nums) <= 0:
print('Error: CO2 rating list empty.')
sys.exit()
else:
print('Error: Multiple numbers found matching bit criteria for CO2 rating.')
sys.exit()
print(f"[Part 2] Oxygen generator rating: {oxy_rating}. CO2 scrubber rating: {co2_rating}. Life support rating: {oxy_rating*co2_rating}.")
| StarcoderdataPython |
4825723 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os
from lxml import html
from lxml import etree
from lxml.html.clean import Cleaner
def usage():
str = """
Usage:
Importe les fichiers depuis une archive IMS de Moodle de Lille3...
Nettoie le HTML en enlevant les attributs de style
Réécrit les liens en utilisant un préfixe __BASE__ et sort le contenu uniquement de l'élément body.
fromIMS zipin directory_out
"""
print (str)
exit(1)
def myRewriteLink(link):
return link.replace("https://ged.univ-lille3.fr/nuxeo/site/dav/EspacePublicWWW/","__BASE__/")
def convert(filein, fileout):
tree = html.fromstring(filein)
cleaner(tree)
body = tree.find('body')
body.rewrite_links(myRewriteLink)
f = open(fileout,"wb")
f.write(html.tostring(body))
f.close()
if len(sys.argv) != 3:
usage()
dirin = sys.argv[1]
dirout= sys.argv[2]
cleaner = Cleaner(style=True)
import zipfile
with zipfile.ZipFile(dirin,'r') as myzip:
for orig in myzip.namelist():
print(orig)
dest = os.path.join(dirout,orig)
if orig.endswith('/'):
os.makedirs(dest,exist_ok=True)
elif orig.endswith(".html"):
f = myzip.read(orig)
convert(f,dest)
else :
myzip.extract(orig,dirout)
| StarcoderdataPython |
3269749 | <filename>example8_dict.py
# -*- coding: utf-8 -*-
# An empty dict
a = {}
# Result: None
print a.get("abc")
# Add a value to the dict
a["abc"] = 5
# Result: 5
print a.get("abc")
# Add another value
a[16] = "A text"
# Result: A text
print a.get(16)
# The whole dict {16: 'A text', 'abc': 5}
print a
# You can add content when creating the dict
b = {"ccc": "f", 16: 165}
# You can put dicts in a list
a_list = [a, b]
# Result: [{16: 'A text', 'abc': 5}, {16: 165, 'ccc': 'f'}]
print a_list
| StarcoderdataPython |