Dataset schema (one row per source file; the rows below are pipe-delimited in this column order; ⌀ indicates the column can be null):

- hexsha: string, length 40
- size: int64, 5 to 2.06M
- ext: string, 10 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 3 to 248
- max_stars_repo_name: string, length 5 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 248
- max_issues_repo_name: string, length 5 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 248
- max_forks_repo_name: string, length 5 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 5 to 2.06M
- avg_line_length: float64, 1 to 1.02M
- max_line_length: int64, 3 to 1.03M
- alphanum_fraction: float64, 0 to 1
- count_classes: int64, 0 to 1.6M
- score_classes: float64, 0 to 1
- count_generators: int64, 0 to 651k
- score_generators: float64, 0 to 1
- count_decorators: int64, 0 to 990k
- score_decorators: float64, 0 to 1
- count_async_functions: int64, 0 to 235k
- score_async_functions: float64, 0 to 1
- count_documentation: int64, 0 to 1.04M
- score_documentation: float64, 0 to 1
7d57cb53958a854e64b6d878a9826f34dbca7a63 | 96 | py | Python | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
]
| 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| null | null | null | /home/runner/.cache/pip/pool/6e/30/4e/6df13ab33dd498623bcb8f860a029ad969938275a514553b6fe8b4b10b | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7d58040a8760df0e7d462d968892a9628d5e39f3 | 8,960 | py | Python | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
]
| 2 | 2021-06-30T11:12:31.000Z | 2021-09-24T08:50:03.000Z | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
]
| null | null | null | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
]
| 2 | 2021-07-02T12:31:37.000Z | 2021-11-09T08:44:09.000Z |
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import queue
from . import database_manager
class CorrectorWorker:
def __init__(self, settings, name):
self.settings = settings
self.db_m = None
self.worker_name = name
def run(self, to_process, duplicates):
""" Process run entry point
:param to_process: Queue of documents to be processed
:param duplicates: Variable to hold the number of duplicates
:return: None
"""
self.db_m = database_manager.DatabaseManager(self.settings)
try:
# Process queue while is not empty
while True:
data = to_process.get(True, 1)
duplicate_count = self.consume_data(data)
with duplicates.get_lock():
duplicates.value += duplicate_count
except queue.Empty:
pass
def consume_data(self, data):
"""
The Corrector worker. Processes a batch of documents with the same message_id.
:param data: Contains LoggerManager, DatabaseManager, DocumentManager, message_id and documents to be processed.
:return: Returns number of duplicates found.
"""
# Get parameters
logger_manager = data['logger_manager']
doc_m = data['document_manager']
message_id = data['message_id']
documents = data['documents']
to_remove_queue = data['to_remove_queue']
duplicates = no_requestInTs = 0
hash_set = set()
for current_document in documents:
# Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)
if current_document['requestInTs'] is None and current_document['securityServerType'] is None:
to_remove_queue.put(current_document['_id'])
no_requestInTs += 1
self.db_m.mark_as_corrected(current_document)
"""
:logger_manager.log_warning('no_requestInTs',
:'_id : ObjectId(\'' + str(current_document['_id']) + '\'),
:messageId : ' + str(current_document['messageId']))
"""
continue
# Check if is batch duplicated
current_document_hash = doc_m.calculate_hash(current_document)
if current_document_hash in hash_set:
# If yes, mark to removal
to_remove_queue.put(current_document['_id'])
duplicates += 1
self.db_m.mark_as_corrected(current_document)
"""
:logger_manager.log_warning('batch_duplicated',
:'_id : ObjectId(\'' + str(current_document['_id']) + '\'),
:messageId : ' + str(current_document['messageId']))
"""
continue
# Check if is database duplicated
if self.db_m.check_if_hash_exists(current_document_hash):
# If here, add to batch duplicate cache
hash_set.add(current_document_hash)
duplicates += 1
self.db_m.mark_as_corrected(current_document)
"""
:logger_manager.log_warning('database_duplicated',
:'_id : ObjectId(\'' + str(current_document['_id']) + '\'),
:messageId : ' + str(current_document['messageId']))
"""
continue
# Mark hash as seen
hash_set.add(current_document_hash)
# Find possible matching documents
matching_documents = self.db_m.find_by_message_id(current_document)
# Try to match the current document with possible pairs (regular)
merged_document = doc_m.find_match(current_document, matching_documents)
matching_type = ''
if merged_document is None:
# Try to match the current document with orphan-matching
merged_document = doc_m.find_match(current_document, matching_documents, orphan=True)
if merged_document is not None:
matching_type = 'orphan_pair'
else:
matching_type = 'regular_pair'
if merged_document is None:
matching_type = 'orphan'
if current_document['securityServerType'] == 'Producer':
new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)
else:
if current_document['securityServerType'] != 'Client':
current_document['securityServerType'] = 'Client'
new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)
new_document = doc_m.apply_calculations(new_document)
new_document['correctorTime'] = database_manager.get_timestamp()
new_document['correctorStatus'] = 'processing'
new_document['matchingType'] = matching_type
# Mark non-xRoad queries as 'done' instantly. No reason to wait matching pair
if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \
and new_document['client']['clientXRoadInstance'] is None:
new_document['correctorStatus'] = 'done'
new_document['matchingType'] = 'orphan'
self.db_m.add_to_clean_data(new_document)
else:
if current_document['securityServerType'] == 'Client':
if merged_document['client'] is None:
merged_document['client'] = current_document
merged_document = doc_m.apply_calculations(merged_document)
merged_document['clientHash'] = current_document_hash
merged_document['correctorTime'] = database_manager.get_timestamp()
merged_document['correctorStatus'] = 'done'
merged_document['matchingType'] = matching_type
self.db_m.update_document_clean_data(merged_document)
else:
# This should never-ever happen in >= v0.4.
msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)
logger_manager.log_warning('corrector_merging', msg)
else:
if merged_document['producer'] is None:
merged_document['producer'] = current_document
merged_document = doc_m.apply_calculations(merged_document)
merged_document['producerHash'] = current_document_hash
merged_document['correctorTime'] = database_manager.get_timestamp()
merged_document['correctorStatus'] = 'done'
merged_document['matchingType'] = matching_type
self.db_m.update_document_clean_data(merged_document)
else:
# This should never-ever happen in >= v0.4.
msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)
logger_manager.log_error('corrector_merging', msg)
self.db_m.mark_as_corrected(current_document)
if no_requestInTs:
msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)
logger_manager.log_warning('corrector_no_requestInTs', msg)
return duplicates
| 48.172043 | 136 | 0.60904 | 7,670 | 0.856027 | 0 | 0 | 0 | 0 | 0 | 0 | 3,816 | 0.425893 |
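The worker above detects duplicates inside one message_id batch by hashing each document and remembering which hashes it has already seen (plus a database lookup for cross-batch duplicates). The sketch below isolates the in-batch part of that idea; the hash function is an assumption for illustration, since DocumentManager.calculate_hash is not part of this file.

```python
import hashlib
import json

def calculate_hash(document):
    # Assumed stand-in for DocumentManager.calculate_hash: a stable digest of the document.
    return hashlib.sha256(json.dumps(document, sort_keys=True).encode()).hexdigest()

def split_batch(documents):
    """Return (unique_documents, duplicate_count) for one message_id batch."""
    seen = set()
    unique, duplicates = [], 0
    for doc in documents:
        h = calculate_hash(doc)
        if h in seen:
            duplicates += 1
            continue
        seen.add(h)
        unique.append(doc)
    return unique, duplicates

if __name__ == "__main__":
    batch = [{"messageId": "m1", "requestInTs": 1}, {"messageId": "m1", "requestInTs": 1}]
    print(split_batch(batch))  # ([{'messageId': 'm1', 'requestInTs': 1}], 1)
```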
7d5889cacaec1535d87725d19f570fd238dc7beb | 724 | py | Python | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
]
| 102 | 2020-05-18T04:52:26.000Z | 2022-03-29T06:53:10.000Z | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
]
| 14 | 2020-06-04T11:12:33.000Z | 2022-03-14T20:55:00.000Z | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
]
| 26 | 2020-05-20T02:47:04.000Z | 2022-03-16T15:09:41.000Z | import math
# Modify the parameters here
UNROLL_FACTOR = 32
DATA_T = 'unsigned short'
# Generate the code
data_type = DATA_T
level = int(math.log2(UNROLL_FACTOR))
for layer in range(level - 1, -1, -1):
pair = int(math.pow(2, layer))
for i in range(pair):
# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1]
if layer == level - 1:
print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i*2}] * local_B[0][{i*2}];')
print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i*2+1}] * local_B[0][{i*2+1}];')
else:
print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i*2} + add_{layer+1}_{i*2+1};')
print('local_C[c7][c6] += add_0_0;')
| 36.2 | 114 | 0.592541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.552486 |
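For illustration, running the generator above with UNROLL_FACTOR = 4 (instead of the default 32) prints the following balanced adder tree; this output is reproduced here to show the shape of the generated HLS code, not taken from the repository:

```
unsigned short mul_1_0_0 = local_A[0][0] * local_B[0][0];
unsigned short add_1_0 = mul_1_0_0 + local_A[0][1] * local_B[0][1];
unsigned short mul_1_1_0 = local_A[0][2] * local_B[0][2];
unsigned short add_1_1 = mul_1_1_0 + local_A[0][3] * local_B[0][3];
unsigned short add_0_0 = add_1_0 + add_1_1;
local_C[c7][c6] += add_0_0;
```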
7d589dd1f59c435f5b8daa7514686b5a0b85423d | 4,451 | py | Python | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
]
| 1 | 2018-02-11T03:32:22.000Z | 2018-02-11T03:32:22.000Z | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
]
| null | null | null | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
]
| null | null | null | import os
import psutil
import subprocess
import threading
import sys
from threading import Timer
import select
from player_abstract import AbstractPlayer
class PlainPlayer(AbstractPlayer):
def __init__(self, socket_file, working_dir, local_dir=None,
player_key="", player_mem_limit=256, player_cpu=20):
super().__init__(socket_file, working_dir, local_dir, None, None, player_key, player_mem_limit, player_cpu)
self.paused = False
self.streaming = False
self.process = None
def stream_logs(self, stdout=True, stderr=True, line_action=lambda line: print(line.decode())):
assert not self.streaming
self.streaming = True
if stdout:
threading.Thread(target=self._stream_logs, args=(self.process.stdout, line_action)).start()
if stderr:
threading.Thread(target=self._stream_logs, args=(self.process.stderr, line_action)).start()
def _stream_logs(self, stream, line_action):
for line in stream:
if self.process is None:
return
line_action(line)
def start(self):
if sys.platform == 'win32':
args = [os.path.join(self.working_dir, 'run.bat')]
# things break otherwise
env = dict(os.environ)
else:
args = ['sh', os.path.join(self.working_dir, 'run.sh')]
# Path needs to be passed through, otherwise some compilers (e.g gcc) can get confused and not find things
env = {'PATH': os.environ['PATH']}
env['PLAYER_KEY'] = str(self.player_key)
env['RUST_BACKTRACE'] = '1'
env['BC_PLATFORM'] = self._detect_platform()
if isinstance(self.socket_file, tuple):
# tcp port
env['TCP_PORT'] = str(self.socket_file[1])
else:
env['SOCKET_FILE'] = self.socket_file
cwd = self.working_dir
self.process = psutil.Popen(args, env=env, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
def pause(self):
# pausing too slow on windows
if sys.platform == 'win32': return
if not self.paused:
self.paused = True
suspend(self.process)
def unpause(self, timeout=None):
# pausing too slow on windows
if sys.platform == 'win32': return
if self.paused:
resume(self.process)
self.paused = False
def destroy(self):
if self.process is not None:
tmp = self.process
# This will signal to the log thread that everything is going to be shut down
# and ignore any future messages. In particular bash may log something like 'Terminated: <PID>'
# which would pollute the output of this script.
self.process = None
reap(tmp)
self.process = None
super().destroy()
def reap(process, timeout=3):
"Tries hard to terminate and ultimately kill all the children of this process."
def on_terminate(proc):
pass
# print("process {} terminated with exit code {}".format(proc.pid, proc.returncode))
try:
procs = process.children(recursive=True)
# send SIGTERM
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# send SIGKILL
for p in alive:
p.kill()
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for p in alive:
print("process {} survived SIGKILL; giving up" % p.pid)
process.kill()
except:
print("Killing failed; assuming process exited early.")
def suspend(process):
procs = process.children(recursive=False)
# to enterprising players reading this code:
# yes, it is possible to escape the pausing using e.g. `nohup` when running without docker.
# however, that won't work while running inside docker. Sorry.
for p in procs:
try:
p.suspend()
except:
pass
try:
p.suspend()
except:
pass
def resume(process):
procs = process.children(recursive=True)
for p in procs:
try:
p.resume()
except:
pass
try:
p.resume()
except:
pass
| 32.253623 | 119 | 0.599191 | 2,743 | 0.616266 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.23051 |
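reap() above is the cleanup path used by PlainPlayer.destroy(): it sends SIGTERM to every child of the given process and escalates to SIGKILL for survivors. A minimal usage sketch follows; the command line and the import name are assumptions, not taken from the source tree.

```python
import subprocess
import psutil
from player_plain import reap  # assumption: the module is importable under this name

# Start a long-running child the same way PlainPlayer.start() does (via psutil.Popen).
proc = psutil.Popen(["python3", "-c", "import time; time.sleep(60)"],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
    pass  # ... interact with the running player process here ...
finally:
    # Terminate the whole tree: SIGTERM first, then SIGKILL for anything still alive.
    reap(proc, timeout=3)
```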
7d58f75c60cd92e49b8842d06b9c5d9c9a1f2ca8 | 91 | py | Python | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
]
| 147 | 2019-05-10T20:46:42.000Z | 2022-03-25T17:23:19.000Z | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
]
| 306 | 2019-04-26T08:56:05.000Z | 2022-03-30T11:12:48.000Z | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
]
| 38 | 2019-09-03T17:24:04.000Z | 2022-01-06T05:09:18.000Z | from . import depth
from . import outliers
from . import stats
from . import visualization
| 18.2 | 27 | 0.78022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7d5919e7ea877027b781af2973db1c3cf8b3e549 | 4,726 | py | Python | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
]
| null | null | null | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
]
| null | null | null | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
]
| null | null | null | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import viewsets, status
from .models import Post,Comment,Category,Tag
from .serializers import PostSerializer,CommentSerializer,CategorySerializer,TagSerializer
class PostViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Post.objects.filter(status='published')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def post(self, request, format=None):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
post = serializer.save()
for tag in request.data.get('tags'):
t = Tag.objects.get(id=tag)
post.tags.add(t)
return Response(serializer.data)
return Response(serializer.errors )
def get_tags(self, *args, **kwargs):
tags = Tag.objects.all()
serializer = TagSerializer(tags, many=True)
return Response(serializer.data)
def get_object(self, pk):
try:
return Post.objects.get(pk=pk)
except Post.DoesNotExist:
raise Http404
def put(self, request, pk, format=None):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
post = serializer.save()
for tag in request.data.get('tags'):
t = Tag.objects.get(id=tag)
post.tags.add(t)
return Response(serializer.data)
return Response(serializer.errors )
def retrieve(self, request, pk=None):
queryset = Post.objects.all()
post = get_object_or_404(queryset, pk=pk)
serializer_context = {'request': request,}
serializer = PostSerializer(post, context=serializer_context)
return Response(serializer.data)
def delete(self, request, pk, format=None):
snippet = self.get_object(pk)
snippet.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CommentViewSet(viewsets.ViewSet):
def list(self, request):
queryset = Comment.objects.all()
serializer = CommentSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request, format=None):
post= get_object_or_404(Post, pk=pk)
serializer = CommentSerializer(data=request.data)
if serializer.is_valid():
comment = serializer.save()
return Response(serializer.data)
return Response(serializer.errors )
class CategoryViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Category.objects.all()
serializer_context = {
'request': request,
}
serializer = CategorySerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def post(self, request, format=None):
post= get_object_or_404(Post, pk=pk)
serializer = CategorySerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TagViewSet(viewsets.ViewSet):
def list(self ,request):
queryset = Tag.objects.all()
serializer_context = {
'request': request,
}
serializer = TagSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Tag.objects.all()
tag = get_object_or_404(queryset, pk=pk)
serializer_context = {
'request': request,
}
serializer = TagSerializer(tag, context=serializer_context)
return Response(serializer.data)
class HideViewSet(viewsets.ViewSet):
def hidden(self ,request):
queryset = Post.objects.filter(status='hidden')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
class DraftViewSet(viewsets.ViewSet):
def draft(self ,request):
queryset = Post.objects.filter(status='draft')
serializer_context = {'request': request,}
serializer = PostSerializer(queryset, many=True, context=serializer_context)
return Response(serializer.data)
| 34.75 | 90 | 0.658697 | 4,293 | 0.908379 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.021371 |
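Because the ViewSets above define custom method names (post, put, delete) instead of DRF's standard create/update/destroy actions, a plain router will not expose those handlers. One option is explicit action maps; the sketch below is a hedged illustration, and the URL paths and route names are assumptions rather than code from the project.

```python
from django.urls import path
from blog import views

post_list = views.PostViewSet.as_view({'get': 'list', 'post': 'post'})
post_detail = views.PostViewSet.as_view({'get': 'retrieve', 'put': 'put', 'delete': 'delete'})

urlpatterns = [
    path('posts/', post_list, name='post-list'),
    path('posts/<int:pk>/', post_detail, name='post-detail'),
]
```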
7d5a512e475a15e2cba00eeed5fa7df50d174682 | 15,479 | py | Python | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
]
| null | null | null | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
]
| null | null | null | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for restful API server of Radio station"""
import _ssl
import base64
import json
import logging
import pickle
import ssl
from concurrent import futures
from typing import List
import grpc
from sanic import Sanic, response
from sanic.views import HTTPMethodView
from loopchain import configure as conf, utils
from loopchain.baseservice import PeerManager, PeerStatus
from loopchain.baseservice import StubManager
from loopchain.baseservice.ca_service import CAService
from loopchain.components import SingletonMetaClass
from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code
from loopchain.utils import loggers
def get_channel_name_from_args(args) -> str:
return args.get('channel', conf.LOOPCHAIN_DEFAULT_CHANNEL)
class ServerComponents(metaclass=SingletonMetaClass):
def __init__(self):
self.__app = Sanic(__name__)
self.__app.config.KEEP_ALIVE = False
# Create an SSL context (or not) depending on the configured SSL auth type.
if conf.REST_SSL_TYPE is conf.SSLAuthType.none:
self.__ssl_context = None
elif conf.REST_SSL_TYPE == conf.SSLAuthType.server_only:
self.__ssl_context = (conf.DEFAULT_SSL_CERT_PATH, conf.DEFAULT_SSL_KEY_PATH)
elif conf.REST_SSL_TYPE == conf.SSLAuthType.mutual:
self.__ssl_context = ssl.SSLContext(_ssl.PROTOCOL_SSLv23)
self.__ssl_context.verify_mode = ssl.CERT_REQUIRED
self.__ssl_context.check_hostname = False
self.__ssl_context.load_verify_locations(cafile=conf.DEFAULT_SSL_TRUST_CERT_PATH)
self.__ssl_context.load_cert_chain(conf.DEFAULT_SSL_CERT_PATH, conf.DEFAULT_SSL_KEY_PATH)
else:
utils.exit_and_msg(
f"REST_SSL_TYPE must be one of [0,1,2]. But now conf.REST_SSL_TYPE is {conf.REST_SSL_TYPE}")
@property
def app(self):
return self.__app
@property
def stub(self):
return self.__stub_to_rs_service
@property
def ssl_context(self):
return self.__ssl_context
def set_stub_port(self, port):
self.__stub_to_rs_service = StubManager(
conf.IP_LOCAL + ':' + str(port), loopchain_pb2_grpc.RadioStationStub, ssl_auth_type=conf.GRPC_SSL_TYPE
)
def set_resource(self):
self.__app.add_route(Peer.as_view(), '/api/v1/peer/<request_type:string>')
self.__app.add_route(Configuration.as_view(), '/api/v1/conf')
self.__app.add_route(Certificate.as_view(), '/api/v1/cert/<request_type:string>/<certificate_type:string>')
def get_peer_list(self, channel):
return self.__stub_to_rs_service.call(
"GetPeerList",
loopchain_pb2.CommonRequest(request="", group_id=conf.ALL_GROUP_ID, channel=channel))
def get_leader_peer(self, channel):
return self.__stub_to_rs_service.call(
"Request",
loopchain_pb2.Message(code=message_code.Request.peer_get_leader, channel=channel))
def get_peer_status(self, peer_id, group_id, channel):
return self.__stub_to_rs_service.call_in_times(
"GetPeerStatus",
loopchain_pb2.PeerID(peer_id=peer_id, group_id=group_id, channel=channel))
def get_peer_status_async(self, peer_id, group_id, channel) -> grpc.Future:
return self.__stub_to_rs_service.call_async(
"GetPeerStatus",
loopchain_pb2.PeerID(peer_id=peer_id, group_id=group_id, channel=channel))
def get_configuration(self, conf_info):
return self.__stub_to_rs_service.call(
"Request",
loopchain_pb2.Message(code=message_code.Request.rs_get_configuration, meta=conf_info))
def set_configuration(self, conf_info):
return self.__stub_to_rs_service.call(
"Request",
loopchain_pb2.Message(code=message_code.Request.rs_set_configuration, meta=conf_info))
def response_simple_success(self):
result = {
'response_code': message_code.Response.success,
'message': message_code.get_response_msg(message_code.Response.success)
}
return result
def abort_if_url_doesnt_exist(self, request_type, type_list):
result = {'response_code': message_code.Response.fail}
if request_type not in type_list.values():
result['message'] = "The resource doesn't exist"
return result
def ready(self):
async def ready_tasks():
from loopchain import loggers
loggers.get_preset().update_logger()
loggers.update_other_loggers()
logging.debug('rest_server:initialize complete.')
self.__app.add_task(ready_tasks())
def serve(self, api_port):
self.ready()
self.__app.run(host='0.0.0.0', port=api_port, debug=False, ssl=self.ssl_context)
class Peer(HTTPMethodView):
__REQUEST_TYPE = {
'PEER_LIST': 'list',
'LEADER_PEER': 'leader',
'PEER_STATUS': 'status',
'PEER_STATUS_LIST': 'status-list'
}
async def get(self, request, request_type):
# args = ServerComponents().parser.parse_args()
args = request.raw_args
channel = get_channel_name_from_args(args)
logging.debug(f'channel name : {channel}')
if request_type == self.__REQUEST_TYPE['PEER_LIST']:
grpc_response = ServerComponents().get_peer_list(channel)
peer_manager = PeerManager(channel)
peer_list_data = pickle.loads(grpc_response.peer_list)
peer_manager.load(peer_list_data, False)
all_peer_list = []
connected_peer_list = []
leader_peer_id = ""
leader_peer = peer_manager.get_leader_peer(conf.ALL_GROUP_ID, is_peer=False) # for set peer_type info to peer
if leader_peer is not None:
leader_peer_id = leader_peer.peer_id
for peer_id in peer_manager.peer_list[conf.ALL_GROUP_ID]:
peer_each = peer_manager.peer_list[conf.ALL_GROUP_ID][peer_id]
peer_data = self.__change_format_to_json(peer_each)
if peer_each.peer_id == leader_peer_id:
peer_data['peer_type'] = loopchain_pb2.BLOCK_GENERATOR
else:
peer_data['peer_type'] = loopchain_pb2.PEER
all_peer_list.append(peer_data)
if peer_each.status == PeerStatus.connected:
connected_peer_list.append(peer_data)
json_data = {
'registered_peer_count': peer_manager.get_peer_count(),
'connected_peer_count': peer_manager.get_connected_peer_count(),
'registered_peer_list': all_peer_list,
'connected_peer_list': connected_peer_list
}
result = {
'response_code': message_code.Response.success,
'data': json_data
}
elif request_type == self.__REQUEST_TYPE['PEER_STATUS_LIST']:
grpc_response = ServerComponents().get_peer_list(channel)
peer_manager = PeerManager(channel)
peer_list_data = pickle.loads(grpc_response.peer_list)
peer_manager.load(peer_list_data, False)
async_futures: List[grpc.Future] = []
for peer_id in peer_manager.peer_list[conf.ALL_GROUP_ID]:
async_future = ServerComponents().get_peer_status_async(peer_id, conf.ALL_GROUP_ID, channel)
async_futures.append(async_future)
futures.as_completed(async_futures)
all_peer_list = []
for async_future, peer_id in zip(async_futures, peer_manager.peer_list[conf.ALL_GROUP_ID]):
if async_future.exception():
logging.warning(f'RequestType({request_type}), exception({async_future.exception()})')
continue
grpc_response = async_future.result()
if grpc_response is not None and grpc_response.status != "":
peer_each = peer_manager.peer_list[conf.ALL_GROUP_ID][peer_id]
status_json = json.loads(grpc_response.status)
status_json["order"] = peer_each.order
all_peer_list.append(status_json)
json_data = {
'registered_peer_count': peer_manager.get_peer_count(),
'connected_peer_count': peer_manager.get_connected_peer_count(),
'peer_status_list': all_peer_list
}
result = {
'response_code': message_code.Response.success,
'data': json_data
}
elif request_type == self.__REQUEST_TYPE['LEADER_PEER']:
grpc_response = ServerComponents().get_leader_peer(channel)
result = dict()
result['response_code'] = grpc_response.code
if grpc_response.code == message_code.Response.success:
result['data'] = self.__change_format_to_json(pickle.loads(grpc_response.object))
else:
result['message'] = message_code.get_response_msg(grpc_response.code)
elif request_type == self.__REQUEST_TYPE['PEER_STATUS']:
peer_id = args['peer_id']
group_id = args['group_id']
if peer_id is None or group_id is None:
return self.__abort_if_arg_isnt_enough('peer_id, group_id')
# logging.debug(f"try get_peer_status peer_id({peer_id}), group_id({group_id})")
grpc_response = ServerComponents().get_peer_status(args['peer_id'], args['group_id'], channel)
result = json.loads(grpc_response.status)
else:
return ServerComponents().abort_if_url_doesnt_exist(request_type, self.__REQUEST_TYPE)
return response.json(result)
def __change_format_to_json(self, peer):
json_data = {
'order': peer.order,
'peer_id': peer.peer_id,
'group_id': peer.group_id,
'target': peer.target,
'cert': base64.b64encode(peer.cert).decode("utf-8"),
'status_update_time': str(peer.status_update_time),
'status': peer.status
}
return json_data
def __abort_if_arg_isnt_enough(self, param_name):
result = dict()
result['response_code'] = message_code.Response.fail_validate_params
result['message'] = \
message_code.get_response_msg(result['response_code']) \
+ ". You must throw all of parameters : " + param_name
return result
class Configuration(HTTPMethodView):
async def get(self, request):
# args = ServerComponents().parser.parse_args()
args = request.raw_args
if 'name' in args:
json_data = {'name': args['name']}
request_data = json.dumps(json_data)
else:
request_data = ''
grpc_response = ServerComponents().get_configuration(request_data)
result = {'response_code': grpc_response.code}
if grpc_response.meta != "":
result['data'] = json.loads(grpc_response.meta)
else:
result['message'] = grpc_response.message
return response.json(result)
async def post(self, request):
result = dict()
request_data = request.json
try:
if request_data is None:
result['response_code'] = message_code.Response.fail
result['message'] = 'You must throw parameter of JSON when you call (/api/v1/conf) by post method.'
else:
grpc_response = ServerComponents().set_configuration(json.dumps(request_data))
result = {
'response_code': grpc_response.code,
'message': message_code.get_response_msg(message_code.Response.success)
}
except ValueError as e:
result['response_code'] = message_code.Response.fail
result['message'] = str(e)
return response.json(result)
class Certificate(HTTPMethodView):
__REQUEST_TYPE = {
'CERT_LIST': 'list',
'ISSUE': 'issue'
}
__CERTIFICATE_TYPE = {
'CA': 'ca',
'PEER': 'peer'
}
_DEFAULT_PATH = "resources/testcerts/"
_DEFAULT_COMMON_NAME = "Test CA"
_DEFAULT_ORGANIZATION_UNIT = "DEV"
_DEFAULT_ORGANIZATION = "THeLoop"
_DEFAULT_COUNTRY = "kr"
_DEFAULT_PERIOD = 5
async def get(self, request, request_type, certificate_type):
ca = CAService(self._DEFAULT_PATH, None)
result = dict()
if request_type == self.__REQUEST_TYPE['CERT_LIST']:
if certificate_type == self.__CERTIFICATE_TYPE['CA']:
certificate = ca.get_ca_certificate()
result['response_code'] = message_code.Response.success
result['data'] = ca.get_certificate_json(certificate)
elif certificate_type == self.__CERTIFICATE_TYPE['PEER']:
certificate = ca.get_peer_certificate_list()
cert_json = []
for cert_key in certificate:
cert_peer = ca.get_peer_certificate(cert_key)
cert_json.append(ca.get_certificate_json(cert_peer))
result['response_code'] = message_code.Response.success
result['data'] = cert_json
else:
return ServerComponents().abort_if_url_doesnt_exist(certificate_type, self.__CERTIFICATE_TYPE)
elif request_type == self.__REQUEST_TYPE['ISSUE']:
if certificate_type == self.__CERTIFICATE_TYPE['CA']:
ca.generate_ca_cert(
cn=self._DEFAULT_COMMON_NAME,
ou=self._DEFAULT_ORGANIZATION_UNIT,
o=self._DEFAULT_ORGANIZATION,
expire_period=self._DEFAULT_PERIOD,
password=None
)
return ServerComponents().response_simple_success()
elif certificate_type == self.__CERTIFICATE_TYPE['PEER']:
if ca.is_secure is False:
return self.__abort_if_CA_certificate_loading_fails()
else:
ca.generate_peer_cert(self._DEFAULT_COMMON_NAME, None)
return ServerComponents().response_simple_success()
else:
return ServerComponents().abort_if_url_doesnt_exist(certificate_type, self.__CERTIFICATE_TYPE)
else:
return ServerComponents().abort_if_url_doesnt_exist(request_type, self.__REQUEST_TYPE)
return response.json(result)
def __abort_if_CA_certificate_loading_fails(self):
result = {
'response_code': message_code.Response.fail,
'message': 'Fail loading of CA certificate.'
}
return response.json(result)
| 38.600998 | 122 | 0.640481 | 14,163 | 0.913094 | 0 | 0 | 194 | 0.012507 | 8,668 | 0.558829 | 2,447 | 0.157759 |
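Putting the pieces above together, a minimal start-up sequence for this REST server might look like the sketch below; the port numbers are placeholders, not values from the loopchain configuration.

```python
components = ServerComponents()
components.set_stub_port(7102)   # gRPC port of the RadioStation service (assumed value)
components.set_resource()        # register /api/v1/peer, /api/v1/conf and /api/v1/cert routes
components.serve(api_port=9002)  # blocks, running the Sanic app (with SSL if configured)
```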
7d5ba93142fb8ff5765303ca6b3001d2cd9dccdf | 10,178 | py | Python | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
]
| null | null | null | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
]
| null | null | null | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
]
| null | null | null | #
# Author: John Tran <[email protected]>
# Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_sqlalchemy.py
.. note::
In order to run the tests against real SQL server set the environment
variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
the tests.
"""
import datetime
import repr
import mock
from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqla_alarm
from ceilometer.openstack.common import timeutils
from ceilometer.storage import impl_sqlalchemy
from ceilometer.storage import models
from ceilometer.storage.sqlalchemy import models as sql_models
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.storage import test_storage_scenarios as scenarios
@tests_db.run_with('sqlite')
class CeilometerBaseTest(tests_db.TestBase):
def test_ceilometer_base(self):
base = sql_models.CeilometerBase()
base['key'] = 'value'
self.assertEqual('value', base['key'])
@tests_db.run_with('sqlite')
class TraitTypeTest(tests_db.TestBase):
# TraitType is a construct specific to sqlalchemy.
# Not applicable to other drivers.
def test_trait_type_exists(self):
tt1 = self.conn._get_or_create_trait_type("foo", 0)
self.assertTrue(tt1.id >= 0)
tt2 = self.conn._get_or_create_trait_type("foo", 0)
self.assertEqual(tt2.id, tt1.id)
self.assertEqual(tt2.desc, tt1.desc)
self.assertEqual(tt2.data_type, tt1.data_type)
def test_new_trait_type(self):
tt1 = self.conn._get_or_create_trait_type("foo", 0)
self.assertTrue(tt1.id >= 0)
tt2 = self.conn._get_or_create_trait_type("blah", 0)
self.assertNotEqual(tt1.id, tt2.id)
self.assertNotEqual(tt1.desc, tt2.desc)
# Test the method __repr__ returns a string
self.assertTrue(repr.repr(tt2))
def test_trait_different_data_type(self):
tt1 = self.conn._get_or_create_trait_type("foo", 0)
self.assertTrue(tt1.id >= 0)
tt2 = self.conn._get_or_create_trait_type("foo", 1)
self.assertNotEqual(tt1.id, tt2.id)
self.assertEqual(tt2.desc, tt1.desc)
self.assertNotEqual(tt1.data_type, tt2.data_type)
# Test the method __repr__ returns a string
self.assertTrue(repr.repr(tt2))
@tests_db.run_with('sqlite')
class EventTypeTest(tests_db.TestBase):
# EventType is a construct specific to sqlalchemy
# Not applicable to other drivers.
def test_event_type_exists(self):
et1 = self.conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.conn._get_or_create_event_type("foo")
self.assertEqual(et2.id, et1.id)
self.assertEqual(et2.desc, et1.desc)
def test_event_type_unique(self):
et1 = self.conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.conn._get_or_create_event_type("blah")
self.assertNotEqual(et1.id, et2.id)
self.assertNotEqual(et1.desc, et2.desc)
# Test the method __repr__ returns a string
self.assertTrue(repr.repr(et2))
class MyException(Exception):
pass
@tests_db.run_with('sqlite')
class EventTest(tests_db.TestBase):
def test_string_traits(self):
model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text")
trait = self.conn._make_trait(model, None)
self.assertEqual(models.Trait.TEXT_TYPE, trait.trait_type.data_type)
self.assertIsNone(trait.t_float)
self.assertIsNone(trait.t_int)
self.assertIsNone(trait.t_datetime)
self.assertEqual("my_text", trait.t_string)
self.assertIsNotNone(trait.trait_type.desc)
def test_int_traits(self):
model = models.Trait("Foo", models.Trait.INT_TYPE, 100)
trait = self.conn._make_trait(model, None)
self.assertEqual(models.Trait.INT_TYPE, trait.trait_type.data_type)
self.assertIsNone(trait.t_float)
self.assertIsNone(trait.t_string)
self.assertIsNone(trait.t_datetime)
self.assertEqual(100, trait.t_int)
self.assertIsNotNone(trait.trait_type.desc)
def test_float_traits(self):
model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456)
trait = self.conn._make_trait(model, None)
self.assertEqual(models.Trait.FLOAT_TYPE, trait.trait_type.data_type)
self.assertIsNone(trait.t_int)
self.assertIsNone(trait.t_string)
self.assertIsNone(trait.t_datetime)
self.assertEqual(123.456, trait.t_float)
self.assertIsNotNone(trait.trait_type.desc)
def test_datetime_traits(self):
now = datetime.datetime.utcnow()
model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now)
trait = self.conn._make_trait(model, None)
self.assertEqual(models.Trait.DATETIME_TYPE,
trait.trait_type.data_type)
self.assertIsNone(trait.t_int)
self.assertIsNone(trait.t_string)
self.assertIsNone(trait.t_float)
self.assertEqual(now, trait.t_datetime)
self.assertIsNotNone(trait.trait_type.desc)
def test_bad_event(self):
now = datetime.datetime.utcnow()
m = [models.Event("1", "Foo", now, []),
models.Event("2", "Zoo", now, [])]
with mock.patch.object(self.conn, "_record_event") as mock_save:
mock_save.side_effect = MyException("Boom")
problem_events = self.conn.record_events(m)
self.assertEqual(2, len(problem_events))
for bad, event in problem_events:
self.assertEqual(bad, models.Event.UNKNOWN_PROBLEM)
def test_get_none_value_traits(self):
model = sql_models.Trait(None, None, 5)
self.assertIsNone(model.get_value())
self.assertTrue(repr.repr(model))
def test_event_repr(self):
ev = sql_models.Event('msg_id', None, False)
ev.id = 100
self.assertTrue(repr.repr(ev))
@tests_db.run_with('sqlite')
class RelationshipTest(scenarios.DBTestBase):
# Note: Do not derive from SQLAlchemyEngineTestBase, since we
# don't want to automatically inherit all the Meter setup.
@mock.patch.object(timeutils, 'utcnow')
def test_clear_metering_data_meta_tables(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
self.conn.clear_expired_metering_data(3 * 60)
session = self.conn._engine_facade.get_session()
meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
sql_models.MetaBigInt, sql_models.MetaBool]
for table in meta_tables:
self.assertEqual(0, (session.query(table)
.filter(~table.id.in_(
session.query(sql_models.Sample.id)
.group_by(sql_models.Sample.id))).count()
))
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'resources': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'samples': {'pagination': True,
'groupby': True,
'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'pagination': False,
'groupby': True,
'query': {'simple': True,
'metadata': True,
'complex': False},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
'events': {'query': {'simple': True}}
}
actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_sqla_alarm.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = (impl_sqlalchemy.
Connection.get_storage_capabilities())
self.assertEqual(expected_capabilities, actual_capabilities)
| 40.070866 | 78 | 0.610434 | 8,677 | 0.852525 | 0 | 0 | 6,111 | 0.600413 | 0 | 0 | 1,980 | 0.194537 |
7d5e7f22dbe1241e9828565a5008c4bed0402c69 | 17,906 | py | Python | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
]
| null | null | null | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
]
| null | null | null | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
]
| null | null | null | from collections import namedtuple
import datetime
import pprint
import sys
import copy
import json
def expandStatusValue(v):
"""
v : string | (string, datetime.date | None)
If v is a string, expand it to (string, None).
"""
if isinstance(v, str):
v = (v, None)
return v
def formatDate(d):
if not d:
return "????-??-??"
return "{0:%Y-%m-%d}".format(d)
"""
title:
project name
url:
project page URL
owner:
primary owner
status:
"" : not started
"o" : in progress
"v" : done
startDate:
date work is planned to start
"" | "yyyy-mm-dd"
endDate:
completion date
blocking:
reason the project cannot be started yet
doc:
notes
milestones:
(finishDate : datetime.date | None, title : string)[]
"""
class Project:
def __init__(self, codeName="", title="", url="", owner="", priority=100, status={}, days=0,
startDate=None, endDate=None, blocking="", doc="", milestones=[], epic=""):
self.index = 0
self.codeName = codeName
self.title = title
self.url = url
self.owner = owner
self.orig_owner = owner
self.priority = priority
self.status = dict([(k, expandStatusValue(v)) for k, v in status.items()])
# pprint.pprint(self.status)
self.days = days
self.startDate = startDate
self.endDate = endDate
self.doc = doc
self.blocking = blocking
self.put = False
self.milestones = milestones
self.epic = epic
def isDone(self):
return self.status["End"][0]=="v"
def doing(self):
sd = self.startDate
if sd is None:
sd = datetime.date(3000, 1, 1)
ed = self.endDate
if ed is None:
ed = datetime.date(3000, 1, 1)
now = datetime.date.today()
return sd <= now and now <= ed
def fixed(self):
return self.owner != "" and self.startDate is not None and self.endDate is not None
def getMilestones(self, status_master):
"""
return (datetime.date, label)[]
"""
sm = dict(status_master)
rv = [ (v[1], self.title+" "+sm[k]+" (主担当: "+self.owner+")") for k, v in self.status.items() ] + self.milestones
return list(filter(lambda v: v[0], rv))
colorDone = "#DDFADE"
colorDoing = "#E0F0FF"
def hsv2rgb(hsv):
"""
hsv: [h, s, v]
h in [0, 360]
s in [0, 1]
v in [0, 1]
return [r, g, b]
r, g, b in [0, 1]
"""
h = hsv[0]
s = hsv[1]
v = hsv[2]
hd = h/60; # in [0, 6]
r = v
g = v
b = v
if s > 0:
hdi = max(0, min(5, int(hd)));
f = hd - hdi
if hdi==0:
g *= 1 - s * (1-f)
b *= 1 - s
elif hdi==1:
r *= 1 - s * f
b *= 1 - s
elif hdi==2:
r *= 1 - s
b *= 1 - s * (1-f)
elif hdi==3:
r *= 1 - s
g *= 1 - s * f
elif hdi==4:
r *= 1 - s * (1-f)
g *= 1 - s
elif hdi==5:
g *= 1 - s
b *= 1 - s * f
return [r, g, b]
def rgb2hex(rgb):
return "#%02x%02x%02x" % (int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255))
def statusCell(st, name, label):
s, endDate = st[name]
col = ""
if s=="v":
col = colorDone
if s=="o":
col = colorDoing
style=""
if col:
style = "background-color: {col};".format(**vars())
text = " "
if endDate:
tentative = "<br>(仮)" if datetime.date.today() <= endDate else ""
text = "<span style='font-size: 0.7em;'>{endDate.year:04}-{endDate.month:02}-{endDate.day:02}{tentative}</span>".format(**vars())
return """<td style="{style}">{text}</td>""".format(**vars())
def genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels):
"""
getLabels: index:int, project -> label[]
"""
### Generate milestone list
# milestones: (datetime.date, label)[]
milestones = sum([ p.getMilestones(status_master) for p in projects], []) + additional_milestones
milestones = sorted(milestones)
s = []
for d, l in milestones:
color = "black" if datetime.date.today() <= d else "#c0c0c0"
tentative = " (仮)" if datetime.date.today() <= d else ""
s.append("<li style='color:"+color+"'>"+formatDate(d)+tentative+" "+l+"</li><br>")
s = "\n".join(s)
html = """
<ul>
<li>今後のマイルストーン一覧</li>
<ul>
{s}
</ul>
</ul>
<div id="filters">フィルタ (AND): </div>
""".format(**vars())
### Generate project list
def sortFun(v):
return v.priority + (1000 if v.isDone() else 0) + (500 if v.blocking else 0)
projects = sorted(projects, key=sortFun)
statusTitles = "".join([ """<td style="width: 5%;">{label}</td>""".format(**vars()) for name, label in status_master])
html += """
<html><body><table class="projects">
<tr class="title">
<td style="width: 5%;">番号</td>
<td style="width: 5%;">優先度</td>
<td>プロジェクト名</td>
{statusTitles}
<td style="width: 5%;">主担当</td>
<td style="width: 10%;">メモ</td>
<td style="width: 10%;">作業期間(予定)</td>
</tr>
""".format(**vars())
labels = {}
for i, p in enumerate(projects):
if p.startDate:
startS = "{0:%Y-%m-%d}".format(p.startDate)
endS = "{0:%Y-%m-%d}".format(p.endDate)
schedule = "{startS}<br>〜{endS}".format(**vars())
if p.isDone():
schedule = ""
title = p.title
if p.url:
title = """<a href="{p.url}">{title}</a>""".format(**vars())
# status = StatusDetail(p.status)
statusTitles = "".join([ statusCell(p.status, name, label) for name, label in status_master])
trCol = "white" if i%2==0 else "#f0f0f0"
schedule_bg = "background-color: "+colorDoing+";" if p.doing() else ""
index = i+1
owner_note = ""
doc_note = ""
if p.orig_owner=="":
owner_note = "(仮)"
doc_note = "(TODO 主担当決め)"
tasks = ""
if p.epic:
link = ticketLinkFun(p.epic)
style = """background-color: darkgreen; color: white; text-decoration: none; font-size: 0.8em; padding: 4px; border-radius: 10px;"""
tasks = """<a href="{link}" target="_blank" style="{style}">Tasks</a>""".format(**vars())
odd = "odd" if i%2==0 else ""
id = "project%04d" % i
labels[id] = getLabels(i, p)
html += """
<tr style="background-color: {trCol}" id="{id}">
<td>{index}</td>
<td>{p.priority}</td>
<td>
<a name="{p.codeName}"></a>
<span style="font-size: 0.8em; font-weight: bold; color: #5050c0;">
<a style="text-decoration: none;" href="#{p.codeName}">{p.codeName}</a>
</span>
{tasks}<br>
{title}
</td>
{statusTitles}
<td>{p.owner}{owner_note}</td>
<td>{p.doc}{doc_note}<span style="color: red;">{p.blocking}</span></td>
<td style="font-size: 0.5em;{schedule_bg}">{schedule}</td>
</tr>
""".format(**vars())
html += """
</table></body></html>
"""
return html, labels
def Xsect(p0, p1):
# return Xsect(p0.startDate, p0.endDate, p1.startDate, p1.endDate)
if any([ v is None for v in [p0.startDate, p0.endDate, p1.startDate, p1.endDate]]):
return False
return not (p1.endDate < p0.startDate or p0.endDate < p1.startDate)
#def Xsect(s0, e0, s1, e1):
# return not (e1 < s0 or e0 < s1)
def dupCheck(p, projects):
"""
Return True if this project does not overlap another assignment for the same owner.
"""
if p.isDone():
return True
if not p.fixed():
return True
for pp in projects:
if pp.fixed() and not pp.isDone() and p.owner==pp.owner and p.title != pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return False
return True
def isClone(name):
"""
Whether the name is a clone.
Clones can only receive explicitly assigned projects.
"""
return any([str(i) in name for i in range(10)])
def assign(projects, people):
"""
return
Dict
person -> project[]
"""
# Schedule table: once owners are assigned, when each project finishes (owner x project table)
# TODO place projects with a fixed startDate first
# owner -> earliest date they can start new work
freeDates = dict([(p, datetime.date.min) for p, _ in people])
# owner -> {startDate, project}[]
schedule = {}
"""
startDateFixed
place projects whose start date is already fixed
canStart
place projects whose start date is not fixed yet
blocking
place projects that cannot be started yet
"""
for phase in ["startDateFixed", "canStart", "blocking"]:
print("\nPhase", phase, "\n")
if phase=="canStart":
for k in freeDates:
freeDates[k] = max(freeDates[k], datetime.date.today())
for i, p in enumerate(sorted(projects, key=lambda v: (v.priority, v.title))):
if phase!="blocking" and p.blocking:
continue
if phase=="startDateFixed" and p.startDate is None:
continue
if p.isDone():
continue
if p.put:
continue
print("Try to put", p.title)
def filterFun(name):
pp = copy.deepcopy(p)
pp.owner = name
return dupCheck(pp, projects)
def getFreePerson(freeDates):
cands = sorted([ kv for kv in freeDates.items() if not isClone(kv[0]) and filterFun(kv[0]) ], key=lambda v: (v[1], v[0]))
print(cands)
return cands[0][0]
person = p.owner
if person=="":
person = getFreePerson(freeDates)
# print(person)
origStartDate = p.startDate
origEndDate = p.endDate
if p.blocking:
# Later
p.startDate = datetime.date.today() + datetime.timedelta(365*3+i*30)
p.endDate = p.startDate + datetime.timedelta(30)
if p.startDate is None:
p.startDate = freeDates[person]
if p.endDate is None:
p.endDate = p.startDate + datetime.timedelta(90)
if not dupCheck(p, projects):
p.startDate = origStartDate
p.endDate = origEndDate
# continue
sys.exit(0)
schedule.setdefault(person, [])
p.owner = person
print("Put", p.title, p.startDate, p.endDate, person)
schedule[person].append(p)
p.put = True
freeDates[person] = max(freeDates[person], p.endDate + datetime.timedelta(1))
#pprint.pprint(freeDates)
# pprint.pprint(schedule)
# for p in projects:
# print("[]", p.title, p.startDate, p.endDate)
for p in projects:
if not p.isDone():
for pp in projects:
if not pp.isDone() and p.title != pp.title and p.owner==pp.owner and p.title < pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return schedule
def genScheduleHtml(projects, schedule, people, ticketLinkFun):
"""
schedule
Dict
person -> project[]
"""
# date x owner table
allDates = [ d for ps in schedule.values() for p in ps for d in [p.startDate, p.endDate]]
minDate = min(allDates)
maxDate = max(allDates)
colors = [ rgb2hex(hsv2rgb([i/len(projects)*360, 0.1, 1])) for i in range(len(projects)) ]
startDateIndex = minDate.toordinal()
endDateIndex = maxDate.toordinal()
N = endDateIndex - startDateIndex + 1
# print(N)
def createRow():
return [ ["", ""] for _ in range(len(people)+1) ]
table = {0: createRow()}
# Periodic date rows (1st / 15th / 30th of each month)
for i in range(10000):
d = minDate + datetime.timedelta(i)
if maxDate < d:
break
if d.day in [1, 15, 30]:
table.setdefault(d.toordinal(), createRow())
wp = 95/len(people)
# Place projects
for i, (person, ps) in enumerate(sorted(schedule.items())):
if person not in [p for p, _ in people]:
continue
for p in ps:
# print(p.startDate, p.endDate)
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in [si, ei]:
table.setdefault(d, createRow())
if d==si:
title = p.title
if p.url:
title = """
<a href="{p.url}">{title}</a>
""".format(**vars())
title += "<br>"
doc = p.doc.replace("\n", "<br>")
title += """
<span style="font-size: 0.8em;">{doc}</span>""".format(**vars())
title += """<br><span style="color: red;">{p.blocking}</span>""".format(**vars())
table[d][i+1][0] = title
table[d][i+1][1] = "font-size: 1em;"
# Fill in colors
for i, (person, ps) in enumerate(sorted(schedule.items())):
for p in ps:
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in sorted(table.keys()):
if si <= d and d <= ei:
col = colors[p.index]
table[d][i+1][1] += "width: {wp}%; background-color: {col};".format(**vars())
# Date labels
today = datetime.date.today()
for d in table:
if d==0:
continue
da = datetime.date.fromordinal(d)
s = "{0:%Y-%m-%d}".format(da)
col = "white" if da.month % 2==0 else "#e0e0e0"
if da.year==today.year and da.month==today.month:
col = "#c0ffff"
style = "vertical-align: top; width: 5%; font-size: 3px; background-color: "+col+";"
table[d][0] = [s, style]
table = [ table[k] for k in sorted(table.keys()) ]
# pprint.pprint(table)
def createHeader():
"""
Generate the member header row.
"""
row = [["", ""]]
for i, (person, ps) in enumerate(sorted(schedule.items())):
row.append([person, "width: %f; background-color: #e0e0e0".format(**vars())])
return row
for i in range(0, len(table), 10):
table.insert(i, createHeader())
def tableToHtml(table):
html = "<table class='schedule'>"
for row in table:
html += "<tr>"
for text, style in row:
html += "<td style='{style}'>{text}</td>".format(**vars())
html += "</tr>"
html += "</table>"
return html
return tableToHtml(table)
######################
def createTasksHtml(titleAndEpics, members, ticketLinkFun):
def entry(label, url):
return """<a href="{url}" target="main_frame">{label}</a>""".format(**vars())
epics = [ epic for _, epic in titleAndEpics ]
epicHtml = " ".join([ entry(title, ticketLinkFun(epic)) for title, epic in titleAndEpics ])
memberHtml = " ".join([ entry(name, ticketLinkFun("", name)) for name in members ])
memberNotInEpicsHtml = " ".join([ entry(name, ticketLinkFun("", name, "", epics)) for name in members ])
notInEpicsHtml = entry("管理Epicに関連付けられてないチケット", ticketLinkFun("", "", "", epics))
html = """
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1, user-scalable=no">
</head>
<frameset rows="100px,*" frameborder=1 border=1>
<frame name="menu_frame" src="menu.html">
<frame name="main_frame" src="">
</frameset>
</html>
""".format(**vars())
filename = "tasks.html"
with open(filename, "w") as f:
print(html, file=f)
print("[ProjectManager.createTasksHtml] OK. Wrote", filename)
html = """
<html>
<head>
<meta charset="UTF-8">
</head>
<body style="margin: 0; font-size: 0.7em;">
Projects : {epicHtml}<br>
Members : {memberHtml}<br>
Members (管理Epicに関連付けられてないチケット): {memberNotInEpicsHtml}<br>
{notInEpicsHtml}<br>
</body>
</html>
""".format(**vars())
filename = "menu.html"
with open(filename, "w") as f:
print(html, file=f)
print("[ProjectManager.createTasksHtml] OK. Wrote", filename)
######################
def run(projects, people, status_master, ticketLinkFun, css="", project_list_header="", schedule_header="",
statusFilename="status.html",
tasksFilename="tasks.html",
additional_milestones=[],
getLabels=lambda i, p: []):
"""
people:
(Name, NameInTicketSystem)[]
ticketLinkFun:
epic : string, assignee : string, label : string -> url : string
milestones:
(datetime.date, label)[]
"""
codeNames = {}
for p in projects:
codeNames.setdefault(p.codeName, 0)
codeNames[p.codeName] += 1
bad = False
for k, v in codeNames.items():
if 1 < v:
print("[ERROR] Duplicate code name:", k, "(", v, "projects)")
bad = True
if bad:
print()
return
for i, p in enumerate(projects):
p.index = i
names = [ name for name, _ in people ]
if p.owner and p.owner not in names:
people.append((p.owner, ""))
people = list(set(people))
schedule = assign(projects, people)
projectsHtml, labels = genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels)
scheduleHtml = genScheduleHtml(projects, schedule, people, ticketLinkFun)
css = """
body {
margin: 0;
}
h1 {
font-size: 1.2em;
background-color: darkgreen;
color: white;
padding: 10px;
}
table {
border-spacing: 1;
margin-left: 20px;
}
table.projects tr.title td {
color: white;
padding: 5px;
}
table.projects tr.title {
background-color: darkgreen;
}
table.example tr td {
margin: 20px;
font-size: 0.9em;
}
table.schedule {
border-spacing: 0;
}
table.schedule tr td {
padding: 0;
}
#filters {
padding: 20px;
}
span.filter {
cursor: pointer;
padding: 20px;
border-radius: 40px;
margin: 10px;
}
""" + css
example = """
<table class="example"><tr>
<td style="background-color: white;">未着手</td>
<td style="background-color: {colorDoing};">作業中</td>
<td style="background-color: {colorDone};">完了</td>
</tr></table>
""".format(**globals())
projectLabels = json.dumps(labels)
labelsMaster = getLabels(0, None)
filters = json.dumps([ name for name, label in labelsMaster ])
filterLabels = json.dumps([ label for name, label in labelsMaster ])
vs = """
// Master data
var filters = {filters};
var filterLabels = {filterLabels};
var projectLabels = {projectLabels};
""".format(**vars())
ready = vs + """
        // Filter state: name -> bool
var filterEnabled = {};
        // Reflect the current filter state in the page
function applyFilters() {
Object.keys(projectLabels).forEach(function(eid) {
var labels = projectLabels[eid];
// console.log(eid, labels);
var show = true;
// Check all enabled filters are in labels
for(var fi=0;fi<filters.length;fi++) {
if(filterEnabled[filters[fi]]) {
var lok = 0;
for(var li=0;li<labels.length;li++) {
if(labels[li] == filters[fi]) lok=1;
}
if(!lok) show=false;
}
}
// console.log(show);
$("#"+eid).toggle(show);
});
for(var i=0;i<filters.length;i++) {
$(".filter#"+filters[i]).css({"background-color": filterEnabled[filters[i]] ? "#aaffaa" : "#eeeeee"});
}
// console.log(filterEnabled);
}
$(document).ready(function(){
            // Build the filter buttons
var html = "";
for(var i=0;i<filters.length;i++) {
var name = filters[i];
html += '<span class="filter" id="'+name+'">'+filterLabels[i]+'</span>';
}
$("#filters").html($("#filters").html() + html);
            // Toggle a filter on click
$(".filter").on("click", function(event) {
var name = $(event.target).attr("id");
filterEnabled[name] = !filterEnabled[name];
applyFilters();
});
applyFilters();
});
"""
html = """
<html>
<head>
<meta charset="utf-8" />
<script type="text/javascript" src="jquery-3.2.1.min.js"></script>
<style>
{css}
</style>
<script>
{ready}
</script>
</head>
<body>
{project_list_header}
<br><br>
{example}
<br><br>
{projectsHtml}
<br><br>
{schedule_header}
{scheduleHtml}
<hr>
<a href="https://github.com/kojingharang/ManagerKit/blob/master/ProjectManager.py">Source</a>
</body>
</html>
""".format(**vars())
with open(statusFilename, "w") as f:
print(html, file=f)
print("[ProjectManager.run] OK. Wrote", statusFilename)
titleAndEpics = [(p.title, p.epic) for p in sorted(projects, key=lambda p: p.priority) if p.epic and not p.isDone()]
members = [ name for _, name in people if name]
createTasksHtml(titleAndEpics, members, ticketLinkFun)
| 24.629986 | 135 | 0.627667 | 1,300 | 0.069825 | 0 | 0 | 0 | 0 | 0 | 0 | 7,969 | 0.428027 |
7d60c0b18a3d86b57134273bbd22d9fd56431efb | 18,643 | py | Python | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
]
| 2 | 2016-09-29T09:24:22.000Z | 2021-01-15T06:11:04.000Z | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
]
| null | null | null | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
]
| null | null | null | import datetime
import ephem
import os.path
import os
import numpy as np
import pdb
from pandas import DataFrame
__version__ = '0.1.2'
class Error(Exception):
pass
def _convert_datetime_to_pyephem_date_string(in_datetime):
return in_datetime.strftime('%Y/%m/%d %H:%M:%S')
def _find_cached_file(filename):
base = os.path.expanduser('~/')
# Look in a few likely locations before doing a giant search
filenames_to_test = [os.path.join(base, filename),
os.path.join(base, 'refdata', filename),
os.path.join(base, 'Dropbox', filename),
os.path.join(base, 'Dropbox', 'refdata', filename)]
for cur_filename in filenames_to_test:
if os.path.isfile(cur_filename):
return cur_filename
# didn't find it, so do a giant search
for root, dirs, files in os.walk(base):
if filename in files:
return os.path.join(root, filename)
return "File Not Found"
def get_latlon_from_observatory_code(code):
if type(code) is int:
code = '%03i' % code
elif type(code) is str:
code = code[:3] # trim any remainder, like @399
try:
obscode_filename = _find_cached_file('ObsCodes.html')
# TODO: add a verbose option to print path to ObsCodes.html
obscodes = open(obscode_filename, 'r').read().splitlines()
except:
raise Error("Problem reading ObsCodes.html file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/lists/ObsCodes.html")
curobsline = [a for a in obscodes if a.startswith(code)][0]
output = {'obscode':curobsline[0:3],
'longitude':float(curobsline[4:13]),
'cos':float(curobsline[13:21]),
'sin':float(curobsline[21:30]),
'name':curobsline[30:].strip()}
# From the documentation:
# "The following list gives the observatory code, longitude (in degrees east of Greenwich) and the parallax
# constants (rho cos phi' and rho sin phi', where phi' is the geocentric latitude and rho is the geocentric
# distance in earth radii) for each observatory. It is updated nightly."
output['latitude'] = np.degrees(np.arctan2(output['sin'], output['cos']))
# Unsure where the following comment came from:
# geocentric distance in earth radii:
# output['sin']/np.sin(np.radians(output['latitude']))
# NOTE: while ObsCodes.html is clear about being geocentric, it is unclear what pyephem wants, so blaze ahead
# TODO: confirm whether pyephem wants geocentric
return output
def pack_mpc_date(in_datetime):
"""
Convert a datetime.date or datetime.datetime object into the MPC packed date format, as described at:
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
if in_datetime.year >= 1800 and in_datetime.year < 1900:
century = 'I'
elif in_datetime.year >= 1900 and in_datetime.year < 2000:
century = 'J'
elif in_datetime.year >= 2000 and in_datetime.year < 2100:
century = 'K'
else:
raise Error("Year is not within 1800-2099: " + in_datetime.isoformat())
year = in_datetime.strftime('%y')
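    # Map 0-9 to '0'-'9' and 10-31 to 'A'-'V' for the packed month/day fields.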
translate = {}
for i in range(10):
translate[i] = str(i)
for i in range(10,32):
translate[i] = chr(ord('A') + i - 10)
month = translate[in_datetime.month]
day = translate[in_datetime.day]
try:
decimaldays = ('%7.5f' % ((in_datetime.hour + (in_datetime.minute / 60.) + (in_datetime.second / 3600.)) / 24.))[2:]
except:
decimaldays = ''
return century + year + month + day + decimaldays
def unpack_mpc_date(in_packed):
"""
Convert a MPC packed date format (as described below) to a datetime.date or datetime.datetime object
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
translate = {}
for i in range(10):
translate[str(i)] = i
for i in range(10,32):
translate[chr(ord('A') + i - 10)] = i
if in_packed[0] == 'I':
year = 1800
elif in_packed[0] == 'J':
year = 1900
elif in_packed[0] == 'K':
year = 2000
else:
raise Error('Unrecognized century code at start of: ' + in_packed)
year += int(in_packed[1:3])
month = translate[in_packed[3]]
day = translate[in_packed[4]]
if len(in_packed) == 5:
return datetime.date(year, month, day)
else:
decimaldays = float('0.' + in_packed[5:])
hour = int(decimaldays * 24.)
minute = int((decimaldays * 24. - hour) * 60.)
second = int(round(decimaldays * 24. * 60. * 60. - (hour * 3600.) - (minute * 60.)))
return datetime.datetime(year, month, day, hour, minute, second)
#TODO: clean up the following comments and incorporate into the code
# can get all numbered asteroids (and other junk) from minor planet center in MPCORB.DAT file:
# [MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
# [Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
# 944 Hidalgo line as of 2013-07-26 is:
#Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
#00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
# But, I want in xephem format, [described here](http://www.clearskyinstitute.com/xephem/help/xephem.html#mozTocId468501)
# and minor planet provides a subset in xephem format [here](http://www.minorplanetcenter.net/iau/Ephemerides/Bright/2013/Soft03Bright.txt):
# though to ensure I was comparing same exact orbit solutions, used 944 Hidalgo from
# http://www.minorplanetcenter.net/iau/Ephemerides/Distant/Soft03Distant.txt
# From MPO263352
#944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
# So, for my purposes, the xephem format, separated by commas is:
# NUMBER NAME - easy enough....
# e - for ecliptic elliptical orbit
# i = inclination, degrees (directly from MPCORB.DAT)
# O = longitude of ascending node, degrees (directly from MPCORB.DAT)
# o = argument of perihelion, degrees (directly from MPCORB.DAT)
# a = mean distance (aka semi-major axis), AU (directly from MPCORB.DAT)
# n = mean daily motion, degrees per day (computed from a**3/2 if omitted) (directly from MPCORB.DAT)
# e = eccentricity, must be < 1 (directly from MPCORB.DAT)
# M = mean anomaly, i.e., degrees from perihelion (directly from MPCORB.DAT)
# E = epoch date, i.e., time of M MM/DD.D/YYYY
# in MPCORB.DAT epoch date is packed according to rules:
# http://www.minorplanetcenter.net/iau/info/PackedDates.html
# Subfield 10A First date these elements are valid, optional
# SubField 10B Last date these elements are valid, optional
# D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT, so 2000
# First component of magnitude model, either g from (g,k) or H from (H,G). Specify which by preceding the number with a "g" or an "H". In absence of either specifier the default is (H,G) model. See Magnitude models.
# corresponds to H in MPCORB.DAT, just need to preface with an 'H'
# Second component of magnitude model, either k or G (directly from MPCORB.DAT)
# s = angular size at 1 AU, arc seconds, optional - I don't care, so skip....
def convert_mpcorb_to_xephem(input):
"""
convert from, e.g.:
[MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
[Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
# 944 Hidalgo line as of 2013-07-26 is:
00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
to
# From MPO263352
944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
input is a single line of text, output will include a newline character within it (but no newline at end)
"""
output = '# From ' + input[107:116] + '\n'
output += input[166:194].strip().replace('(','').replace(')','') + ','
output += 'e,'
output += input[59:68].strip() + ',' # i = inclination, degrees
output += input[48:57].strip() + ',' # O = longitude of ascending node, degrees
output += input[37:46].strip() + ',' # o = argument of perihelion, degrees
output += input[92:103].strip() + ',' # a = mean distance (aka semi-major axis), AU
output += input[80:91].strip() + ',' # n = mean daily motion, degrees per day (computed from a**3/2 if omitted)
output += input[70:79].strip() + ',' # e = eccentricity, must be < 1
output += input[26:35].strip() + ',' # M = mean anomaly, i.e., degrees from perihelion
output += unpack_mpc_date(input[20:25].strip()).strftime('%m/%d/%Y') + ',' # E = epoch date, i.e., time of M
output += '2000,' # D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT
output += 'H' + input[8:13].strip() + ',' # First component of magnitude model
output += input[14:19].strip() # Second component of magnitude model
return output
def minorplanets(in_datetime, observatory_code,
max_objects=None,
max_magnitude=None, require_magnitude=True,
max_zenithdistance_deg=90.0,
min_heliocentric_distance_AU=None, max_heliocentric_distance_AU=None,
min_topocentric_distance_AU=None, max_topocentric_distance_AU=None):
"""
in_datetime - datetime.datetime(), e.g. datetime.datetime.utcnow()
observatory_code - the Code of the observatory in
http://www.minorplanetcenter.net/iau/lists/ObsCodes.html
can be either string or integer.
max_objects - default is None, otherwise limits the return to this number
of observable objects
max_magnitude - default is None, otherwise limits return to objects
brighter than or equal to this magnitude
(as calculated by PyEphem from the MPC data)
(TODO: confirm whether this is V-band, R-band,
or other...)
require_magnitude - default is True. If False and max_magnitude is None,
then return all objects, whether PyEphem can calculate
a magnitude or not.
max_zenithdistance_deg - default is 90 degrees (horizon)
min/max_heliocentric_distance_AU - defaults are None
min/max_topocentric_distance_AU - defaults are None
"""
obs_info = get_latlon_from_observatory_code(observatory_code)
obs = ephem.Observer()
obs.lat = np.radians(obs_info['latitude'])
obs.lon = np.radians(obs_info['longitude'])
obs.date = _convert_datetime_to_pyephem_date_string(in_datetime)
mpc_filename = _find_cached_file('MPCORB.DAT')
if mpc_filename == 'File Not Found':
raise Error("Problem reading MPCORB.DAT file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT")
if max_magnitude is not None:
require_magnitude = True
matching_objects = []
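    # MPCORB.DAT begins with a free-form header; orbit records start after the dashed separator line.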
with open(mpc_filename) as f:
in_header = True
for line in f:
if in_header is False and len(line) > 1:
if (not require_magnitude) or (require_magnitude and (line[8:13] != ' ')):
eph = ephem.readdb(convert_mpcorb_to_xephem(line).splitlines()[1])
eph.compute(obs)
if (max_magnitude is None) or (eph.mag <= max_magnitude):
if ((max_zenithdistance_deg is None) or
(np.degrees(np.pi/2. - eph.alt) <= max_zenithdistance_deg)):
if ((min_heliocentric_distance_AU is None) or
(eph.sun_distance >= min_heliocentric_distance_AU)):
if ((max_heliocentric_distance_AU is None) or
(eph.sun_distance <= max_heliocentric_distance_AU)):
if ((min_topocentric_distance_AU is None) or
(eph.earth_distance >= min_topocentric_distance_AU)):
if ((max_topocentric_distance_AU is None) or
(eph.earth_distance <= max_topocentric_distance_AU)):
matching_objects.append(eph)
else:
if line.startswith('-------------------'):
in_header = False
if max_objects is not None:
if len(matching_objects) >= max_objects:
break
name = [a.name for a in matching_objects]
d = {}
d['rise_time'] = [a.rise_time.datetime() if a.rise_time is not None else np.nan for a in matching_objects]
d['transit_time'] = [a.transit_time.datetime() if a.transit_time is not None else np.nan for a in matching_objects]
d['set_time'] = [a.set_time.datetime() if a.set_time is not None else np.nan for a in matching_objects]
d['raJ2000_deg'] = [np.degrees(a.a_ra) for a in matching_objects]
d['decJ2000_deg'] = [np.degrees(a.a_dec) for a in matching_objects]
d['mag'] = [a.mag for a in matching_objects]
d['R_AU'] = [a.sun_distance for a in matching_objects]
d['delta_AU'] = [a.earth_distance for a in matching_objects]
moon = ephem.Moon()
moon.compute(obs.date)
d['O-E-M_deg'] = [np.degrees(ephem.separation(moon, a)) for a in matching_objects]
output = DataFrame(d, index=name)
output = output[['rise_time', 'transit_time', 'set_time', 'raJ2000_deg', 'decJ2000_deg',
'mag', 'R_AU', 'delta_AU', 'O-E-M_deg']] # re-order columns to something sensible
return output
| 53.418338 | 250 | 0.565145 | 32 | 0.001716 | 0 | 0 | 0 | 0 | 0 | 0 | 11,502 | 0.616961 |
7d6278af283b8d74f950804bc1e7d3a988413e1b | 7,573 | py | Python | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
]
| 1 | 2021-02-18T19:46:44.000Z | 2021-02-18T19:46:44.000Z | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
]
| null | null | null | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
]
| 1 | 2022-01-23T13:37:49.000Z | 2022-01-23T13:37:49.000Z | import torch
from torch_geometric.nn import FeaStConv
from knn_cuda import KNN
from torch_cluster import fps
#from ....ops.roiaware_pool3d import roiaware_pool3d_utils
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
import sys
from lppproj import LocalityPreservingProjection
class PFNLayer(nn.Module):
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
super().__init__()
self.last_vfe = last_layer
self.use_norm = use_norm
if not self.last_vfe:
out_channels = out_channels // 2
if self.use_norm:
self.linear = nn.Linear(in_channels, out_channels, bias=False)
self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
else:
self.linear = nn.Linear(in_channels, out_channels, bias=True)
self.part = 50000
def forward(self, inputs):
if inputs.shape[0] > self.part:
# nn.Linear performs randomly when batch size is too large
num_parts = inputs.shape[0] // self.part
part_linear_out = [self.linear(inputs[num_part*self.part:(num_part+1)*self.part])
for num_part in range(num_parts+1)]
x = torch.cat(part_linear_out, dim=0)
else:
x = self.linear(inputs)
torch.backends.cudnn.enabled = False
x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1) if self.use_norm else x
torch.backends.cudnn.enabled = True
x = F.relu(x)
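        # Max-pool across the points in each pillar to get one feature vector per pillar.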
x_max = torch.max(x, dim=1, keepdim=True)[0]
if self.last_vfe:
return x_max
else:
x_repeat = x_max.repeat(1, inputs.shape[1], 1)
x_concatenated = torch.cat([x, x_repeat], dim=2)
return x_concatenated
class PillarVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range):
super().__init__(model_cfg=model_cfg)
num_point_features=5
self.bev_width=512
self.bev_height=512
self.use_norm = self.model_cfg.USE_NORM
self.with_distance = self.model_cfg.WITH_DISTANCE
self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
num_point_features += 6 if self.use_absolute_xyz else 3
if self.with_distance:
num_point_features += 1
self.num_filters = self.model_cfg.NUM_FILTERS
assert len(self.num_filters) > 0
num_filters = [num_point_features] + list(self.num_filters)
pfn_layers = []
#print(num_filters)
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
pfn_layers.append(
PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
)
self.pfn_layers = nn.ModuleList(pfn_layers)
self.relu = nn.ReLU()
#self.FC1=nn.Sequential(
#nn.Linear(2*num_point_features, num_point_features),
#nn.ReLU(inplace=True),
#)
#self.FC2=nn.Sequential(
#nn.Linear(num_point_features,num_point_features),
# nn.ReLU(inplace=True),
#)
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
def get_output_feature_dim(self):
return self.num_filters[-1]
def get_paddings_indicator(self, actual_num, max_num, axis=0):
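        # Build a boolean mask marking which slots in each voxel hold real points (slot index < actual_num).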
actual_num = torch.unsqueeze(actual_num, axis + 1)
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
paddings_indicator = actual_num.int() > max_num
return paddings_indicator
def forward(self, batch_dict, **kwargs):
#print(batch_dict.keys())
gt_boxes = batch_dict["gt_boxes"]
#print(batch_dict["gt_names"].size())
voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
#print(voxel_features.size())
voxel_features = torch.cat([voxel_features[:,:,:4],voxel_features[:,:,6:]], dim=-1)
#print(voxel_features.size())
points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
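        # f_cluster: offset of every point from the mean of the points in its pillar.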
f_cluster = voxel_features[:, :, :3] - points_mean
f_center = torch.zeros_like(voxel_features[:,: , :3])
center = torch.zeros_like(voxel_features[:,1,:3]).view(voxel_features.size()[0],1,3)
coor = torch.zeros([3,self.bev_width,self.bev_height], dtype=f_center.dtype, device=f_center.device)
x = torch.linspace(0,self.bev_width,self.bev_height) #*self.voxel_x + self.x_offset
z = torch.linspace(0,1,1)
y = torch.linspace(0,self.bev_width,self.bev_height)
grid_x,grid_y,grid_z = torch.meshgrid(x,y,z)
coor = torch.cat([(grid_x*self.voxel_x + self.x_offset).unsqueeze(-1), (grid_y*self.voxel_y + self.y_offset).unsqueeze(-1), (grid_z*self.voxel_z + self.z_offset).unsqueeze(-1)], dim=-1)
coor = coor.view(self.bev_width*self.bev_height,3)
center[:,:,0] = (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
center[:,:,1] = (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
center[:,:,2] = (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
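        # f_center: offset of every point from the geometric center of its pillar (derived from the voxel grid index and voxel size).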
f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
if self.use_absolute_xyz:
features = [voxel_features, f_cluster, f_center]
else:
features = [voxel_features[..., 3:], f_cluster, f_center]
batch_dict["points_mean"]=center
batch_dict["points_coor"]=coor
if self.with_distance:
points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
features.append(points_dist)
features = torch.cat(features, dim=-1)
coords = batch_dict['voxel_coords']
batch_size = coords[:, 0].max().int().item() + 1
voxel_count = features.shape[1]
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
features *= mask
#batch_spatial_features={}
#container = torch.zeros_like(features)
for index in range(batch_size):
batch_mask = coords[:, 0] ==index
batch_features = features[batch_mask, :]
#batch_spatial_features.append(spatial_feature)
batch_points_mean = points_mean[batch_mask,:]
for pfn in self.pfn_layers:
features = pfn(features)
features = features.squeeze()
batch_dict['pillar_features'] = features
return batch_dict
| 44.810651 | 193 | 0.629737 | 7,233 | 0.955104 | 0 | 0 | 0 | 0 | 0 | 0 | 709 | 0.093622 |
7d68c3cd5ebdfbe4a4f33c56583ea1d144745710 | 915 | py | Python | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
]
| 2 | 2021-02-22T21:53:58.000Z | 2021-04-03T16:40:52.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
]
| 1 | 2018-09-26T03:38:57.000Z | 2018-09-26T03:38:57.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
# Import the chess module.
sys.path.insert(0, os.path.abspath('..'))
import chess
# Autodoc.
extensions = ["sphinx.ext.autodoc"]
autodoc_member_order = 'bysource'
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "python-chess"
copyright = "2014–2018, Niklas Fiekas"
# The version.
version = chess.__version__
release = chess.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
| 22.875 | 74 | 0.747541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.629226 |
7d69ee0ea7680377c19eec8ca94d5abf487ee54e | 1,227 | py | Python | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
]
| null | null | null | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
]
| null | null | null | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
]
| null | null | null | # min (1/2) x'Q'x - q'x
from __future__ import print_function
import numpy as np
import aa
dim = 1000
mems = [5, 10, 20, 50, 100]
N = int(1e4)
np.random.seed(1234)
Q = np.random.randn(dim,dim)
Q = Q.T.dot(Q)
q = np.random.randn(dim)
x_0 = np.random.randn(dim)
x_star = np.linalg.solve(Q, q)
step = 0.0005
def f(x):
return 0.5 * x.T @ Q @ x - q.T @ x
f_star = f(x_star)
print('f^* = ', f_star)
print('No acceleration')
x = x_0.copy()
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
for mem in mems:
print('Type-I acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, True, eta=1e-8)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
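        # apply() overwrites x in place with the Anderson-mixed iterate built from the (x_prev -> x) history.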
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
print('Type-II acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, False, eta=1e-10)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
| 22.309091 | 61 | 0.544417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.125509 |
7d6a2293f4de2609456441f4d1fef57b68982b63 | 2,193 | py | Python | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.cerr.INFO = cms.untracked.PSet(
default = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
PATSummaryTables = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#'file:/afs/cern.ch/cms/PRS/top/cmssw-data/relval200-for-pat-testing/TauolaTTbar-Summer08_IDEAL_V9_v1-AODSIM.80.root'
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/1E84F77B-341C-DE11-8A99-0019DB29C5FC.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/34267FD6-1C1C-DE11-A836-001617C3B78C.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/68BF59CF-1C1C-DE11-AFA9-000423D98BC4.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = cms.string('IDEAL_V9::All')
process.GlobalTag.globaltag = cms.string('STARTUP_V9::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# PAT Layer 0+1
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.load("MuonAnalysis.MuonAssociators.muonL1Match_cfi")
process.muonL1Match.preselection = cms.string("")
process.allLayer1Muons.trigPrimMatch = cms.VInputTag(
cms.InputTag("muonL1Match"),
cms.InputTag("muonL1Match","propagatedReco"),
)
## Put your EDAnalyzer here
## process.plots = cms.EDFilter("DataPlotter",
## muons = cms.InputTag("cleanLayer1Muons"),
## muonCut = cms.string("")
## )
process.p = cms.Path(
process.muonL1Match *
process.patDefaultSequence
# * process.plots
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("plots.root")
)
| 37.810345 | 125 | 0.75285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,154 | 0.52622 |
7d6a59d7fa23a596aac99cdbb9dd178d370f5c83 | 95 | py | Python | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
]
| null | null | null | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
]
| null | null | null | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
]
| null | null | null | """
Hydropy
=======
Provides functions to work with hydrological processes and equations
"""
| 11.875 | 68 | 0.705263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.989474 |
7d6a678fc2e4bddc6ad3dc6d90062ac0ebecff7e | 915 | py | Python | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
]
| null | null | null | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
]
| null | null | null | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
]
| null | null | null | """
Challenge
You are asked to read a value and create a program that places the value read into the first position of a vector N[10].
In each subsequent position, store double the value of the previous position.
For example, if the value read is 1, the vector values must be 1, 2, 4, 8 and so on.
Then print the vector.
Input
The input contains one integer value (V<=50).
Output
For each position of the vector, write "N[i] = X", where i is the position in the vector and X is the value stored at position i.
The first element of vector N (N[0]) receives the value of V.
Input example          Output example
1                      N[0] = 1
                       N[1] = 2
                       N[2] = 4
...
"""
x = int(input())
n = list()
# TODO: Fill in the blanks with a possible solution to the problem.
for i in range(10):
n.append(x)
x = x*2
print(f"N[{i}] = {n[i]}")
| 30.5 | 123 | 0.632787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.903088 |
7d6a9fc0ae2c18fcc1e9420cc0d5c546fe26cbe4 | 1,267 | py | Python | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
]
| null | null | null | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
]
| null | null | null | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
]
| null | null | null | # Яке з 3 чисел найбільш наближене до середнього
print("Введіть перше число")
var1 = float(input())
print("Введіть друге число")
var2 = float(input())
print("Введіть третє число")
var3 = float(input())
# Avg = (var1+var2+var3)/3 # Solution variant that compares the numbers with the arithmetic mean:
if ((var1 > var2) and (var1 < var3)) or (var1 < var2) and (var1 > var3):
print ("Найбільш наближеним числом до середнього є ",var1)
elif ((var2 > var1) and (var2 < var3)) or ((var2 < var1) and (var2 > var3)):
print ("Найбільш наближеним числом до середнього є ",var2)
else:
print ("Найбільш наближеним числом до середнього є ",var3)
# # Solution variant that compares the numbers with the arithmetic mean:
# if (abs(var1-Avg))>(abs(var2-Avg)):
# if (abs(var2-Avg))>(abs(var3-Avg)):
# print ("Найбільш наближеним числом до середнього є ",var3)
# else: #(abs(var2-Avg))<(abs(var3-Avg))
# print ("Найбільш наближеним числом до середнього є ",var2)
# else: #(abs(var1-Avg))<(abs(var2-Avg))
# if (abs(var1-Avg))>(abs(var3-Avg)):
# print ("Найбільш наближеним числом до середнього є ",var3)
# else: #(abs(var1-Avg))<(abs(var3-Avg))
# print ("Найбільш наближеним числом до середнього є ",var1) | 45.25 | 93 | 0.648777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,392 | 0.807893 |
7d6ad190979d6481b1c2985d3daa77d4ce6fbfd1 | 5,689 | py | Python | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
]
| null | null | null | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
]
| null | null | null | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
]
| null | null | null | from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader
from paper_1.utils import read_parameter_file, create_experiment_directory
from paper_1.evaluation.eval_utils import init_metrics_object
from paper_1.baseline.main import train as baseline_train
from paper_1.model.model_utils import initialize_model
from torch.utils.tensorboard import SummaryWriter
from train import select_splitted_pseudo_labels
from os.path import dirname, abspath
from torch.optim import Adam
import pandas as pd
import numpy as np
import random
import torch
import os
def main(main_params: dict, data_params: dict, metric_params: dict, model_params: dict,
parent_dir, source_domain: str, target_domain: str):
# clear the cuda memory
torch.cuda.empty_cache()
# get the current validation fold
val_fold = data_params['data']['val_fold']
# read the train params
num_train_iter = main_params['num_train_iter']
experiment_id = main_params['experiment_id']
num_epochs = main_params['num_epochs']
quantiles = main_params['quantiles']
model_dir = main_params['model_dir']
base_dir = main_params['base_dir']
# get the data loader parameters
balance_keys = data_params['data_loader']['balance_keys']
batch_size = data_params['data_loader']['batch_size']
# load the data
data_train_src, data_train_tar = load_train_data(data_params, source_domain, target_domain)
data_list_val = load_val_data(data_params)
num_val_iter_list = [df.shape[0] for df in data_list_val]
validation_domains = data_params['data']['validation']['validation_domains']
val_loader_list = [sequential_data_loader(data_frame) for data_frame in data_list_val]
# load a pre trained model
model_path = model_dir + source_domain + '/' + 'None' + '/' + str(val_fold) + '/f1_best.pt'
# load a previously stored model, which is the init point for curriculum labeling
pretrained_model = torch.load(model_path)
mapping = metric_params['inverse_class_mapping']
# initialize the metrics object
metric_object = init_metrics_object(metric_params)
# create a directory for the current experiments
file_names_params = os.listdir(parent_dir + '/parameters/')
file_names_params = [parent_dir + '/parameters/' + x for x in file_names_params]
file_names_baseline = os.listdir(parent_dir + '/baseline/')
file_names_baseline = [parent_dir + '/baseline/' + x for x in file_names_baseline]
file_names = []
file_names.extend(file_names_params)
file_names.extend(file_names_baseline)
file_names = [x for x in file_names if not os.path.isdir(x)]
val_fold = data_params['data']['val_fold']
exp_base_dir = create_experiment_directory(base_dir, source_domain, target_domain, val_fold, file_names, experiment_id)
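    # Curriculum labeling: for each quantile, pseudo-label the target data with the current model,
    # keep the subset selected by that quantile, and retrain a fresh model on source + pseudo-labeled data.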
for quantile in quantiles:
exp_dir = exp_base_dir + str(quantile) + '/'
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# create a tensorboard writer
writer = SummaryWriter(exp_dir)
# create data loader with current pseudo labels
data_frame_pseudo = select_splitted_pseudo_labels(pretrained_model, data_train_tar, quantile, mapping)
# delete the previously trained model, as it is no longer in use
del pretrained_model
# create the train data loader
data_train = pd.concat([data_train_src, data_frame_pseudo])
train_loader = random_data_loader(data_train, balance_keys, batch_size)
# initialize a new model to train it from scratch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = initialize_model(model_params, parent_dir, device)
model.cuda()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# create an optimizer for the model
optimizer = Adam(model.parameters(), lr=4e-5, betas=(0.9, 0.999))
# train the newly created model from scratch
baseline_train(model, optimizer, metric_object, num_train_iter, metric_params, train_loader, val_loader_list,
source_domain, writer, num_val_iter_list, validation_domains, num_epochs, exp_dir)
# update the pretrained model
pretrained_model = model
del model
del optimizer
if __name__ == '__main__':
# set the seed for reproducability
seed_value = 0
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
# get the current and parent directory
current_file = abspath(__file__)
current_dir = dirname(current_file)
parent_dir = dirname(current_dir)
metric_param_file = parent_dir + '/parameters/metric_params.yaml'
model_param_file = parent_dir + '/parameters/model_params.yaml'
data_param_file = parent_dir + '/parameters/data_params.yaml'
main_param_file = current_dir + '/main_params.yaml'
# load the parameters
metric_params = read_parameter_file(metric_param_file)
model_params = read_parameter_file(model_param_file)
main_params = read_parameter_file(main_param_file)
data_params = read_parameter_file(data_param_file)
# define the domains, on which the models should be trained
source_domains = ['Race', 'Religion', 'Sexual Orientation']
target_domains = ['Race', 'Religion', 'Sexual Orientation']
for source_domain in source_domains:
for target_domain in target_domains:
if source_domain != target_domain:
main(main_params, data_params, metric_params, model_params, parent_dir, source_domain, target_domain)
| 40.347518 | 123 | 0.731763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,307 | 0.229742 |
7d6b4c15322d55cd0ce898e730c14103fb38d94b | 6,793 | py | Python | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
]
| null | null | null | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
]
| null | null | null | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2017 Ericsson AB and others. All rights reserved
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import sys
import threading
import logging
import urllib3
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
import sfc.lib.config as sfc_config
from sfc.tests.functest import sfc_parent_function
""" logging configuration """
logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
openstack_sfc = os_sfc_utils.OpenStackSFC()
class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
"""One client and one server are created using nova.
The server will be running a web server on port 80.
Then one Service Function (SF) is created using Tacker.
This service function will be running a firewall that
blocks the traffic in a specific port.
A symmetric service chain routing the traffic throught
this SF will be created as well.
The purpose is to check different HTTP traffic
combinations using firewall.
"""
def run(self):
logger.info("The test scenario %s is starting", __name__)
self.create_custom_vnfd(self.testcase_config.test_vnfd, 'test-vnfd1')
self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
if self.vnf_id is None:
logger.error('ERROR while booting VNF')
sys.exit(1)
tosca_file = os.path.join(
COMMON_CONFIG.sfc_test_dir,
COMMON_CONFIG.vnffgd_dir,
self.testcase_config.test_vnffgd)
os_sfc_utils.create_vnffgd(
self.tacker_client,
tosca_file=tosca_file,
vnffgd_name='test-vnffgd')
client_port = openstack_sfc.get_client_port(
self.client_instance,
self.client_creator)
server_port = openstack_sfc.get_client_port(
self.server_instance,
self.server_creator)
server_ip_prefix = self.server_ip + '/32'
default_param_file = os.path.join(
COMMON_CONFIG.sfc_test_dir,
COMMON_CONFIG.vnfd_dir,
COMMON_CONFIG.vnfd_default_params_file)
os_sfc_utils.create_vnffg_with_param_file(
self.tacker_client,
'test-vnffgd',
'test-vnffg',
default_param_file,
client_port.id,
server_port.id,
server_ip_prefix)
# Start measuring the time it takes to implement the classification
# rules
t1 = threading.Thread(target=wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.server_instance.compute_host,
server_port,
self.client_instance.compute_host,
client_port, self.odl_ip,
self.odl_port,))
try:
t1.start()
except Exception as e:
logger.error("Unable to start the thread that counts time %s" % e)
logger.info("Assigning floating IPs to instances")
self.assign_floating_ip_client_server()
vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
vnf_id=self.vnf_id)
self.assign_floating_ip_sfs(vnf_ip)
self.check_floating_ips()
self.start_services_in_vm()
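        # Forward traffic between the SF's two interfaces; a non-None port argument makes the SF block that port.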
self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
logger.info("Wait for ODL to update the classification rules in OVS")
t1.join()
results = self.present_results_allowed_port_http(self.testcase_config)
self.vxlan_blocking_stop(self.fips_sfs[0])
self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', "80")
self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
results = self.present_results_blocked_port_http(self.testcase_config,
'HTTP uplink')
self.vxlan_blocking_stop(self.fips_sfs[0])
self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0',
self.testcase_config.source_port)
results = self.present_results_blocked_port_http(self.testcase_config,
'HTTP downlink')
self.vxlan_blocking_stop(self.fips_sfs[0])
self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
results = self.present_results_allowed_http()
if __name__ == '__main__':
return results.compile_summary(), self.creators
if __name__ == 'sfc.tests.functest.sfc_symmetric_chain':
return results.compile_summary(), self.creators
def get_creators(self):
"""Return the creators info, specially in case the info is not
returned due to an exception.
:return: creators
"""
return self.creators
def wait_for_classification_rules(ovs_logger, compute_nodes,
server_compute, server_port,
client_compute, client_port,
odl_ip, odl_port):
if client_compute == server_compute:
odl_utils.wait_for_classification_rules(
ovs_logger,
compute_nodes,
odl_ip,
odl_port,
client_compute,
[server_port, client_port])
else:
odl_utils.wait_for_classification_rules(
ovs_logger,
compute_nodes,
odl_ip,
odl_port,
server_compute,
server_port)
odl_utils.wait_for_classification_rules(
ovs_logger,
compute_nodes,
odl_ip,
odl_port,
client_compute,
client_port)
if __name__ == '__main__':
# Disable InsecureRequestWarning errors when executing the SFC tests in XCI
urllib3.disable_warnings()
TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
supported_installers = ['fuel', 'apex', 'osa', 'compass']
vnf_names = ['testVNF1']
test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
vnf_names)
test_run.run()
| 35.196891 | 79 | 0.6227 | 4,709 | 0.693214 | 0 | 0 | 0 | 0 | 0 | 0 | 1,620 | 0.238481 |
7d6ecad90431713565bfe9a36d5edf9284440624 | 1,827 | py | Python | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
]
| 42 | 2017-01-23T22:36:03.000Z | 2021-11-14T21:22:17.000Z | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
]
| 6 | 2021-09-26T21:18:30.000Z | 2022-02-01T01:26:18.000Z | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
]
| 6 | 2017-04-14T13:07:27.000Z | 2020-06-17T06:24:18.000Z | #!/usr/bin/env python
import sys
import os
import subprocess
import offshoot
valid_commands = ["init", "install", "uninstall"]
def execute():
if len(sys.argv) == 2:
command = sys.argv[1]
if command not in valid_commands:
raise Exception("'%s' is not a valid Offshoot command." % command)
if command == "init":
init()
elif len(sys.argv) > 2:
command, args = sys.argv[1], sys.argv[2:]
if command not in valid_commands:
raise Exception("'%s' is not a valid Offshoot command." % command)
if command == "install":
install(args[0])
elif command == "uninstall":
uninstall(args[0])
def install(plugin):
print("OFFSHOOT: Attempting to install %s..." % plugin)
plugin_directory = offshoot.config.get("file_paths").get("plugins")
plugin_path = "%s/%s/plugin.py".replace("/", os.sep) % (plugin_directory, plugin)
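    # Turn the plugin's file path into a dotted module path so it can be executed with "python -m".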
plugin_module_string = plugin_path.replace(os.sep, ".").replace(".py", "")
subprocess.call([sys.executable.split(os.sep)[-1], "-m", "%s" % plugin_module_string, "install"])
def uninstall(plugin):
print("OFFSHOOT: Attempting to uninstall %s..." % plugin)
plugin_directory = offshoot.config.get("file_paths").get("plugins")
plugin_path = "%s/%s/plugin.py".replace("/", os.sep) % (plugin_directory, plugin)
plugin_module_string = plugin_path.replace(os.sep, ".").replace(".py", "")
subprocess.call([sys.executable.split(os.sep)[-1], "-m", "%s" % plugin_module_string, "uninstall"])
def init():
import warnings
warnings.filterwarnings("ignore")
print("OFFSHOOT: Generating configuration file...")
offshoot.generate_configuration_file()
print("OFFSHOOT: Initialized successfully!")
if __name__ == "__main__":
execute()
| 27.681818 | 103 | 0.636015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 468 | 0.256158 |
7d6f707bec1ef6f1945e2739232de8ac3b5e6c3e | 1,953 | py | Python | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
]
| 7 | 2019-08-20T02:43:44.000Z | 2019-12-13T14:26:05.000Z | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
]
| null | null | null | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
]
| 1 | 2019-07-25T21:46:50.000Z | 2019-07-25T21:46:50.000Z | import heterocl as hcl
from math import sqrt
hcl.config.init_dtype = hcl.Float()
input_image = hcl.placeholder((480, 640, 3), name = "input")
output_image = hcl.placeholder((480, 640, 3), name = "output")
def unsharp(input_image, output_image):
"""
Helper Functions
"""
def clamp(val, min_, max_):
local = hcl.local(val)
with hcl.if_(val < min_):
local[0] = min_
with hcl.elif_(val > max_):
local[0] = max_
return local[0]
def clamp2D(tensor, min_, max_):
return hcl.compute(tensor.shape, lambda x, y: clamp(tensor[x, y], min_, max_), name = "clamped_" + tensor.name)
def clamp3D(tensor, min_, max_):
return hcl.compute(tensor.shape, lambda x, y, c: clamp(tensor[x, y, c], min_, max_), name = "clamped_" + tensor.name)
def kernel_f(x):
return hcl.exp(-(x * x) / (2 * 1.5 * 1.5)) / sqrt(2 * 3.14159 * 1.5)
def kernel(x):
return kernel_f(x) * 255 / (kernel_f(0) + kernel_f(1) * 2 + kernel_f(2) * 2 + kernel_f(3) * 2 + kernel_f(4) * 2)
rx = hcl.reduce_axis(-4, 5, "rx")
ry = hcl.reduce_axis(-4, 5, "ry")
my = hcl.reduce_axis(0, 640, "my")
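    # Grayscale via integer luma: (77*R + 150*G + 29*B) >> 8, roughly the BT.601 weights.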
gray = hcl.compute((480, 640), lambda x, y: (input_image[x, y, 0] * 77 + input_image[x, y, 1] * 150 + input_image[x, y, 2] * 29) >> 8, name = "gray")
blur = hcl.compute(gray.shape, lambda x, y: hcl.sum(gray[rx+x, ry+y] * kernel(rx) * kernel(ry), axis = [rx, ry]), name = "blur")
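    # Unsharp masking: boost the difference between the original and the 9x9 Gaussian-blurred image.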
sharpen = clamp2D(hcl.compute(gray.shape, lambda x, y: gray[x, y] * 2 - blur[x, y], name = "sharpen"), 0, 255)
ratio = clamp2D(hcl.compute(gray.shape, lambda x, y: sharpen[x, y] * 32 / hcl.max(gray[x, my], axis = my), name = "ratio"), 0, 255)
out = clamp3D(hcl.compute(output_image.shape, lambda x, y, c: ratio[x, y] * input_image[x, y, c] >> 5, name = "out"), 0, 255)
U = hcl.update(output_image, lambda x, y, c: out[x, y, c])
return U
s = hcl.make_schedule([input_image, output_image], unsharp)
print hcl.lower(s, [input_image, output_image])
| 39.06 | 151 | 0.620072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.0553 |
7d702e229890e1a0e38bb9dc45ff5dead9dc3d80 | 14,391 | py | Python | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
]
| 2 | 2018-12-20T08:54:17.000Z | 2019-10-19T18:35:33.000Z | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
]
| null | null | null | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
]
| null | null | null | """A collection of utility function, shared across modules."""
import collections
import datetime
import gzip as gz
import logging
import os
import re
import shutil
import subprocess
from argparse import ArgumentTypeError
from copy import deepcopy
from logging import Logger
from typing import (Any, Callable, Dict, Generator, Iterable, List, Mapping,
Optional, Sequence, Tuple, TypeVar, Union, ValuesView,
cast)
from ..config import Config, KitData
from .barcoded_filename import BarcodedFilename
from .exceptions import AnnotationError, DataError
def get_current() -> str:
"""Get the current date in standard HaTSPiL format."""
today = datetime.date.today()
return "%04d_%02d_%02d" % (today.year, today.month, today.day)
def get_overridable_current_date(parameters: Dict[str, Any]) -> str:
"""Get an eventual overridden date.
If the `parameters` dict contains a `use_date` value, return it.
Otherwise return the result of `get_current`.
"""
if parameters["use_date"] is None:
return get_current()
else:
current_date = parameters["use_date"]
assert isinstance(current_date, str)
return current_date
def run_and_log(command: str, logger: Logger) -> int:
"""Run a command and log everything.
Use `subprocess.Popen` to run a command. The standard output and the
standard error are piped into the logger.
Args:
command: the command to run.
logger: the logger.
Returns:
int: the exit status of the process.
"""
logger.info("Running command: %s", command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True,
bufsize=1,
) as process:
(out, err) = process.communicate()
for line in out.split("\n"):
if line != "":
logger.info(line)
for line in err.split("\n"):
if line != "":
logger.warning(line)
return process.wait()
def get_sample_filenames(
obj: Union[Sequence[str], Mapping[str, List[str]], str],
split_by_organism: bool = False,
) -> Union[List[str], Mapping[str, List[str]]]:
"""Return the filenames organised in a different way.
Take a set of filenames in different possible shapes and reorganize
them depending on the content and the value of `split_by_organism`.
Args:
obj: the filenames. It can be a string for one single filename,
a list of filenames or a dict where each key is an organism
code (i.e.: hg19) and the relative value is a list of
filenames.
split_by_organism: whether the filenames must be split by
organism or they must be returned all
together.
Returns:
The input filenames with the desired shape. There are different
cases:
* If `obj` is a list and its length is greater than 1 and
`split_by_organism` is `True`, the organism for each file
is obtained using `get_organism_from_filename`. A dict is
created, where each organism maps to a list of filenames.
If the dict contains more than one organism, it is returned,
otherwise a list of the filenames is returned.
* If `obj` is a list but its length is not greater than 1 or
`split_by_organism` is `False`, a **copy** of `obj` is
returned.
* If `obj` is a dict and it contains more than one entry and
`split_by_organism` is `True`, a **deep copy** of `obj` is
returned.
* If `obj` is a dict but it contains less than two entries or
`split_by_organism` is `False`, a list of all the filenames
in `obj` is returned.
* If `obj` is a string and `split_by_organism` is `True`, the
organism is obtained using `get_organism_from_filename`. If
the organism is valid, a dict with the organism mapped to
a list of one element, `obj`, is returned. Otherwise, if the
organism is invalid (`None` or empty), a list of one element,
`obj`, is returned.
* If `obj` is a string but `split_by_organism` is `False`, a
list of one element, `obj`, is returned.
"""
if isinstance(obj, list):
if split_by_organism and len(obj) > 1:
filenames: Dict[str, List[str]] = {}
for filename in obj:
organism = get_organism_from_filename(filename)
if organism is None:
organism = ""
filenames.setdefault(organism, []).append(filename)
if len(filenames) > 1:
return filenames
else:
return list(next(iter(filenames.values())))
else:
return list(obj)
elif isinstance(obj, dict):
if split_by_organism and len(obj) > 1:
return deepcopy(obj)
else:
values = obj.values()
if not values:
return []
elif isinstance(next(iter(values)), list):
return [filename for filenames in values for filename in filenames]
elif isinstance(next(iter(values)), str):
return list(cast(ValuesView[str], values))
else:
raise DataError("unexpected filenames type")
else:
assert isinstance(obj, str)
if split_by_organism:
organism = get_organism_from_filename(obj)
if organism:
return {organism: [obj]}
else:
return [obj]
else:
return [obj]
def get_organism_from_filename(filename: str) -> Optional[str]:
"""Get the organism from a filename.
Try to analyse the barcode of a filename, and return the organism
if available. Otherwise return `None`.
"""
try:
barcoded = BarcodedFilename(os.path.basename(filename))
return barcoded.organism
except Exception:
return None
def get_samples_by_organism(
obj: Union[List[str], Dict[str, List[str]], str], default_organism: str
) -> Dict[str, List[str]]:
"""Return the samples in a dict.
Create a organism-samples dict.
Args:
obj: the samples that are collected.
default_organism: when `obj` is not a dict, `default_organism`
is used as key for the output dict.
Returns:
A dictionary that maps organisms to lists of samples. If `obj`
is a dict, a copy of `obj` is returned. If `obj` is a list,
a dict with `default_organism` that maps to `obj` is returned.
If `obj` is a string, a dict with `default_organism` that maps
to a list of one element, `obj`, is returned.
"""
if isinstance(obj, list):
return {default_organism: obj}
elif isinstance(obj, dict):
return dict(obj)
else:
return {default_organism: [obj]}
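def _example_get_samples_by_organism() -> None:
    """Editor-added illustrative sketch (not part of the original module);
    the sample names and the 'hg19' default are hypothetical."""
    assert get_samples_by_organism(["s1", "s2"], "hg19") == {"hg19": ["s1", "s2"]}
    assert get_samples_by_organism({"mm10": ["s3"]}, "hg19") == {"mm10": ["s3"]}
    assert get_samples_by_organism("s4", "hg19") == {"hg19": ["s4"]}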
def get_genome_ref_index_by_organism(config: Config, organism: str) -> Tuple[str, str]:
"""Return the reference file and the index file.
Select the `config.*_ref` and `config.*_index` depending on
`organism`.
"""
if organism == "hg19":
return (config.hg19_ref, config.hg19_index)
elif organism == "hg38":
return (config.hg38_ref, config.hg38_index)
elif organism == "mm9":
return (config.mm9_ref, config.mm9_index)
elif organism == "mm10":
return (config.mm10_ref, config.mm10_index)
else:
raise DataError("Invalid organism")
def get_dbsnp_by_organism(config: Config, organism: str) -> str:
"""Return the dbSNP filename.
Select the `config.dbsnp_*` depending on `organism`.
"""
if organism == "hg19":
return config.dbsnp_hg19
elif organism == "hg38":
return config.dbsnp_hg38
else:
raise DataError("Invalid organism")
def get_cosmic_by_organism(config: Config, organism: str) -> str:
"""Return the cosmic DB filename.
Select the `config.cosmic_*` depending on `organism`.
"""
if organism == "hg19":
return config.cosmic_hg19
elif organism == "hg38":
return config.cosmic_hg38
else:
raise DataError("Invalid organism")
def get_picard_max_records_string(max_records: str) -> str:
"""Get the max records string for Picard.
Create the 'MAX_RECORDS_IN_RAM' parameter using `max_records`. If
`max_records` is empty, an empty string is returned.
"""
if max_records is None or max_records == "":
return ""
else:
return " MAX_RECORDS_IN_RAM=%d" % int(max_records)
def find_fastqs_by_organism(
sample: str, fastq_dir: str, default_organism: str
) -> Dict[str, List[Tuple[str, int]]]:
"""Search for FASTQ files and group them by organism.
Find all the .fastq files inside `fastq_dir` that start with
`sample` and have a valid suffix. Group all the files by organism.
Args:
sample: the barcoded sample as string.
fastq_dir: the directory where the fastq files must be searched.
default_organism: the organism to use in case the organism field
in a filename is absent.
Returns:
A dict that maps an organism to a list of fastq files.
"""
re_fastq_filename = re.compile(
r"^%s(?:\.((?:hg|mm)\d+))?\.R([12])\.fastq(?:\.gz)?$" % sample, re.I
)
fastq_files = [
filename
for filename in os.listdir(fastq_dir)
if re_fastq_filename.match(filename)
]
fastqs: Dict[str, List[Tuple[str, int]]] = {}
for filename in fastq_files:
match = re_fastq_filename.match(filename)
assert match is not None
organism = match.group(1)
read_index = int(match.group(2))
if organism is None or organism == "":
organism = default_organism
if organism in fastqs:
fastqs[organism].append((filename, read_index))
else:
fastqs[organism] = [(filename, read_index)]
return fastqs
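def _example_fastq_name_pattern() -> None:
    """Editor-added illustrative sketch (not part of the original module):
    how the pattern used by find_fastqs_by_organism splits a hypothetical
    fastq file name into organism and read index."""
    pattern = re.compile(
        r"^%s(?:\.((?:hg|mm)\d+))?\.R([12])\.fastq(?:\.gz)?$" % "sample1", re.I
    )
    match = pattern.match("sample1.hg19.R2.fastq.gz")
    assert match is not None
    assert match.group(1) == "hg19"  # organism part of the barcode
    assert int(match.group(2)) == 2  # read index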
def gzip(filename: str) -> None:
"""Compress a file with GZ compression."""
compressed_filename = filename + ".gz"
with open(filename, "rb") as in_fd, gz.open(
compressed_filename, "wb", compresslevel=6
) as out_fd:
shutil.copyfileobj(in_fd, out_fd)
os.unlink(filename)
def gunzip(filename: str) -> None:
"""Decompress a GZ file."""
decompressed_filename = filename[:-3]
with open(decompressed_filename, "wb") as out_fd, gz.open(filename, "rb") as in_fd:
shutil.copyfileobj(in_fd, out_fd)
os.unlink(filename)
def check_gz(filename: str) -> bool:
"""Check if a GZ file is valid."""
chunk_size = 2 ** 20
with gz.open(filename, "rb") as fd:
try:
while fd.read(1):
fd.seek(chunk_size, os.SEEK_CUR)
return True
except Exception:
return False
def parsed_date(raw_date: str) -> str:
"""Parse a date in 'Y_M_D' format and return a std HaTSPiL date."""
try:
date = datetime.datetime.strptime(raw_date, "%Y_%m_%d")
except ValueError:
raise ArgumentTypeError("expected string in format YYYY_MM_DD")
return "%04d_%02d_%02d" % (date.year, date.month, date.day)
def get_human_annotation(config: Config) -> str:
"""Get the best human genome annotation available in config."""
if config.use_hg38:
return "hg38"
elif config.use_hg19:
return "hg19"
else:
raise AnnotationError("no available human annotation in config")
def get_mouse_annotation(config: Config) -> str:
"""Get the best murine genome annotation available in config."""
if config.use_mm10:
return "mm10"
elif config.use_mm9:
return "mm9"
else:
raise AnnotationError("no available mouse annotation in config")
reFloat = re.compile(r"^(\d+\.\d*|\.\d+)$")
reInt = re.compile(r"^(\d+)$")
def parse_as_number(s: str) -> Union[int, float, str]:
"""Try to parse a string as number.
If `s` matches a float format, a parsed float is returned. If `s`
    matches an int, a parsed int is returned. Otherwise `s` is returned.
"""
if reFloat.match(s):
return float(s)
elif reInt.match(s):
return int(s)
else:
return s
T = TypeVar("T")
U = TypeVar("U")
def flatten(iterable: Iterable[Union[Iterable[T], Any]]) -> Generator[Any, None, None]:
"""Return a generator, flattening recusively an iterable object."""
for element in iterable:
if isinstance(element, collections.Iterable) and not isinstance(element, str):
yield from flatten(element)
else:
yield element
def rfind_if(iterable: Sequence[T], fun: Callable[[T], bool]) -> Optional[int]:
"""Reverse find an object in an iterable that satisfies `fun`.
Args:
iterable: an iterable object.
fun: a function that returns `True` when the item is found.
Returns:
        One past the index (in the original sequence) of the last element for
        which `fun` returns `True`, i.e. `len(iterable) - i` with `i` the
        position of the match in the reversed sequence, or `None` if no
        element satisfies `fun`.
"""
for index, element in enumerate(reversed(iterable)):
if fun(element):
return len(iterable) - index
return None
def argmin(
iterable: Iterable[T], key: Optional[Callable[[T], U]] = None
) -> Optional[int]:
"""Like `min`, but return the index of the element found."""
best = min(
((index, element) for (index, element) in enumerate(iterable)),
key=lambda x: key(x[1]) if key else x[1],
)
if best is not None:
return best[0]
else:
return None
def create_logger(
logger_name: str, handler: Optional[logging.FileHandler] = None
) -> Logger:
"""Create a named logger and add a handler to this."""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
if handler:
logger.addHandler(handler)
return logger
def get_kit_from_barcoded(
config: Config, barcoded: BarcodedFilename
) -> Optional[KitData]:
"""Get a kit from the config given a barcoded filename."""
assert barcoded.kit is not None
assert barcoded.analyte is not None
return config.kits.get((barcoded.kit, barcoded.analyte))
| 32.485327 | 87 | 0.625599 | 0 | 0 | 355 | 0.024668 | 0 | 0 | 0 | 0 | 6,069 | 0.421722 |
7d7258deda24afb1f717d1778a24d42c5aaa3305 | 2,556 | py | Python | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
]
| null | null | null | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
]
| null | null | null | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
]
| null | null | null | import csv
from typing import List
from CombinedPopulation import CombinedPopulation
from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents
class DistrictVotingRecord:
def __init__(self,
district: str,
incumbent: str,
expected_lean: float,
d1: float, r1: float,
d2: float, r2: float):
self.district = district
self.incumbent = incumbent
self.expected_lean = expected_lean
self.d1 = d1
self.r1 = r1
self.d2 = d2
self.r2 = r2
l1 = .5 - d1 / (d1 + r1)
l2 = .5 - d2 / (d2 + r2)
self.lean = 100 * (l1 + l2) / 2
def print(self) -> None:
print("%6s %25s % 5.2f" % (self.district, self.incumbent, self.lean))
def population(self, partisanship: float, skew_factor: float, stddev: float) -> CombinedPopulation:
s = self
r_pct = (s.r1 + s.r2) / 2 / 100
d_pct = (s.d1 + s.d2) / 2 / 100
i_weight = .20
r_weight = max(0.05, (1 - i_weight) * r_pct)
d_weight = max(0.05, (1 - i_weight) * d_pct)
skew = (r_weight - d_weight) / 2.0 * skew_factor * 100
rep = PopulationGroup(Republicans, partisanship + skew, stddev, r_weight, 12)
dem = PopulationGroup(Democrats, -partisanship + skew, stddev, d_weight, -12)
ind = PopulationGroup(Independents, 0 + skew, stddev, i_weight, 0)
return CombinedPopulation([rep, dem, ind])
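# Editor-added numeric sketch (hypothetical vote shares): with d1=60, r1=40 and
# d2=55, r2=45, l1 = .5 - 60/100 = -0.10 and l2 = .5 - 55/100 = -0.05, so
# lean = 100 * (l1 + l2) / 2 = -7.5 (negative values lean Democratic, matching
# the sign convention used in parse_row below).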
class DistrictData:
def __init__(self, path: str):
self.path = path
self.dvr = {}
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for row in csv_reader:
if row[0] != 'district':
dvr = self.parse_row(row)
self.dvr[dvr.district] = dvr
def parse_row(self, row: List[str]) -> DistrictVotingRecord:
if row[2] == 'EVEN':
lean = 0
elif row[2][0] == 'R':
lean = float(row[2][2:])
else:
lean = -float(row[2][2:])
d1 = float(row[3])
r1 = float(row[4])
if row[5] == 'null':
d2 = d1
r2 = r1
else:
d2 = float(row[5])
r2 = float(row[6])
return DistrictVotingRecord(row[0], row[1], lean, d1, r1, d2, r2)
def main():
dd = DistrictData("data-5vPn3.csv")
print("got dd")
for k, v in dd.dvr.items():
v.print()
if __name__ == "__main__":
main()
| 29.72093 | 103 | 0.534429 | 2,219 | 0.868153 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.030908 |
7d72c0bcd96eb18d89e4b84f9f4aa4228039c607 | 102 | py | Python | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
]
| 4 | 2015-04-10T10:41:18.000Z | 2016-06-16T01:19:15.000Z | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
]
| 2 | 2015-12-18T12:24:05.000Z | 2015-12-18T17:00:27.000Z | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
]
| 7 | 2015-11-17T17:53:37.000Z | 2016-03-29T06:21:17.000Z | from django.core.urlresolvers import Resolver404
class MiddlewareResolver404(Resolver404):
pass
| 17 | 48 | 0.823529 | 50 | 0.490196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7d745ae2b2c11edcf86ebca48a6d9d1699e9100c | 98 | py | Python | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
]
| 8 | 2017-10-01T04:47:12.000Z | 2022-02-15T10:16:11.000Z | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
]
| null | null | null | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
]
| 4 | 2018-04-06T08:27:02.000Z | 2021-05-11T07:56:17.000Z | # coding=utf-8
from actrie.tests.test_matcher import test
if __name__ == "__main__":
test()
| 14 | 42 | 0.704082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.244898 |
7d7502212e99f51f8f089c24fff476d5cecb479f | 5,137 | py | Python | warehouse/email/services.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
]
| 1 | 2022-03-29T11:56:45.000Z | 2022-03-29T11:56:45.000Z | warehouse/email/services.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
]
| 358 | 2022-01-03T05:30:40.000Z | 2022-03-31T05:40:50.000Z | warehouse/email/services.py | anthonysidesap/warehouse | 140a2cc3cc007daca5f7fa2878a43e7e152d8959 | [
"Apache-2.0"
]
| null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email.headerregistry import Address
from email.message import EmailMessage as RawEmailMessage
from email.utils import parseaddr
from typing import Optional
import premailer
from jinja2.exceptions import TemplateNotFound
from pyramid.renderers import render
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from zope.interface import implementer
from warehouse.email.interfaces import IEmailSender
from warehouse.email.ses.models import EmailMessage as SESEmailMessage
def _format_sender(sitename, sender):
if sender is not None:
return str(Address(sitename, addr_spec=sender))
class EmailMessage:
def __init__(self, subject: str, body_text: str, body_html: Optional[str] = None):
self.subject = subject
self.body_text = body_text
self.body_html = body_html
@classmethod
def from_template(cls, email_name, context, *, request):
subject = render(f"email/{email_name}/subject.txt", context, request=request)
body_text = render(f"email/{email_name}/body.txt", context, request=request)
try:
body_html = render(
f"email/{email_name}/body.html", context, request=request
)
body_html = premailer.Premailer(body_html, remove_classes=True).transform()
# Catching TemplateNotFound here is a bit of a leaky abstraction, but there's
# not much we can do about it.
except TemplateNotFound:
body_html = None
return cls(subject=subject, body_text=body_text, body_html=body_html)
@implementer(IEmailSender)
class SMTPEmailSender:
def __init__(self, mailer, sender=None):
self.mailer = mailer
self.sender = sender
@classmethod
def create_service(cls, context, request):
sitename = request.registry.settings["site.name"]
sender = _format_sender(sitename, request.registry.settings.get("mail.sender"))
return cls(get_mailer(request), sender=sender)
def send(self, recipient, message):
self.mailer.send_immediately(
Message(
subject=message.subject,
body=message.body_text,
html=message.body_html,
recipients=[recipient],
sender=self.sender,
)
)
def last_sent(self, to, subject):
        # We don't store previously sent emails, so nothing to compare against
return None
@implementer(IEmailSender)
class SESEmailSender:
def __init__(self, client, *, sender=None, db):
self._client = client
self._sender = sender
self._db = db
@classmethod
def create_service(cls, context, request):
sitename = request.registry.settings["site.name"]
sender = _format_sender(sitename, request.registry.settings.get("mail.sender"))
aws_session = request.find_service(name="aws.session")
return cls(
aws_session.client(
"ses", region_name=request.registry.settings.get("mail.region")
),
sender=sender,
db=request.db,
)
def send(self, recipient, message):
raw = RawEmailMessage()
raw["Subject"] = message.subject
raw["From"] = self._sender
raw["To"] = recipient
raw.set_content(message.body_text)
if message.body_html:
raw.add_alternative(message.body_html, subtype="html")
resp = self._client.send_raw_email(
Source=self._sender,
Destinations=[recipient],
RawMessage={"Data": bytes(raw)},
)
self._db.add(
SESEmailMessage(
message_id=resp["MessageId"],
from_=parseaddr(self._sender)[1],
to=parseaddr(recipient)[1],
subject=message.subject,
)
)
def last_sent(self, to, subject):
last_email = (
self._db.query(SESEmailMessage)
.filter(
SESEmailMessage.to == to,
SESEmailMessage.subject == subject,
)
.order_by(SESEmailMessage.created.desc())
.first()
)
if last_email:
return last_email.created
class ConsoleAndSMTPEmailSender(SMTPEmailSender):
def send(self, recipient, message):
super().send(recipient=recipient, message=message)
print(
f"""Email sent
Subject: {message.subject}
From: {self.sender}
To: {recipient}
HTML: Visualize at http://localhost:1080
Text: {message.body_text}"""
)
| 32.308176 | 87 | 0.647265 | 3,899 | 0.759003 | 0 | 0 | 3,407 | 0.663228 | 0 | 0 | 1,068 | 0.207903 |
7d762add2bb0e919d8e50f41074b703f99873c98 | 265 | py | Python | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
]
| 47 | 2020-11-15T03:36:48.000Z | 2021-04-08T05:28:02.000Z | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
]
| 78 | 2020-11-14T17:55:28.000Z | 2021-04-06T08:55:24.000Z | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
]
| 15 | 2020-11-14T18:01:04.000Z | 2021-02-16T14:50:12.000Z | import torch
__all__ = ["_load_pretrained_weights"]
def _load_pretrained_weights(weights_dict, model_name: str, pretrained: str):
state_dict = torch.hub.load_state_dict_from_url(weights_dict[model_name][pretrained], map_location="cpu")
return state_dict
| 29.444444 | 109 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.116981 |
7d762e8385c0a3df789a5bd08064a714cdafb006 | 2,420 | py | Python | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
]
| 7 | 2022-01-28T06:50:00.000Z | 2022-02-14T11:34:32.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
]
| 30 | 2022-01-26T17:54:48.000Z | 2022-03-21T12:33:53.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
]
| null | null | null | from typing import Optional, List
from pathlib import Path
from dataclasses import astuple
import re
from pydantic import BaseModel, Field, Extra, validator
from pydantic.dataclasses import dataclass
from woke.core.enums import EvmVersionEnum
from woke.c_regex_parsing.solidity_version import SolidityVersion
class WokeConfigModel(BaseModel):
class Config:
allow_mutation = False
json_encoders = {
SolidityVersion: str,
}
extra = Extra.forbid
@dataclass
class SolcRemapping:
context: Optional[str]
prefix: str
target: Optional[str]
def __iter__(self):
return iter(astuple(self))
def __str__(self):
return f"{self.context or ''}:{self.prefix}={self.target or ''}"
class SolcWokeConfig(WokeConfigModel):
allow_paths: List[Path] = []
"""Woke should set solc `--allow-paths` automatically. This option allows to specify additional allowed paths."""
evm_version: Optional[EvmVersionEnum] = None
"""Version of the EVM to compile for. Leave unset to let the solc decide."""
include_paths: List[Path] = []
remappings: List[SolcRemapping] = []
target_version: Optional[SolidityVersion] = None
@validator("allow_paths", pre=True, each_item=True)
def set_allow_path(cls, v):
return Path(v).resolve()
@validator("include_paths", pre=True, each_item=True)
def set_include_path(cls, v):
return Path(v).resolve()
@validator("remappings", pre=True, each_item=True)
def set_remapping(cls, v):
if isinstance(v, SolcRemapping):
return v
remapping_re = re.compile(
r"(?:(?P<context>[^:\s]+)?:)?(?P<prefix>[^\s=]+)=(?P<target>[^\s]+)?"
)
match = remapping_re.match(v)
assert match, f"`{v}` is not a valid solc remapping."
groupdict = match.groupdict()
context = groupdict["context"]
prefix = groupdict["prefix"]
target = groupdict["target"]
return SolcRemapping(context, prefix, target)
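    # Editor-added example (hypothetical remapping): "ctx:@lib/=node_modules/@lib/"
    # parses to SolcRemapping(context="ctx", prefix="@lib/", target="node_modules/@lib/"),
    # while "@lib/=node_modules/@lib/" leaves context as None.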
class CompilerWokeConfig(WokeConfigModel):
solc: SolcWokeConfig = Field(default_factory=SolcWokeConfig)
class TopLevelWokeConfig(WokeConfigModel):
subconfigs: List[Path] = []
compiler: CompilerWokeConfig = Field(default_factory=CompilerWokeConfig)
@validator("subconfigs", pre=True, each_item=True)
def set_subconfig(cls, v):
return Path(v).resolve()
| 30.25 | 117 | 0.673554 | 2,083 | 0.860744 | 0 | 0 | 1,183 | 0.488843 | 0 | 0 | 431 | 0.178099 |
7d765dcd0b83ec7b2f5cef707b8de57d0e0211e3 | 1,399 | py | Python | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
]
| null | null | null | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
]
| null | null | null | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
]
| null | null | null | import tensorflow as tf
import tensorflow.contrib.layers as tfl
"""Copied from the almighty Christian Hundt;
CECAM/CSM/IRTG School 2018: Machine Learning in Scientific Computing
https://github.com/CECAML/school_nierstein_2018/blob/master/Convnet%20TF.ipynb
"""
def prelu(net):
alpha = tf.Variable(0.0, dtype=net.dtype)
return tf.maximum(alpha * net, net)
def residual_conv_block(net, num_filters, kernel_size, stride, is_training=True):
# let us cache the input tensor and downsample it
inp = tfl.avg_pool2d(net, kernel_size, stride, padding="SAME")
# now convolve with stride (potential downsampling)
net = tfl.conv2d(net, num_filters, kernel_size, stride, activation_fn=tf.identity, padding="SAME")
# normalize the output
net = tfl.batch_norm(net, is_training=is_training, activation_fn=tf.identity)
# now convolve again but do not downsample
net = tfl.conv2d(net, num_filters, kernel_size, stride=1, activation_fn=tf.identity, padding="SAME")
return prelu(tf.concat((net, inp), axis=-1))
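# Editor-added note (sketch): each residual_conv_block halves the spatial size
# (stride 2, SAME padding) and, because of the final concat with the pooled
# input, outputs num_filters + in_channels feature maps; e.g. a hypothetical
# 32x32x3 input with num_filters=16 comes out as 16x16x19.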
def network(X, Y):
net = tf.identity(X)
net = residual_conv_block(net, 16, 3, 2)
net = residual_conv_block(net, 32, 3, 2)
net = residual_conv_block(net, 64, 3, 2)
net = residual_conv_block(net, 128, 3, 2)
net = tf.reduce_mean(net, axis=(1, 2))
net = tfl.fully_connected(net, 10, activation_fn=tf.identity)
return net
| 31.795455 | 104 | 0.717655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.270908 |
7d76a9eff5e5d91d0da51d617aa1f132efbb6c52 | 517 | py | Python | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
]
| null | null | null | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
]
| null | null | null | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-
import tornado.web
from app.views import HelloWorld
from app.ws.communication import CommunicationSocketHandler
class Application(tornado.web.Application):
def __init__(self, db=None):
self.db = db
handlers = [
(r"/", HelloWorld),
(r"//", CommunicationSocketHandler),
]
settings = dict(
debug=True
)
super(Application, self).__init__(handlers, **settings)
| 23.5 | 63 | 0.599613 | 331 | 0.640232 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.145068 |
7d76d0e887ea0135157eb8f9b5b96280465e3061 | 31,326 | py | Python | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
]
| 2 | 2020-02-07T19:02:07.000Z | 2021-05-28T15:44:48.000Z | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
]
| null | null | null | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
]
| null | null | null | #
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Manages WrapperFormatter objects.
WrapperFormatter objects can be used for wrapping CLI column celldata in order
for the CLI table (using prettyTable) to fit the terminal screen
The basic idea is:
Once celldata is retrieved and ready to display, first iterate through the celldata
and word wrap it so that fits programmer desired column widths. The
WrapperFormatter objects fill this role.
Once the celldata is formatted to their desired widths, then it can be passed to
the existing prettyTable code base for rendering.
"""
import copy
import re
import six
import textwrap
from fmclient.common.cli_no_wrap import is_nowrap_set
from fmclient.common.cli_no_wrap import set_no_wrap
from prettytable import _get_size
from six.moves import range
UUID_MIN_LENGTH = 36
# monkey patch (customize) how the textwrap module breaks text into chunks
wordsep_re = re.compile(r'(\s+|' # any whitespace
r',|'
r'=|'
r'\.|'
r':|'
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
textwrap.TextWrapper.wordsep_re = wordsep_re
def get_width(value):
if value is None:
return 0
return _get_size(six.text_type(value))[0] # get width from [width,height]
def _get_terminal_width():
from fmclient.common.utils import get_terminal_size
result = get_terminal_size()[0]
return result
def is_uuid_field(field_name):
"""
:param field_name:
:return: True if field_name looks like a uuid name
"""
    if field_name is not None and (field_name in ["uuid", "UUID"] or
                                   field_name.endswith("uuid")):
return True
return False
class WrapperContext(object):
"""Context for the wrapper formatters
Maintains a list of the current WrapperFormatters
being used to format the prettyTable celldata
Allows wrappers access to its 'sibling' wrappers
contains convenience methods and attributes
for calculating current tableWidth.
"""
def __init__(self):
self.wrappers = []
self.wrappers_by_field = {}
self.non_data_chrs_used_by_table = 0
self.num_columns = 0
self.terminal_width = -1
def set_num_columns(self, num_columns):
self.num_columns = num_columns
self.non_data_chrs_used_by_table = (num_columns * 3) + 1
def add_column_formatter(self, field, wrapper):
self.wrappers.append(wrapper)
self.wrappers_by_field[field] = wrapper
def get_terminal_width(self):
if self.terminal_width == -1:
self.terminal_width = _get_terminal_width()
return self.terminal_width
def get_table_width(self):
"""
Calculates table width by looping through all
column formatters and summing up their widths
:return: total table width
"""
widths = [w.get_actual_column_char_len(w.get_calculated_desired_width(), check_remaining_row_chars=False) for w
in
self.wrappers]
chars_used_by_data = sum(widths)
width = self.non_data_chrs_used_by_table + chars_used_by_data
return width
def is_table_too_wide(self):
"""
:return: True if calculated table width is too wide for the terminal width
"""
if self.get_terminal_width() < self.get_table_width():
return True
return False
def field_value_function_factory(formatter, field):
"""Builds function for getting a field value from table cell celldata
As a side-effect, attaches function as the 'get_field_value' attribute
of the formatter
:param formatter:the formatter to attach return function to
:param field:
:return: function that returns cell celldata
"""
def field_value_function_builder(data):
if isinstance(data, dict):
formatter.get_field_value = lambda celldata: celldata.get(field, None)
else:
formatter.get_field_value = lambda celldata: getattr(celldata, field)
return formatter.get_field_value(data)
return field_value_function_builder
class WrapperFormatter(object):
"""Base (abstract) class definition of wrapping formatters"""
def __init__(self, ctx, field):
self.ctx = ctx
self.add_blank_line = False
self.no_wrap = False
self.min_width = 0
self.field = field
self.header_width = 0
self.actual_column_char_len = -1
self.textWrapper = None
if self.field:
self.get_field_value = field_value_function_factory(self, field)
else:
self.get_field_value = lambda data: data
def get_basic_desired_width(self):
return self.min_width
def get_calculated_desired_width(self):
basic_desired_width = self.get_basic_desired_width()
if self.header_width > basic_desired_width:
return self.header_width
return basic_desired_width
def get_sibling_wrappers(self):
"""
:return: a list of your sibling wrappers for the other fields
"""
others = [w for w in self.ctx.wrappers if w != self]
return others
def get_remaining_row_chars(self):
used = [w.get_actual_column_char_len(w.get_calculated_desired_width(),
check_remaining_row_chars=False)
for w in self.get_sibling_wrappers()]
chrs_used_by_data = sum(used)
remaining_chrs_in_row = (self.ctx.get_terminal_width() -
self.ctx.non_data_chrs_used_by_table) - chrs_used_by_data
return remaining_chrs_in_row
def set_min_width(self, min_width):
self.min_width = min_width
def set_actual_column_len(self, actual):
self.actual_column_char_len = actual
def get_actual_column_char_len(self, desired_char_len, check_remaining_row_chars=True):
"""Utility method to adjust desired width to a width
that can actually be applied based on current table width
and current terminal width
Will not allow actual width to be less than min_width
min_width is typically length of the column header text
or the longest 'word' in the celldata
:param desired_char_len:
:param check_remaining_row_chars:
:return:
"""
if self.actual_column_char_len != -1:
return self.actual_column_char_len # already calculated
if desired_char_len < self.min_width:
actual = self.min_width
else:
actual = desired_char_len
if check_remaining_row_chars and actual > self.min_width:
remaining = self.get_remaining_row_chars()
if actual > remaining >= self.min_width:
actual = remaining
if check_remaining_row_chars:
self.set_actual_column_len(actual)
if self.ctx.is_table_too_wide():
# Table too big can I shrink myself?
if actual > self.min_width:
# shrink column
while actual > self.min_width:
actual -= 1 # TODO(jkung): fix in next sprint
# each column needs to share in
# table shrinking - but this is good
# enough for now - also - why the loop?
self.set_actual_column_len(actual)
return actual
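    # Editor-added worked example (hypothetical numbers): with desired_char_len=40,
    # min_width=12 and only 20 characters left in the row, the column is cut back
    # to 20; it is never cut below min_width, and may be squeezed further toward
    # min_width if the table is still too wide for the terminal.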
def _textwrap_fill(self, s, actual_width):
if not self.textWrapper:
self.textWrapper = textwrap.TextWrapper(actual_width)
else:
self.textWrapper.width = actual_width
return self.textWrapper.fill(s)
def text_wrap(self, s, width):
"""
performs actual text wrap
:param s:
:param width: in characters
:return: formatted text
"""
if self.no_wrap:
return s
actual_width = self.get_actual_column_char_len(width)
new_s = self._textwrap_fill(s, actual_width)
wrapped = new_s != s
if self.add_blank_line and wrapped:
new_s += "\n".ljust(actual_width)
return new_s
def format(self, data):
return str(self.get_field_value(data))
def get_unwrapped_field_value(self, data):
return self.get_field_value(data)
def as_function(self):
def foo(data):
return self.format(data)
foo.WrapperFormatterMarker = True
foo.wrapper_formatter = self
return foo
@staticmethod
def is_wrapper_formatter(foo):
if not foo:
return False
return getattr(foo, "WrapperFormatterMarker", False)
class WrapperLambdaFormatter(WrapperFormatter):
"""A wrapper formatter that adapts a function (callable)
to look like a WrapperFormatter
"""
def __init__(self, ctx, field, format_function):
super(WrapperLambdaFormatter, self).__init__(ctx, field)
self.format_function = format_function
def format(self, data):
return self.format_function(self.get_field_value(data))
class WrapperFixedWidthFormatter(WrapperLambdaFormatter):
"""A wrapper formatter that forces the text to wrap within
a specific width (in chars)
"""
def __init__(self, ctx, field, width):
super(WrapperFixedWidthFormatter, self).__init__(ctx, field,
lambda data:
self.text_wrap(str(data),
self.get_calculated_desired_width()))
self.width = width
def get_basic_desired_width(self):
return self.width
class WrapperPercentWidthFormatter(WrapperFormatter):
"""A wrapper formatter that forces the text to wrap within
a specific percentage width of the current terminal width
"""
def __init__(self, ctx, field, width_as_decimal):
super(WrapperPercentWidthFormatter, self).__init__(ctx, field)
self.width_as_decimal = width_as_decimal
def get_basic_desired_width(self):
width = int((self.ctx.get_terminal_width() - self.ctx.non_data_chrs_used_by_table) *
self.width_as_decimal)
return width
def format(self, data):
width = self.get_calculated_desired_width()
field_value = self.get_field_value(data)
return self.text_wrap(str(field_value), width)
class WrapperWithCustomFormatter(WrapperLambdaFormatter):
"""A wrapper formatter that allows the programmer to have a custom
formatter (in the form of a function) that is first applied
and then a wrapper function is applied to the result
See wrapperFormatterFactory for a better explanation! :-)
"""
# noinspection PyUnusedLocal
def __init__(self, ctx, field, custom_formatter, wrapper_formatter):
super(WrapperWithCustomFormatter, self).__init__(ctx, None,
lambda data: wrapper_formatter.format(custom_formatter(data)))
self.wrapper_formatter = wrapper_formatter
self.custom_formatter = custom_formatter
def get_unwrapped_field_value(self, data):
return self.custom_formatter(data)
def __setattr__(self, name, value):
#
# Some attributes set onto this class need
# to be pushed down to the 'inner' wrapper_formatter
#
super(WrapperWithCustomFormatter, self).__setattr__(name, value)
if hasattr(self, "wrapper_formatter"):
if name == "no_wrap":
self.wrapper_formatter.no_wrap = value
if name == "add_blank_line":
self.wrapper_formatter.add_blank_line = value
if name == "header_width":
self.wrapper_formatter.header_width = value
def set_min_width(self, min_width):
super(WrapperWithCustomFormatter, self).set_min_width(min_width)
self.wrapper_formatter.set_min_width(min_width)
def set_actual_column_len(self, actual):
super(WrapperWithCustomFormatter, self).set_actual_column_len(actual)
self.wrapper_formatter.set_actual_column_len(actual)
def get_basic_desired_width(self):
return self.wrapper_formatter.get_basic_desired_width()
def wrapper_formatter_factory(ctx, field, formatter):
"""
This function is a factory for building WrapperFormatter objects.
The function needs to be called for each celldata column (field)
that will be displayed in the prettyTable.
The function looks at the formatter parameter and based on its type,
determines what WrapperFormatter to construct per field (column).
ex:
formatter = 15 - type = int : Builds a WrapperFixedWidthFormatter that
will wrap at 15 chars
        formatter = .25 - type = float : Builds a WrapperPercentWidthFormatter that
will wrap at 25% terminal width
formatter = type = callable : Builds a WrapperLambdaFormatter that
will call some arbitrary function
formatter = type = dict : Builds a WrapperWithCustomFormatter that
will call some arbitrary function to format
and then apply a wrapping formatter to the result
                                  ex: this dict {"formatter" : capitalizeFunction,
"wrapperFormatter": .12}
                                  will apply the capitalizeFunction to the column
celldata and then wordwrap at 12 % of terminal width
:param ctx: the WrapperContext that the built WrapperFormatter will use
:param field: name of field (column_ that the WrapperFormatter will execute on
:param formatter: specifies type and input for WrapperFormatter that will be built
:return: WrapperFormatter
"""
if isinstance(formatter, WrapperFormatter):
return formatter
if callable(formatter):
return WrapperLambdaFormatter(ctx, field, formatter)
if isinstance(formatter, int):
return WrapperFixedWidthFormatter(ctx, field, formatter)
if isinstance(formatter, float):
return WrapperPercentWidthFormatter(ctx, field, formatter)
if isinstance(formatter, dict):
if "wrapperFormatter" in formatter:
embedded_wrapper_formatter = wrapper_formatter_factory(ctx, None,
formatter["wrapperFormatter"])
elif "hard_width" in formatter:
embedded_wrapper_formatter = WrapperFixedWidthFormatter(ctx, field, formatter["hard_width"])
embedded_wrapper_formatter.min_width = formatter["hard_width"]
else:
embedded_wrapper_formatter = WrapperFormatter(ctx, None) # effectively a NOOP width formatter
if "formatter" not in formatter:
return embedded_wrapper_formatter
custom_formatter = formatter["formatter"]
wrapper = WrapperWithCustomFormatter(ctx, field, custom_formatter, embedded_wrapper_formatter)
return wrapper
raise Exception("Formatter Error! Unrecognized formatter {} for field {}".format(formatter, field))
def build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters={}):
class ColumnStats(object):
def __init__(self, field, field_label, custom_formatter=None):
self.field = field
self.field_label = field_label
self.average_width = 0
self.min_width = get_width(field_label) if field_label else 0
self.max_width = get_width(field_label) if field_label else 0
self.total_width = 0
self.count = 0
self.average_percent = 0
self.max_percent = 0
self.isUUID = is_uuid_field(field)
if custom_formatter:
self.get_field_value = custom_formatter
else:
self.get_field_value = field_value_function_factory(self, field)
def add_value(self, value):
if self.isUUID:
return
self.count += 1
value_width = get_width(value)
self.total_width = self.total_width + value_width
if value_width < self.min_width:
self.min_width = value_width
if value_width > self.max_width:
self.max_width = value_width
if self.count > 0:
self.average_width = float(self.total_width) / float(self.count) # pylint: disable=old-division
def set_max_percent(self, max_total_width):
if max_total_width > 0:
self.max_percent = float(self.max_width) / float(max_total_width) # pylint: disable=old-division
def set_avg_percent(self, avg_total_width):
if avg_total_width > 0:
self.average_percent = float(self.average_width) / float(avg_total_width) # pylint: disable=old-division
def __str__(self):
return str([self.field,
self.average_width,
self.min_width,
self.max_width,
self.total_width,
self.count,
self.average_percent,
self.max_percent,
self.isUUID])
def __repr__(self):
return str([self.field,
self.average_width,
self.min_width,
self.max_width,
self.total_width,
self.count,
self.average_percent,
self.max_percent,
self.isUUID])
if objs is None or len(objs) == 0:
return {"stats": {},
"total_max_width": 0,
"total_avg_width": 0}
stats = {}
for i in range(0, len(fields)):
stats[fields[i]] = ColumnStats(fields[i], field_labels[i], custom_formatters.get(fields[i]))
for obj in objs:
for field in fields:
column_stat = stats[field]
column_stat.add_value(column_stat.get_field_value(obj))
total_max_width = sum([s.max_width for s in stats.values()])
total_avg_width = sum([s.average_width for s in stats.values()])
return {"stats": stats,
"total_max_width": total_max_width,
"total_avg_width": total_avg_width}
def build_best_guess_formatters_using_average_widths(objs, fields, field_labels, custom_formatters={}, no_wrap_fields=[]):
column_info = build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters)
format_spec = {}
total_avg_width = float(column_info["total_avg_width"])
if total_avg_width <= 0:
return format_spec
for f in [ff for ff in fields if ff not in no_wrap_fields]:
format_spec[f] = float(column_info["stats"][f].average_width) / total_avg_width # pylint: disable=old-division
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
# Handle no wrap fields by building formatters that will not wrap
for f in [ff for ff in fields if ff in no_wrap_fields]:
format_spec[f] = {"hard_width": column_info["stats"][f].max_width}
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
return format_spec
def build_best_guess_formatters_using_max_widths(objs, fields, field_labels, custom_formatters={}, no_wrap_fields=[]):
column_info = build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters)
format_spec = {}
for f in [ff for ff in fields if ff not in no_wrap_fields]:
format_spec[f] = float(column_info["stats"][f].max_width) / float(column_info["total_max_width"]) # pylint: disable=old-division
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
# Handle no wrap fields by building formatters that will not wrap
for f in [ff for ff in fields if ff in no_wrap_fields]:
format_spec[f] = {"hard_width": column_info["stats"][f].max_width}
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
return format_spec
def needs_wrapping_formatters(formatters, no_wrap=None):
no_wrap = is_nowrap_set(no_wrap)
if no_wrap:
return False
# handle easy case:
if not formatters:
return True
# If we have at least one wrapping formatter,
# then we assume we don't need to wrap
for f in formatters.values():
if WrapperFormatter.is_wrapper_formatter(f):
return False
# looks like we need wrapping
return True
def as_wrapping_formatters(objs, fields, field_labels, formatters, no_wrap=None, no_wrap_fields=[]):
"""This function is the entry point for building the "best guess"
word wrapping formatters. A best guess formatter guesses what the best
columns widths should be for the table celldata. It does this by collecting
various stats on the celldata (min, max average width of column celldata) and from
this celldata decides the desired widths and the minimum widths.
Given a list of formatters and the list of objects (objs), this function
first determines if we need to augment the passed formatters with word wrapping
formatters. If the no_wrap parameter or global no_wrap flag is set,
then we do not build wrapping formatters. If any of the formatters within formatters
is a word wrapping formatter, then it is assumed no more wrapping is required.
:param objs:
:param fields:
:param field_labels:
:param formatters:
:param no_wrap:
:param no_wrap_fields:
:return: When no wrapping is required, the formatters parameter is returned
-- effectively a NOOP in this case
When wrapping is required, best-guess word wrapping formatters are returned
with original parameter formatters embedded in the word wrapping formatters
"""
no_wrap = is_nowrap_set(no_wrap)
if not needs_wrapping_formatters(formatters, no_wrap):
return formatters
format_spec = build_best_guess_formatters_using_average_widths(objs, fields, field_labels, formatters, no_wrap_fields)
formatters = build_wrapping_formatters(objs, fields, field_labels, format_spec)
return formatters
def build_wrapping_formatters(objs, fields, field_labels, format_spec, add_blank_line=True,
no_wrap=None, use_max=False):
"""
A convenience function for building all wrapper formatters that will be used to
format a CLI's output when its rendered in a prettyTable object.
It iterates through the keys of format_spec and calls wrapperFormatterFactory to build
wrapperFormatter objects for each column.
Its best to show by example parameters:
field_labels = ['UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity']
fields = ['uuid', 'timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity']
format_spec = {
"uuid" : .10, # float = so display as 10% of terminal width
"timestamp" : .08,
"state" : .08,
"event_log_id" : .07,
"reason_text" : .42,
"entity_instance_id" : .13,
"severity" : {"formatter" : captializeFunction,
"wrapperFormatter": .12}
}
:param objs: the actual celldata that will get word wrapped
:param fields: fields (attributes of the celldata) that will be displayed in the table
:param field_labels: column (field headers)
:param format_spec: dict specify formatter for each column (field)
    :param add_blank_line: default True, when True adds a blank line to the column if it wraps, aids readability
:param no_wrap: default False, when True turns wrapping off but does not suppress other custom formatters
:param use_max
:return: wrapping formatters as functions
"""
no_wrap = set_no_wrap(no_wrap)
if objs is None or len(objs) == 0:
return {}
    biggest_word_pattern = re.compile(r"[\.:,;\!\?\\ =\-\_]")  # '-' escaped so it is literal, not a range
def get_biggest_word(s):
return max(biggest_word_pattern.split(s), key=len)
wrapping_formatters_as_functions = {}
if len(fields) != len(field_labels):
raise Exception("Error in buildWrappingFormatters: "
"len(fields) = {}, len(field_labels) = {},"
" they must be the same length!".format(len(fields),
len(field_labels)))
field_to_label = {}
for i in range(0, len(fields)):
field_to_label[fields[i]] = field_labels[i]
ctx = WrapperContext()
ctx.set_num_columns(len(fields))
if not format_spec:
if use_max:
format_spec = build_best_guess_formatters_using_max_widths(objs, fields, field_labels)
else:
format_spec = build_best_guess_formatters_using_average_widths(objs, fields, field_labels)
for k in list(format_spec.keys()):
if k not in fields:
raise Exception("Error in buildWrappingFormatters: format_spec "
"specifies a field {} that is not specified "
"in fields : {}".format(k, fields))
format_spec_for_k = copy.deepcopy(format_spec[k])
if callable(format_spec_for_k):
format_spec_for_k = {"formatter": format_spec_for_k}
wrapper_formatter = wrapper_formatter_factory(ctx, k, format_spec_for_k)
if wrapper_formatter.min_width <= 0:
# need to specify min-width so that
# column is not unnecessarily squashed
if is_uuid_field(k): # special case
wrapper_formatter.set_min_width(UUID_MIN_LENGTH)
else:
# column width cannot be smaller than the widest word
column_data = [str(wrapper_formatter.get_unwrapped_field_value(data)) for data in objs]
widest_word_in_column = max([get_biggest_word(d) + " "
for d in column_data + [field_to_label[k]]], key=len)
wrapper_formatter.set_min_width(len(widest_word_in_column))
wrapper_formatter.header_width = get_width(field_to_label[k])
wrapper_formatter.add_blank_line = add_blank_line
wrapper_formatter.no_wrap = no_wrap
wrapping_formatters_as_functions[k] = wrapper_formatter.as_function()
ctx.add_column_formatter(k, wrapper_formatter)
return wrapping_formatters_as_functions
def set_no_wrap_on_formatters(no_wrap, formatters):
"""
Purpose of this function is to temporarily force
the no_wrap setting for the formatters parameter.
returns orig_no_wrap_settings defined for each formatter
Use unset_no_wrap_on_formatters(orig_no_wrap_settings) to undo what
this function does
"""
# handle easy case:
if not formatters:
return {}
formatter_no_wrap_settings = {}
global_orig_no_wrap = is_nowrap_set()
set_no_wrap(no_wrap)
for k, f in formatters.items():
if WrapperFormatter.is_wrapper_formatter(f):
formatter_no_wrap_settings[k] = (f.wrapper_formatter.no_wrap, f.wrapper_formatter)
f.wrapper_formatter.no_wrap = no_wrap
return {"global_orig_no_wrap": global_orig_no_wrap,
"formatter_no_wrap_settings": formatter_no_wrap_settings}
def unset_no_wrap_on_formatters(orig_no_wrap_settings):
"""
It only makes sense to call this function with the return value
from the last call to set_no_wrap_on_formatters(no_wrap, formatters).
It effectively undoes what set_no_wrap_on_formatters() does
"""
if not orig_no_wrap_settings:
return {}
global_orig_no_wrap = orig_no_wrap_settings["global_orig_no_wrap"]
formatter_no_wrap_settings = orig_no_wrap_settings["formatter_no_wrap_settings"]
formatters = {}
for k, v in formatter_no_wrap_settings.items():
formatters[k] = v[1]
formatters[k].no_wrap = v[0]
set_no_wrap(global_orig_no_wrap)
return formatters
def _simpleTestHarness(no_wrap):
from fmclient.common import utils
def testFormatter(event):
return "*{}".format(event["state"])
def buildFormatter(field, width):
def f(dict):
if field == 'number':
return dict[field]
return "{}".format(dict[field]).replace("_", " ")
return {"formatter": f, "wrapperFormatter": width}
set_no_wrap(no_wrap)
field_labels = ['Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity', 'Number']
fields = ['timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity', 'number']
formatterSpecX = {"timestamp": 10,
"state": 8,
"event_log_id": 70,
"reason_text": 30,
"entity_instance_id": 30,
"severity": 12,
"number": 4}
formatterSpec = {}
for f in fields:
formatterSpec[f] = buildFormatter(f, formatterSpecX[f])
logs = []
for i in range(0, 30):
log = {}
for f in fields:
if f == 'number':
log[f] = i
else:
log[f] = "{}{}".format(f, i)
logs.append(utils.objectify(log))
formatterSpec = formatterSpecX
formatters = build_wrapping_formatters(logs, fields, field_labels, formatterSpec)
utils.print_list(logs, fields, field_labels, formatters=formatters, sortby=6,
reversesort=True, no_wrap_fields=['entity_instance_id'])
print("nowrap = {}".format(is_nowrap_set()))
if __name__ == "__main__":
_simpleTestHarness(True)
_simpleTestHarness(False)
| 38.721879 | 137 | 0.632925 | 12,468 | 0.398008 | 0 | 0 | 154 | 0.004916 | 0 | 0 | 10,657 | 0.340197 |
7d77a229da1b2cdc8c56a9c402927cc2d1140814 | 2,139 | py | Python | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
]
| null | null | null | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
]
| null | null | null | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
]
| 1 | 2020-03-04T08:59:15.000Z | 2020-03-04T08:59:15.000Z | # Ref.: https://github.com/vaiorabbit/ruby-opengl/blob/master/sample/simple.rb
from ctypes import *
from OpenGL.GL import *
import GLFW
from GLFW import *
def key_callback_fn(window_handle, key, scancode, action, mods):
if key == GLFW_KEY_ESCAPE and action == GLFW_PRESS:
glfwSetWindowShouldClose(window_handle, 1)
key_callback = GLFWkeyfun(key_callback_fn)
def main():
GLFW.load_glfw_library('libglfw.dylib')
glfwInit()
major = c_int()
minor = c_int()
rev = c_int()
glfwGetVersion(byref(major), byref(minor), byref(rev))
print(major.value, minor.value, rev.value)
verstr = glfwGetVersionString()
print(verstr)
win = glfwCreateWindow(640, 480, b"Python GLFW", None, None)
glfwMakeContextCurrent(win)
primary_monitor = glfwGetPrimaryMonitor()
monitor_name = glfwGetMonitorName(primary_monitor)
print(monitor_name)
vidmode_ptr = glfwGetVideoMode(primary_monitor)
vidmode = cast(vidmode_ptr, POINTER(GLFWvidmode)).contents
print(vidmode.width, vidmode.height, vidmode.refreshRate)
glfwSetKeyCallback(win, key_callback)
fb_width = c_int()
fb_height = c_int()
try:
while glfwWindowShouldClose(win) == 0:
glfwGetFramebufferSize(win, fb_width, fb_height)
ratio = fb_width.value / fb_height.value
glViewport(0, 0, fb_width.value, fb_height.value)
glClear(GL_COLOR_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-ratio, ratio, -1.0, 1.0, 1.0, -1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glRotatef(glfwGetTime() * 50.0, 0.0, 0.0, 1.0)
glBegin(GL_TRIANGLES)
glColor3f(1.0, 0.0, 0.0)
glVertex3f(-0.6, -0.4, 0.0)
glColor3f(0.0, 1.0, 0.0)
glVertex3f(0.6, -0.4, 0.0)
glColor3f(0.0, 0.0, 1.0)
glVertex3f(0.0, 0.6, 0.0)
glEnd()
glfwSwapBuffers(win)
glfwPollEvents()
finally:
glfwDestroyWindow(win)
glfwTerminate()
if __name__ == '__main__':
main()
| 28.905405 | 78 | 0.632071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.054698 |
7d77a393017f4de426158a54d01130a88642e6af | 34,661 | py | Python | market_sim/_agents/risk_model.py | quanttrade/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
]
| 247 | 2017-09-14T03:26:39.000Z | 2022-03-30T10:23:02.000Z | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
]
| null | null | null | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
]
| 111 | 2017-10-18T07:47:07.000Z | 2022-03-30T10:18:49.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement different methods to hedge positions and measure the risk of a zero
coupon bond portfolio
REFERENCE: Nawalkha, S. K; Soto, G. M.; Beliaeva, N. A., "Interest Rate Risk
Modeling, the fixed Income Valuation course". Wiley, 2005
@author: ucaiado
Created on 12/22/2016
"""
import numpy as np
import math
import pandas as pd
import pprint
'''
Begin help functions
'''
'''
End help functions
'''
def update_maxmin(f_frice, a):
'''
Update maximum and minimum price observed by the agent while positioned
:param f_frice: float.
:param a: agent object.
'''
if f_frice > a.current_max_price:
a.current_max_price = f_frice
if f_frice < a.current_min_price:
a.current_min_price = f_frice
class RiskModel(object):
'''
A basic risk model representation for a fixed income strategy that measures
the loss potential and the immunization needs
'''
def __init__(self, env, f_portfolio_value=10**6):
'''
Initiate a RiskModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. The total portfolio value used to normalize position weights
'''
self.env = env
self.l_hedging_instr = env.l_hedge
self.s_main = env.s_main_intrument
self.l_ratios = []
self.d_dv01 = {}
self.na_pu = None
self.na_du = None
self.f_portfolio_value = f_portfolio_value
self.s_risk_model = 'BasicModel'
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
def reset(self):
'''
reset risk model parameters to use in a new simulation
'''
self.current_price = None
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
self.l_ratios = []
self.na_pu = None
self.na_du = None
def set_ratios(self):
'''
Set the DV01 ratios of the pair between the main instrument and the
        others available for hedging
'''
# calculate the dv01 for each instrument
d_aux = {}
l_rtn = []
l_du = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]/252.
f_price, f_qty = book_obj.best_bid
f_dv01 = (f_du*10.)/(1. + f_price/100.)**(1. + f_du)
d_aux[s_key] = f_dv01
l_du.append(f_du)
        # calculate the ratio in relation to the main instrument
self.d_dv01 = d_aux
for s_instr in self.l_hedging_instr:
l_rtn.append(d_aux[s_instr]/d_aux[self.s_main])
self.l_du = l_du
return l_rtn
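    # Editor-added numeric sketch of the DV01 formula above (hypothetical values):
    # for an instrument with f_du = 2.0 years trading at 12.00%,
    #   dv01 = (2.0 * 10.) / (1. + 12.00 / 100.) ** (1. + 2.0) ~= 20. / 1.405 ~= 14.24
    # and the hedge ratio is dv01(hedging instrument) / dv01(main instrument).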
def portfolio_duration(self, d_position):
'''
Return the duration of a portfolio
:param d_position: dictionary. portfolio to be hedged
'''
l_pu = []
l_pos = []
l_du = []
self.last_pu = {}
self.last_pos = {}
self.last_du = {}
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
f_pu = 10.**5/(1. + f_price/100.)**(f_du/252.)
            f_pos = -d_position[s_key]['qBid']  # invert to quantity in PU?
f_pos -= -d_position[s_key]['qAsk']
self.last_du[s_key] = f_du
l_du.append(f_du)
self.last_pos[s_key] = f_pos
l_pos.append(f_pos)
self.last_pu[s_key] = f_pu
l_pu.append(f_pu)
return self._get_duration(l_pu, l_du, l_pos)
def _get_duration(self, l_pu, l_du, l_pos):
'''
Calculate the duration for a given position
:param l_pu: list.
:param l_du: list.
:param l_pos: list. final position in each instrument traded
'''
na_weight = self._get_weights(l_pu, l_pos)
return sum(np.array(l_du)/252. * na_weight)
def _get_weights(self, l_pu, l_pos):
'''
Return the positions as portfolio weights
:param l_pu: list. the PU of each instrument
:param l_pos: list. final position in each instrument traded (in PU)
'''
na_weight = np.array(l_pu) * np.array(l_pos)
na_weight /= self.f_portfolio_value
return na_weight
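    # Editor-added numeric sketch (hypothetical values): +10 contracts priced at
    # PU 95,000 with f_portfolio_value = 10**6 has weight 95000. * 10. / 10**6 = 0.95,
    # and with 504 days to maturity it adds 504 / 252. * 0.95 = 1.9 to the duration.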
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param agent: Agent object. agent that need to hedge
'''
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param d_position: dictionary. portfolio in qty of contracts
'''
# check the ratios just once
if not self.l_ratios:
self.l_ratios = self.set_ratios()
f_current_duration = self.portfolio_duration(d_position)
        # check where we should hedge and in what quantity
f_main_pos = -d_position[self.s_main]['qBid']
f_main_pos -= -d_position[self.s_main]['qAsk']
l_hedged_position = []
l_pos = [f_main_pos]
l_du = [self.last_du[self.s_main]]
l_pu = [self.last_pu[self.s_main]]
for s_instr, f_ratio in zip(self.l_hedging_instr, self.l_ratios):
if s_instr == self.s_main:
s_action = 'BUY'
if f_main_pos < 0:
s_action = 'SELL'
if f_main_pos == 0:
return []
return [(s_action, s_instr, f_main_pos)]
f_aux_pos = -d_position[s_instr]['qBid']
f_aux_pos -= -d_position[s_instr]['qAsk']
l_hedged_position.append(f_aux_pos*f_ratio)
l_pos.append(f_aux_pos)
l_du.append(self.last_du[s_instr])
l_pu.append(self.last_pu[s_instr])
f_main_position = f_main_pos + sum(np.array(l_hedged_position))
na_to_hedge = np.array([f_main_position] * len(l_hedged_position))
na_to_hedge /= np.array(self.l_ratios)
na_sign = np.sign(na_to_hedge)
na_mult = 5 * na_sign
if sum((abs(na_to_hedge)/5) < 1) != 0:
na_to_hedge = np.ceil(abs(na_to_hedge)/5).astype(int) * na_mult
else:
na_to_hedge = np.round(abs(na_to_hedge)/5).astype(int) * na_mult
l_to_hedge = list(na_to_hedge)
l_rtn = []
for idx, s_instr in enumerate(self.l_hedging_instr):
i_qty = -l_to_hedge[idx]
if i_qty != 0:
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
f_abs_dur = abs(f_future_duration)
                # if qty is not enough to decrease the duration, increase it
if f_abs_dur > 1.2 and f_abs_dur < 3.:
i_qty *= 2
elif f_abs_dur >= 3.:
i_qty *= 3
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
# recalculate all
if abs(f_future_duration) < abs(f_current_duration):
# change to rate quantity
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn.append((s_action, s_instr, -i_qty))
return l_rtn
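    # Editor-added sketch of the lot rounding above (hypothetical quantities): raw
    # hedge needs of 7 and 3 contracts become 1.4 and 0.6 after dividing by 5;
    # because one of them is below 1, both are rounded up, giving lots of 10 and 5
    # contracts with the original signs.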
class KRDModel(RiskModel):
'''
A key rate duration model representation that uses the KRDs selected to
    decide what instruments should be used in the immunization of a portfolio
'''
def __init__(self, env, l_krd, f_portfolio_value=10**6, s_kind='trava'):
'''
Initiate a KRDModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
:param l_krd: list. maturity of the key rates used, in years
        :param f_portfolio_value*: float. The total portfolio value used to normalize position weights
'''
super(KRDModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'KRDModel_{}'.format(s_kind)
self.l_krd = l_krd
self.df_ratios = None
self.l_cmm_target = ['DI1F19', 'DI1F21', 'DI1F23']
self.s_kind = s_kind
def portfolio_krd(self, d_position):
'''
Return a tuple with the key rate durations of a portfolio and all
information needed to recalculate it
:param d_position: dictionary. portfolio to be hedged
'''
# recover variables
f_facevalue = 10.**5
l_rates = []
l_pos = []
l_maturity = []
l_instrument = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
l_instrument.append(book_obj.s_instrument)
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
            f_pos = -d_position[s_key]['qBid']  # invert sign to get qty in PU?
f_pos -= -d_position[s_key]['qAsk']
l_maturity.append(f_du/252.)
l_pos.append(f_pos)
l_rates.append(f_price)
# get the key rate duration matrix
l_exp_pu = [f_facevalue * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_pu = [f_facevalue * (1.+f_rate/100)**(-f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
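        # |dP/dY| of each contract under continuous compounding; divided by
        # the price in key_rates() this yields the zero-coupon duration that
        # is spread across the key rates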
l_dPdYP = [f_facevalue * f_mat * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
df_krd = self.key_rates(l_dPdYP, l_exp_pu)
na_weights = self._get_weights(l_pu, l_pos)
df_exposure = self._get_krd_exposure(df_krd, na_weights)
t_rtn = (df_krd, na_weights, df_exposure, l_maturity, l_pos, l_pu,
l_instrument)
return t_rtn
def _get_krd_exposure(self, df_krd, na_weights):
'''
Return the exposure in KRDs based on krds passed and weights
:param df_krd: data frame. KRD of the instruments traded
        :param na_weights: numpy array. the weight in portfolio of each KRD
'''
df_exposure = pd.Series(df_krd.T.dot(na_weights))
df_exposure.index = self.l_krd
return df_exposure
def key_rates(self, l_dPdYP, l_pu):
'''
Return the matrix of key rates durations for the instruments traded
in the environment
:param l_dPdYP: list. $\frac{dP * P}{dY}$
        :param l_pu: list. PU of each contract
'''
        # add up the linear contributions $s(t, t_i)$ for $i=1, 2, ..., m$ to
# obtain the change in the given zero-coupon rate $\Delta y(t)$
if isinstance(self.df_ratios, type(None)):
self._set_linear_contributions()
df = self.df_ratios
return df.apply(lambda x: x * np.array(l_dPdYP) / np.array(l_pu),
axis=0)
def get_target_krds(self, l_cmm, d_data, df_krd, s_kind='fly'):
'''
        Return the target KRDs pandas series to match those of a butterfly.
        :param l_cmm: list. instruments used in the butterfly, ordered by maturity
:param d_data: dictionary. maturity and PU of each instrument
:param s_kind*: string. the kind of target to return
'''
# calculate positions
if s_kind == 'fly':
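            # butterfly: split the body's exposure between the wings in
            # proportion to the maturity gaps (f_alpha) and rescale by the PU
            # ratio, so the wings jointly offset the body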
f_Qm = 1. # quantity at the middle of the structure
f_alpha = (d_data[l_cmm[2]][1] * 1. - d_data[l_cmm[1]][1])
f_alpha /= (d_data[l_cmm[2]][1] / 1. - d_data[l_cmm[0]][1])
f_Qs = (f_Qm * f_alpha * d_data[l_cmm[1]][0]) / d_data[l_cmm[0]][0]
f_Ql = (f_Qm * (1 - f_alpha) * d_data[l_cmm[1]][0])
f_Ql /= d_data[l_cmm[2]][0]
l_pos = [-f_Qs, f_Qm, -f_Ql]
elif s_kind == 'trava':
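            # 'trava' (spread): hold the body and size each hedging wing so
            # that its duration roughly offsets the body's duration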
l_pu = [d_data[s_key][0] for s_key in l_cmm]
l_mat = [d_data[s_key][1] for s_key in l_cmm]
l_pos = [0., 10, 0.]
na_weights = self._get_weights(l_pu, l_pos)
f_curr_duration = sum(np.array(l_mat) * na_weights)
l_pos_aux = []
for s_key in self.l_hedging_instr:
f_pu = d_data[s_key][0]
f_matr = d_data[s_key][1]
f_dur_aux = 5. * f_pu / self.f_portfolio_value * f_matr
f_unt = -f_curr_duration / f_dur_aux * 5.
l_pos_aux.append(f_unt)
l_pos = [l_pos_aux[0]/20.] + [1.] + [l_pos_aux[1]/20.]
        # calculate target
l_p = [d_data[l_cmm[0]][0], d_data[l_cmm[1]][0], d_data[l_cmm[2]][0]]
na_weights = self._get_weights(l_p, l_pos)
df_target = pd.Series(df_krd.T.dot(na_weights))
df_target.index = self.l_krd
return df_target
def _set_linear_contributions(self):
'''
Define the linear contribution $s(t, t_i)$ made by the change in the
ith key rate, $\Delta y(t_i)$, to the change in a given zero-coupon
rate $\Delta y(t)$, according to Nawalkha, 266
'''
l_maturity = []
l_krd = self.l_krd
# recover data from books
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
f_du = self.env.l_du[self.env.order_matching.idx][idx]
l_maturity.append(f_du/252.)
# create the $s(t, t_i)$ matrix, according to Nawalkha, 266
l = []
i_last_idx = len(l_krd) - 1
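        # each instrument maturity loads linearly on its two neighbouring key
        # rates (tent functions); maturities beyond the ends of the key-rate
        # grid load entirely on the nearest key rate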
for i_list, f_mat in enumerate(l_maturity):
l.append([])
for idx in xrange(len(l_krd)):
f_krd = l_krd[idx]
if idx == 0:
f_krd1 = l_krd[idx+1]
if f_mat < f_krd:
l[i_list].append(1.)
elif f_mat > f_krd1:
l[i_list].append(0.)
else:
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif idx == i_last_idx:
f_krd_1 = l_krd[idx-1]
if f_mat > f_krd:
l[i_list].append(1.)
elif f_mat < f_krd_1:
l[i_list].append(0.)
else:
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
else:
f_krd1 = l_krd[idx+1]
f_krd_1 = l_krd[idx-1]
if (f_mat >= f_krd_1) & (f_mat <= f_krd):
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
elif (f_mat >= f_krd) & (f_mat <= f_krd1):
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif (f_mat < f_krd_1) | (f_mat > f_krd1):
l[i_list].append(0.)
else:
l[i_list].append(0.)
self.df_ratios = pd.DataFrame(l)
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio (in rate, not PU)
:param d_position: dictionary. portfolio in qty of contracts
'''
# measure the KRDs of the current portfolios
f_portfolio_value = self.f_portfolio_value
t_rtn = self.portfolio_krd(d_position)
df_krd, na_weights, df_expos, l_mat, l_pos, l_pu, l_instr = t_rtn
d_aux = dict(zip(l_instr, zip(l_pu, l_mat,
np.cumsum(len(l_instr) * [1])-1)))
df_target = self.get_target_krds(self.l_cmm_target, d_aux, df_krd,
s_kind=self.s_kind)
        # NOTE: Why am I inverting the sign? I don't know
# ... maybe something related to positions in PU and rates
df_target *= (l_pos[d_aux[self.l_cmm_target[1]][2]])
# calculate the current duration and distance for the target in
# absolute percentage
f_curr_duration = sum(np.array(l_mat) * na_weights)
f_curr_abs_target = sum(abs((df_expos-df_target)/df_target))
# check which hedge will drive the strategy closer to the target
f_min_abs_target = f_curr_abs_target
l_rtn = []
for idx, s_key in enumerate(self.l_hedging_instr):
f_pu = d_aux[s_key][0]
f_matr = d_aux[s_key][1]
f_dur_aux = 5. * f_pu / f_portfolio_value * f_matr
f_unt = np.round(-f_curr_duration / f_dur_aux)
if abs(f_unt) > 10e-6:
s_debug = '\t{}: {:0.2f}, {:0.2f}'
# limit the number of contracts that can be traded at each time
i_qty = float(f_unt*5)
if f_unt > 3.:
i_qty = 15.
elif f_unt < -3.:
i_qty = -15.
# simulate how would be the measures doing the hedge
# recalculate all
idx = d_aux[s_key][2]
l_pos_aux = l_pos[:]
l_pos_aux[idx] += i_qty
na_weights_aux = self._get_weights(l_pu, l_pos_aux)
f_aux_duration = sum(np.array(l_mat) * na_weights_aux)
df_expos_aux = self._get_krd_exposure(df_krd, na_weights_aux)
f_aux_abs_target = sum(abs((df_expos_aux-df_target)/df_target))
# === DEBUG ===
# print s_debug.format(s_key, f_aux_duration, f_aux_abs_target)
# =============
# check the hedge instrument that will drive down the krd most
if abs(f_aux_duration) < abs(f_curr_duration):
if f_aux_abs_target < f_min_abs_target:
f_min_abs_target = f_aux_abs_target
# the quantity is in PU. So Convert to rate
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn = [(s_action, s_key, -i_qty)]
return l_rtn
class SingleHedgeModel(RiskModel):
'''
A SingleHedgeModel model representation that immunize portfolio using just
one instrument
'''
def __init__(self, env, f_portfolio_value=10**6, s_instrument='DI1F19'):
'''
        Initiate a SingleHedgeModel object. Save all parameters as attributes
        :param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. The total value of the portfolio
        :param s_instrument*: string. the single instrument used to hedge
'''
super(SingleHedgeModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'SingleHedgeModel'
self.l_hedging_instr = [s_instrument]
class GreedyHedgeModel(RiskModel):
'''
    A GreedyHedgeModel checks if the market is offering a good deal to
hedge the agent's position. The immunization is done using a duration
neutral strategy that used just one instrument. The 'good deal' notion
should be implemented as something related to price, time or even
    fair-priceness quant stuff
'''
def __init__(self, env, f_value=10**6, s_instrument='DI1F19',
s_fairness='spread'):
'''
Initiate a GreedyHedgeModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
:param s_fairness*: string. the fair price notion of the agent
:param f_value*: float. The total value available
'''
super(GreedyHedgeModel, self).__init__(env, f_value)
self.s_fairness = s_fairness
if s_fairness == 'spread':
self.func_fair_price = self._compare_to_spread
elif s_fairness == 'closeout':
# closeout also should include stoploss?
self.func_fair_price = self._compare_to_closeout
s_instrument = env.s_main_intrument
self.s_risk_model = 'GreedyHedge_{}'.format(s_fairness)
self.l_hedging_instr = [s_instrument]
self.main_hedge = s_instrument
self.f_target = 0.03 # could be smaller when closeout (2 bps?)
self.f_stop = 0.03
self.last_txt = ''
self.current_price = None
self.f_last_gain = None
self.f_last_loss = None
self.price_stop_buy = None
self.price_stop_sell = None
def set_gain_loss(self, f_gain, f_loss):
'''
Set a target to the agent stop trading on the session
:param f_gain: float.
:param f_loss: float.
'''
self.f_last_gain = f_gain
self.f_last_loss = f_loss
def can_open_position(self, s_side, agent):
'''
Check the positions limits of an agent
:param s_side: string. Side of the trade to check the limit
:param agent: Agent object. agent that need to hedge
'''
if not self.l_ratios:
self.l_ratios = self.set_ratios()
# recover position limits
s_instr = self.env.s_main_intrument
f_max_pos = agent.max_pos
f_max_disclosed = agent.max_disclosed_pos
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_pnlt = 0.
# check if can open position to a specific side
if s_side == 'ASK':
if f_pos <= f_max_pos * -1:
return False
elif f_pos_discl <= f_max_disclosed * -1:
return False
elif s_side == 'BID':
if f_pos >= f_max_pos:
return False
elif f_pos_discl >= f_max_disclosed:
return False
return True
def should_open_at_current_price(self, s_side, agent):
        '''
        Check whether the agent should open a position at the current prices
        :param s_side: string. side of the trade to check ('BID' or 'ASK')
        :param agent: Agent object. agent that wants to open the position
        '''
# recover position limits
s_instr = self.env.s_main_intrument
f_pnlt = 0.
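        # widen the "do not re-enter" band around previous stop and trade
        # prices as the session PnL deteriorates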
if agent.f_pnl < -1500.:
f_pnlt = self.f_stop / 3. * 3.
elif agent.f_pnl < -1000.:
f_pnlt = self.f_stop / 3. * 2
elif agent.f_pnl < -500.:
f_pnlt = self.f_stop / 3. * 1.
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
# print 'wierd bid-ask spread', f_bidask_spread
return False
# check if can open position based on the last stop
if self.price_stop_sell and s_side == 'ASK':
f_check = self.price_stop_sell
if f_current_ask >= f_check - f_pnlt:
if f_current_ask <= f_check + f_pnlt:
# print 'last time of stop at ask', f_check
return False
if self.price_stop_buy and s_side == 'BID':
f_check = self.price_stop_buy
if f_current_bid >= f_check - f_pnlt:
if f_current_bid <= f_check + f_pnlt:
# print 'last time of stop at bid', f_check
return False
# check if can open positions based on the last price traded
if f_pos < 0 and s_side == 'ASK':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_ask >= f_min and f_current_ask <= f_max:
# print 'same prices at ask', f_current_ask, f_max, f_min
return False
elif f_pos > 0 and s_side == 'BID':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_bid >= f_min and f_current_bid <= f_max:
# print 'same prices at bid', f_current_bid, f_max, f_min
return False
elif f_pos_discl > 0 and s_side == 'ASK':
f_agent_price = agent.current_open_price
if abs(f_current_ask - f_agent_price) < 0.005:
# print 'too low at ask', f_current_ask, f_agent_price
return False
elif f_pos_discl < 0 and s_side == 'BID':
f_agent_price = agent.current_open_price
if abs(f_current_bid - f_agent_price) < 0.005:
# print 'too low at bid', f_current_bid, f_agent_price
return False
return True
def should_hedge_open_position(self, agent):
'''
Check if the current open position should be hedged
:param agent: Agent object. agent that need to hedge
'''
# recover position limits
s_instr = self.env.s_main_intrument
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover price from hedging instrument
obj_book = self.env.get_order_book(self.main_hedge)
if f_pos_discl < 0:
f_price, f_qty = obj_book.best_ask
elif f_pos_discl > 0:
f_price, f_qty = obj_book.best_bid
        # check if it is fair to mount a spread
if f_pos_discl != 0 and f_pos != 0:
s_side = 'ASK'
if f_pos > 0:
s_side = 'BID'
if not self.func_fair_price(f_price, f_pos_discl, agent, s_side):
return False
print '.',
# close out open positions by the current mid
if s_instr != self.main_hedge:
obj_book = self.env.get_order_book(s_instr)
f_ask, f_qty = obj_book.best_ask
f_bid, f_qty = obj_book.best_bid
f_mid = (f_ask + f_bid)/2.
if f_pos_discl < 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qBid'] += f_qty
agent.disclosed_position[s_instr]['Bid'] += f_vol
elif f_pos_discl > 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qAsk'] += f_qty
agent.disclosed_position[s_instr]['Ask'] += f_vol
return True
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param agent: Agent object. agent that need to hedge
'''
# TODO: if s_fairness==closeout, should "hedge" on the main instrument
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def should_stop_disclosed(self, agent):
'''
Return if the agent should stop the current disclosed position or not
:param agent: Agent object. agent that need to hedge
'''
s_instr = self.env.s_main_intrument
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_agent_price = agent.current_open_price
if not f_agent_price or f_pos_discl == 0.:
if self.b_stop_trading:
agent.done = True
return False
f_ref_price = f_agent_price
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
# check if there is something weird with the spread
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.03):
return False
# check if should stop to trade
if self.b_stop_trading:
return True
if self.f_last_gain:
f_pnl = agent.f_pnl - 40. # due to MtM
if f_pnl > self.f_last_gain:
self.b_stop_trading = True
return True
elif f_pnl < self.f_last_loss:
self.b_stop_trading = True
return True
# check if should execute the stop gain
if f_pos_discl > 0:
update_maxmin(f_current_bid, agent)
f_ref_price = max(agent.current_max_price, f_ref_price)
f_loss = f_ref_price - self.f_stop
if f_current_bid < f_loss:
if i_qbid <= 600.:
return True
return f_current_bid < f_loss - self.f_stop/2.
elif f_pos_discl < 0:
update_maxmin(f_current_ask, agent)
f_ref_price = min(agent.current_min_price, f_ref_price)
f_loss = f_ref_price + self.f_stop
if f_current_ask > f_loss:
if i_qask <= 600.:
return True
return f_current_ask > f_loss + self.f_stop/2.
return False
def _compare_to_spread(self, f_current_price, f_open_pos, agent, s_side):
'''
        Return True when the current hedging price is at least as good as the
        fair price implied by the average entry price, the average spread
        traded and the profit target
:param f_current_price: float. The current price in the hedging instr
:param f_open_pos: float. the current disclosed position
:param agent: Agent object. agent that need to hedge
'''
# short_current_price >= (long_avg_price-avg_spread_price + param)
if f_open_pos > 0:
f_param = self.f_target # NOTE: hard coded
elif f_open_pos < 0:
f_param = -self.f_target # NOTE: hard coded
s_instr = self.env.s_main_intrument
s_hedge = self.main_hedge
# s_side = 'ASK'
# if f_open_pos > 0:
# s_side = 'BID'
# implement the prices accountability
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
if len(l_disclosed) == 0:
print 'no disclosed position'
print '--open'
pprint.pprint(agent.d_trades)
print '--position'
pprint.pprint(agent.position)
print '--disclosed'
print agent.disclosed_position
print '--param'
print s_side, f_open_pos
raise NotImplementedError
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
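        # quantity-weighted average entry price and average spread between the
        # main and hedging instruments over the open disclosed trades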
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_aux = (d_tob[s_instr]['Ask'] + d_tob[s_instr]['Bid'])/2.
f_aux -= (d_tob[s_hedge]['Ask'] + d_tob[s_hedge]['Bid'])/2.
f_avg_spread += f_aux * f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
f_fair_price = (f_long_avg_price - f_avg_spread + f_param)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'PRICE: {}, DISCL: {}, AVG SPREAD: {}, MY PRICE: {}'
s_err += ', CURRNT: {}'
s_err = s_err.format(f_fair_price, f_open_pos, f_avg_spread,
f_long_avg_price, f_current_price)
if self.last_txt != s_err:
# print s_err
self.last_txt = s_err
if f_open_pos > 0:
return f_current_price >= f_fair_price
elif f_open_pos < 0:
return f_current_price <= f_fair_price
def _compare_to_closeout(self, f_current_price, f_open_pos, agent, s_side):
        '''
        Return True when the market reaches the close-out (stop gain) level
        implied by the average disclosed entry price and the profit target
        :param f_current_price: float. The current price in the hedging instr
        :param f_open_pos: float. the current disclosed position
        :param agent: Agent object. agent that need to hedge
        '''
# short_current_price >= (long_avg_price-avg_spread_price + param)
s_instr = self.env.s_main_intrument
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
f_fair_price = (f_long_avg_price + self.f_target)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'POS: {}, MY PRICE: {}, CURRNT: {}, MAX: {}, MIN: {}'
s_err = s_err.format(f_open_pos, f_long_avg_price, f_current_price,
agent.current_max_price, agent.current_min_price)
if self.last_txt != s_err:
# print s_err + '\n'
self.last_txt = s_err
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
return False
# check if should execute the stop gain
if f_open_pos > 0:
f_gain = f_long_avg_price + self.f_target
if f_current_bid >= f_gain:
if i_qbid <= 400.:
return True
return f_current_bid > f_gain + self.f_target/2.
elif f_open_pos < 0:
f_gain = f_long_avg_price - self.f_target
if f_current_ask <= f_gain:
if i_qask <= 400.:
return True
return f_current_ask < f_gain - self.f_target/2.
return False
| 40.72973 | 79 | 0.578979 | 33,865 | 0.977035 | 0 | 0 | 0 | 0 | 0 | 0 | 9,531 | 0.274978 |
7d78430382af94d8d75d17a72371f34356ac1d39 | 193 | py | Python | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
]
| null | null | null | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
]
| null | null | null | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from .models import *
@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
list_display = ['job_id', 'job_title', 'duties', 'min_salary', 'max_salary']
| 24.125 | 80 | 0.73057 | 114 | 0.590674 | 0 | 0 | 135 | 0.699482 | 0 | 0 | 51 | 0.264249 |
7d78bb6905459ba9f8b320facebb6b0cf69eca83 | 3,401 | py | Python | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
]
| 52 | 2019-03-18T21:12:59.000Z | 2022-01-24T05:49:23.000Z | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
]
| 173 | 2019-03-18T15:50:14.000Z | 2019-12-09T18:03:07.000Z | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
]
| 21 | 2019-03-20T17:14:22.000Z | 2022-01-30T18:33:22.000Z | from collections import defaultdict
from enum import Enum
import json
import pprint
from typing import Dict, List, Union, Any, Set, DefaultDict
from arche.tools import s3
import perfect_jsonschema
EXTENDED_KEYWORDS = {"tag", "unique", "coverage_percentage"}
SchemaObject = Dict[str, Union[str, bool, int, float, None, List]]
RawSchema = Dict[str, SchemaObject]
SchemaSource = Union[str, RawSchema]
TaggedFields = Dict[str, List[str]]
class Tag(Enum):
unique = (0,)
category = (1,)
name_field = (2,)
product_url_field = (3,)
product_price_field = (4,)
product_price_was_field = (5,)
class Schema:
allowed_tags = set([name for name, _ in Tag.__members__.items()])
def __init__(self, source: SchemaSource):
self.raw: RawSchema = self.read(source)
if not self.raw.get("properties", None):
raise ValueError("The schema does not have any 'properties'")
self.enums: List[str] = self.get_enums()
self.tags = self.get_tags(self.raw)
def json(self):
print(json.dumps(self.raw, indent=4))
def __repr__(self):
return pprint.pformat(self.raw)
def get_enums(self) -> List[str]:
enums: List[str] = []
for k, v in self.raw["properties"].items():
if isinstance(v, Dict) and "enum" in v.keys():
enums.append(k)
return enums
@staticmethod
def get_tags(schema: RawSchema) -> TaggedFields:
tagged_fields: DefaultDict[str, List[str]] = defaultdict(list)
for key, value in schema["properties"].items():
if isinstance(value, Dict):
property_tags = value.get("tag")
if property_tags:
tagged_fields = Schema.get_field_tags(
property_tags, key, tagged_fields
)
return dict(tagged_fields)
@classmethod
def get_field_tags(
cls, tags: Set[Any], field: str, tagged_fields: DefaultDict
) -> DefaultDict[str, List[str]]:
tags = cls.parse_tag(tags)
if not tags:
raise ValueError(
f"'{tags}' tag value is invalid, should be str or list[str]"
)
invalid_tags = tags - cls.allowed_tags
if invalid_tags:
raise ValueError(
f"{invalid_tags} tag(s) are unsupported, valid tags are:\n"
f"{', '.join(sorted(list(cls.allowed_tags)))}"
)
for tag in tags:
tagged_fields[tag].append(field)
return tagged_fields
@staticmethod
def parse_tag(value):
if isinstance(value, str):
return set([value])
if isinstance(value, list):
return set(value)
return None
@staticmethod
def read(schema_source: SchemaSource) -> RawSchema:
if isinstance(schema_source, str):
schema_source = Schema.from_url(schema_source)
if isinstance(schema_source, dict):
perfect_jsonschema.check(schema_source, EXTENDED_KEYWORDS)
return schema_source
else:
raise ValueError(
f"{json.dumps(str(schema_source), indent=4)} is an unidentified schema source."
f"\nA dict, a full s3 path or URL is expected"
)
@staticmethod
def from_url(path: str) -> RawSchema:
return json.loads(s3.get_contents(path))
| 31.490741 | 95 | 0.605116 | 2,958 | 0.869744 | 0 | 0 | 1,998 | 0.587474 | 0 | 0 | 414 | 0.121729 |
7d7a5b43416629a61d913d56e3d15ecd4f2e0f5f | 5,620 | py | Python | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
]
| 4 | 2019-03-07T05:15:13.000Z | 2019-06-13T20:35:45.000Z | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
]
| 2 | 2019-08-01T18:31:41.000Z | 2019-08-01T19:42:15.000Z | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
]
| 1 | 2019-09-18T15:17:53.000Z | 2019-09-18T15:17:53.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared library for `eight_schools_hmc_{graph,eager}_test.py`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
__all__ = [
'EightSchoolsHmcBenchmarkTestHarness',
'benchmark_eight_schools_hmc',
'eight_schools_joint_log_prob',
]
def mvn(*args, **kwargs):
"""Convenience function to efficiently construct a MultivariateNormalDiag."""
# Faster than using `tfd.MultivariateNormalDiag`.
return tfd.Independent(tfd.Normal(*args, **kwargs),
reinterpreted_batch_ndims=1)
def eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools joint log-prob."""
rv_avg_effect = tfd.Normal(loc=0., scale=10.)
rv_avg_stddev = tfd.Normal(loc=5., scale=1.)
rv_school_effects_standard = mvn(
loc=tf.zeros_like(school_effects_standard),
scale=tf.ones_like(school_effects_standard))
rv_treatment_effects = mvn(
loc=(avg_effect + tf.exp(avg_stddev) * school_effects_standard),
scale=treatment_stddevs)
return (
rv_avg_effect.log_prob(avg_effect) +
rv_avg_stddev.log_prob(avg_stddev) +
rv_school_effects_standard.log_prob(school_effects_standard) +
rv_treatment_effects.log_prob(treatment_effects))
def benchmark_eight_schools_hmc(
num_results=int(5e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3,
step_size=0.4):
"""Runs HMC on the eight-schools unnormalized posterior."""
num_schools = 8
treatment_effects = tf.constant(
[28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32,
name='treatment_effects')
treatment_stddevs = tf.constant(
[15, 10, 16, 11, 9, 11, 10, 18],
dtype=np.float32,
name='treatment_stddevs')
def unnormalized_posterior_log_prob(
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools unnormalized log posterior."""
return eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard)
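  # In eager mode, compiling with tf.function lets the sampler run as a traced
  # graph instead of op-by-op eager execution.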
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
def computation():
"""The benchmark computation."""
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=(
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps))
return kernel_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
class EightSchoolsHmcBenchmarkTestHarness(object):
"""Test harness for running HMC benchmark tests in graph/eager modes."""
def __init__(self):
self._mode = 'eager' if tf.executing_eagerly() else 'graph'
def benchmark_eight_schools_hmc_num_leapfrog_1(self):
self.report_benchmark(
name=self._mode + '_eight_schools_hmc_num_leapfrog_1',
**benchmark_eight_schools_hmc(num_leapfrog_steps=1))
def benchmark_eight_schools_hmc_num_leapfrog_2(self):
self.report_benchmark(
name=self._mode + '_eight_schools_hmc_num_leapfrog_2',
**benchmark_eight_schools_hmc(num_leapfrog_steps=2))
def benchmark_eight_schools_hmc_num_leapfrog_3(self):
self.report_benchmark(
name=self._mode + '_eight_schools_hmc_num_leapfrog_3',
**benchmark_eight_schools_hmc(num_leapfrog_steps=3))
def benchmark_eight_schools_hmc_num_leapfrog_10(self):
self.report_benchmark(
name=self._mode + '_eight_schools_hmc_num_leapfrog_10',
**benchmark_eight_schools_hmc(num_leapfrog_steps=10))
def benchmark_eight_schools_hmc_num_leapfrog_20(self):
self.report_benchmark(
name=self._mode + '_eight_schools_hmc_num_leapfrog_20',
**benchmark_eight_schools_hmc(num_leapfrog_steps=20))
| 34.691358 | 80 | 0.724377 | 1,258 | 0.223843 | 0 | 0 | 0 | 0 | 0 | 0 | 1,614 | 0.287189 |
7d7a5e990271c6f1b8c5e7eefd58b31203c16bfb | 16,456 | py | Python | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
]
| null | null | null | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
]
| 1 | 2022-02-06T14:21:48.000Z | 2022-03-22T15:19:40.000Z | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
]
| null | null | null | """
This file is part of pyspex
https://github.com/rmvanhees/pyspex.git
Python implementation to read SPEXone DEM output
Copyright (c) 2019-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from pathlib import Path
import numpy as np
from .lib.tmtc_def import tmtc_def
# - global parameters ------------------------------
# - local functions --------------------------------
def det_dtype():
"""
Returns numpy dtype with the registers of the SPEXone CMV4000 detector
"""
return np.dtype([
('UNUSED_000', 'u1'),
('NUMBER_LINES', 'u1', (2)),
('START1', 'u1', (2)),
('START2', 'u1', (2)),
('START3', 'u1', (2)),
('START4', 'u1', (2)),
('START5', 'u1', (2)),
('START6', 'u1', (2)),
('START7', 'u1', (2)),
('START8', 'u1', (2)),
('NUMBER_LINES1', 'u1', (2)),
('NUMBER_LINES2', 'u1', (2)),
('NUMBER_LINES3', 'u1', (2)),
('NUMBER_LINES4', 'u1', (2)),
('NUMBER_LINES5', 'u1', (2)),
('NUMBER_LINES6', 'u1', (2)),
('NUMBER_LINES7', 'u1', (2)),
('NUMBER_LINES8', 'u1', (2)),
('SUB_S', 'u1', (2)),
('SUB_A', 'u1', (2)),
('MONO', 'u1'), # 1 bits
('IMAGE_FLIPPING', 'u1'), # 2 bits
('INTE_SYNC', 'u1'), # 3 bits: Int_sync, Exp_dual, Exp_ext
('EXP_TIME', 'u1', (3)),
('EXP_STEP', 'u1', (3)),
('EXP_KP1', 'u1', (3)),
('EXP_KP2', 'u1', (3)),
('NR_SLOPES', 'u1'), # 2 bits
('EXP_SEQ', 'u1'),
('EXP_TIME2', 'u1', (3)),
('EXP_STEP2', 'u1', (3)),
('UNUSED_062', 'u1'),
('UNUSED_063', 'u1'),
('UNUSED_064', 'u1'),
('UNUSED_065', 'u1'),
('UNUSED_066', 'u1'),
('UNUSED_067', 'u1'),
('UNUSED_068', 'u1'),
('EXP2_SEQ', 'u1'),
('NUMBER_FRAMES', 'u1', (2)),
('OUTPUT_MODE', 'u1'), # 2 bits
('FOT_LENGTH', 'u1'),
('I_LVDS_REC', 'u1'), # 4 bits
('UNUSED_075', 'u1'),
('UNUSED_076', 'u1'),
('COL_CALIB', 'u1'), # 2 bits: Col_calib, ADC_calib
('TRAINING_PATTERN', 'u1', (2)), # 12 bits
('CHANNEL_EN', 'u1', (3)), # 19 bits
('I_LVDS', 'u1'), # 4 bits
('I_COL', 'u1'), # 4 bits
('I_COL_PRECH', 'u1'), # 4 bits
('I_ADC', 'u1'), # 4 bits
('I_AMP', 'u1'), # 4 bits
('VTF_L1', 'u1'), # 7 bits
('VLOW2', 'u1'), # 7 bits
('VLOW3', 'u1'), # 7 bits
('VRES_LOW', 'u1'), # 7 bits
('UNUSED_092', 'u1'),
('UNUSED_093', 'u1'),
('V_PRECH', 'u1'), # 7 bits
('V_REF', 'u1'), # 7 bits
('UNUSED_096', 'u1'),
('UNUSED_097', 'u1'),
('VRAMP1', 'u1'), # 7 bits
('VRAMP2', 'u1'), # 7 bits
('OFFSET', 'u1', (2)), # 14 bits
('PGA_GAIN', 'u1'), # 2 bits
('ADC_GAIN', 'u1'),
('UNUSED_104', 'u1'),
('UNUSED_105', 'u1'),
('UNUSED_106', 'u1'),
('UNUSED_107', 'u1'),
('T_DIG1', 'u1'), # 4 bits
('T_DIG2', 'u1'), # 4 bits
('UNUSED_110', 'u1'),
('BIT_MODE', 'u1'), # 1 bits
('ADC_RESOLUTION', 'u1'), # 2 bits
('PLL_ENABLE', 'u1'), # 1 bits
('PLL_IN_FRE', 'u1'), # 2 bits
('PLL_BYPASS', 'u1'), # 1 bits
('PLL_RANGE', 'u1'), # 8 bits: PLL range(1), out_fre(3), div(4)
('PLL_LOAD', 'u1'),
('DUMMY', 'u1'),
('UNUSED_119', 'u1'),
('UNUSED_120', 'u1'),
('BLACK_COL_EN', 'u1'), # 2 bits: Black_col_en, PGA_gain
('UNUSED_122', 'u1'),
('V_BLACKSUN', 'u1'), # 6 bits
('UNUSED_124', 'u1'),
('UNUSED_125', 'u1'),
('TEMP', 'u1', (2))
])
# - class DEMio -------------------------
class DEMio:
"""
This class can be used to read SPEXone DEM output
Methods
-------
hdr
Returns DEM header as numpy compound array.
number_lines
Returns number of lines (rows).
number_channels
Returns number of LVDS channels used.
lvds_clock
Returns flag for LVDS clock, as 0: disable, 1: enable)
pll_control
Returns PLL control parameters: (pll_range, pll_out_fre, pll_div).
exp_control
Exposure time control parameters: (inte_sync, exp_dual, exp_ext).
offset
Returns digital offset including ADC offset
pga_gain
Returns PGA gain (Volt).
temp_detector
Returns detector temperature as raw counts.
exp_time(t_mcp=1e-7)
Returns pixel exposure time [s].
fot_time(t_mcp=1e-7)
Returns frame overhead time [s].
rot_time(t_mcp=1e-7)
Returns image read-out time [s].
frame_period(n_coad=1)
Returns frame period [s].
get_sci_hk()
Returns Science telemetry, a subset of MPS and housekeeping parameters
get_data(numlines=None)
Returns data of a detector images (numpy uint16 array).
Notes
-----
Examples
--------
>>> dem = DEMio(dem_file)
>>> img_hk = dem.get_sci_hk()
>>> img_data = dem.get_data()
"""
def __init__(self, flname: str) -> None:
"""
Parameters
----------
flname : str
filename with header or binary data of DEM measurement
"""
self.__hdr = None
if flname.endswith('a.txt'):
self.bin_file = flname.replace('a.txt', 'b.bin')
self.hdr_file = flname
elif flname.endswith('b.bin'):
self.bin_file = flname
self.hdr_file = flname.replace('b.bin', 'a.txt')
else:
raise RuntimeError(f'invalid filename: {flname}')
if Path(self.hdr_file).is_file():
self.__get_hdr()
def __get_hdr(self) -> None:
"""
Read DEM header data
"""
self.__hdr = np.zeros((1,), dtype=det_dtype())
with open(self.hdr_file, 'r', encoding='ascii', errors='ignore') as fp:
for line in fp:
columns = line[:-1].split(',')
if columns[0] == 'Reg':
continue
# Fix possible errors in Name
name = columns[2].replace(' [', '[')
name = name.replace('_ ', '_').replace(' ', '_')
value = int(columns[-1])
indx = -1
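                # registers wider than one byte are listed on several rows
                # whose names end in ':0]', ':8]' or ':16]'; map each row to
                # the matching byte of the det_dtype field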
if columns[2].endswith(':0]') \
or columns[2].endswith('[0]') \
or columns[2].endswith('[2]'):
name = name.split('[')[0]
indx = 0
elif columns[2].endswith(':8]'):
name = name.split('[')[0]
indx = 1
elif columns[2].endswith(':16]'):
name = name.split('[')[0]
indx = 2
elif name == 'Unused':
if columns[0] == '86':
continue
name = f'Unused_{int(columns[0]):03d}'
key = name.upper()
if isinstance(self.__hdr[0][key], np.ndarray):
self.__hdr[0][key][indx] = value
else:
self.__hdr[0][key] = value
@property
def hdr(self):
"""
Return DEM header as numpy compound array
"""
if self.__hdr is None:
return None
return self.__hdr[0]
@property
def number_lines(self) -> int:
"""
Return number of lines (rows)
Register address: [1, 2]
"""
return (self.hdr['NUMBER_LINES'][0]
+ (self.hdr['NUMBER_LINES'][1] << 8))
@property
def number_channels(self) -> int:
"""
Return number of LVDS channels used
"""
return 2 ** (4 - (self.hdr['OUTPUT_MODE'] & 0x3))
@property
def lvds_clock(self) -> bool:
"""
Return flag for LVDS clock (0: disable, 1: enable)
Register address: 82
"""
return ((self.hdr['PLL_ENABLE'] & 0x3) == 0
and (self.hdr['PLL_BYPASS'] & 0x3) != 0
and (self.hdr['CHANNEL_EN'][2] & 0x4) != 0)
def pll_control(self) -> tuple:
"""
Returns PLL control parameters: pll_range, pll_out_fre, pll_div
PLL_range: range (0 or 1)
PLL_out_fre: output frequency (0, 1, 2 or 5)
PLL_div: 9 (10 bit) or 11 (12 bit)
Register address: 116
"""
pll_div = self.hdr['PLL_RANGE'] & 0xF # bit [0:4]
pll_out_fre = (self.hdr['PLL_RANGE'] >> 4) & 0x7 # bit [4:7]
pll_range = (self.hdr['PLL_RANGE'] >> 7) # bit [7]
return (pll_range, pll_out_fre, pll_div)
@property
def exp_control(self) -> tuple:
"""
Exposure time control parameters: (inte_sync, exp_dual, exp_ext)
Register address: 41
"""
inte_sync = (self.hdr['INTE_SYNC'] >> 2) & 0x1
exp_dual = (self.hdr['INTE_SYNC'] >> 1) & 0x1
exp_ext = self.hdr['INTE_SYNC'] & 0x1
return (inte_sync, exp_dual, exp_ext)
@property
def offset(self) -> int:
"""
Returns digital offset including ADC offset
Register address: [100, 101]
"""
val = ((self.hdr['OFFSET'][1] << 8)
+ self.hdr['OFFSET'][0])
return 70 + (val if val < 8192 else val - 16384)
@property
def pga_gain(self) -> float:
"""
Returns PGA gain (Volt)
Register address: 102
"""
reg_pgagain = self.hdr['PGA_GAIN']
# need first bit of address 121
reg_pgagainfactor = self.hdr['BLACK_COL_EN'] & 0x1
return (1 + 0.2 * reg_pgagain) * 2 ** reg_pgagainfactor
@property
def temp_detector(self) -> int:
"""
Returns detector temperature as raw counts
Notes
-----
Uncalibrated conversion: ((1184 - 1066) * 0.3 * 40 / 40Mhz) + offs [K]
"""
return (self.hdr['TEMP'][1] << 8) + self.hdr['TEMP'][0]
def exp_time(self, t_mcp=1e-7):
"""
Returns pixel exposure time [s].
"""
# Nominal fot_length = 20, except for very short exposure_time
reg_fot = self.hdr['FOT_LENGTH']
reg_exptime = ((self.hdr['EXP_TIME'][2] << 16)
+ (self.hdr['EXP_TIME'][1] << 8)
+ self.hdr['EXP_TIME'][0])
return 129 * t_mcp * (0.43 * reg_fot + reg_exptime)
def fot_time(self, t_mcp=1e-7):
"""
Returns frame overhead time [s]
"""
# Nominal fot_length = 20, except for very short exposure_time
reg_fot = self.hdr['FOT_LENGTH']
return 129 * t_mcp * (reg_fot + 2 * (16 // self.number_channels))
def rot_time(self, t_mcp=1e-7):
"""
Returns image read-out time [s]
"""
return 129 * t_mcp * (16 // self.number_channels) * self.number_lines
def frame_period(self, n_coad=1):
"""
Returns frame period [s]
"""
return 2.38 + (n_coad
* (self.exp_time() + self.fot_time() + self.rot_time()))
def get_sci_hk(self):
"""
Returns Science telemetry, a subset of MPS and housekeeping parameters
Returns
-------
numpy array
"""
def convert_val(key):
"""
Convert byte array to integer
"""
val = 0
for ii, bval in enumerate(self.__hdr[0][key]):
val += bval << (ii * 8)
return val
# convert original detector parameter values to telemetry parameters
convert_det_params = {
'DET_NUMLINES': convert_val('NUMBER_LINES'),
'DET_START1': convert_val('START1'),
'DET_START2': convert_val('START2'),
'DET_START3': convert_val('START3'),
'DET_START4': convert_val('START4'),
'DET_START5': convert_val('START5'),
'DET_START6': convert_val('START6'),
'DET_START7': convert_val('START7'),
'DET_START8': convert_val('START8'),
'DET_NUMLINES1': convert_val('NUMBER_LINES1'),
'DET_NUMLINES2': convert_val('NUMBER_LINES2'),
'DET_NUMLINES3': convert_val('NUMBER_LINES3'),
'DET_NUMLINES4': convert_val('NUMBER_LINES4'),
'DET_NUMLINES5': convert_val('NUMBER_LINES5'),
'DET_NUMLINES6': convert_val('NUMBER_LINES6'),
'DET_NUMLINES7': convert_val('NUMBER_LINES7'),
'DET_NUMLINES8': convert_val('NUMBER_LINES8'),
'DET_SUBS': convert_val('SUB_S'),
'DET_SUBA': convert_val('SUB_A'),
'DET_MONO': self.__hdr[0]['MONO'],
'DET_IMFLIP': self.__hdr[0]['IMAGE_FLIPPING'],
'DET_EXPCNTR': self.__hdr[0]['INTE_SYNC'],
'DET_EXPTIME': convert_val('EXP_TIME'),
'DET_EXPSTEP': convert_val('EXP_STEP'),
'DET_KP1': convert_val('EXP_KP1'),
'DET_KP2': convert_val('EXP_KP2'),
'DET_NOFSLOPES': self.__hdr[0]['NR_SLOPES'],
'DET_EXPSEQ': self.__hdr[0]['EXP_SEQ'],
'DET_EXPTIME2': convert_val('EXP_TIME2'),
'DET_EXPSTEP2': convert_val('EXP_STEP2'),
'DET_EXP2_SEQ': self.__hdr[0]['EXP2_SEQ'],
'DET_NOFFRAMES': convert_val('NUMBER_FRAMES'),
'DET_OUTMODE': self.__hdr[0]['OUTPUT_MODE'],
'DET_FOTLEN': self.__hdr[0]['FOT_LENGTH'],
'DET_ILVDSRCVR': self.__hdr[0]['I_LVDS_REC'],
'DET_CALIB': self.__hdr[0]['COL_CALIB'],
'DET_TRAINPTRN': convert_val('TRAINING_PATTERN'),
'DET_CHENA': convert_val('CHANNEL_EN'),
'DET_ILVDS': self.__hdr[0]['I_LVDS'],
'DET_ICOL': self.__hdr[0]['I_COL'],
'DET_ICOLPR': self.__hdr[0]['I_COL_PRECH'],
'DET_IADC': self.__hdr[0]['I_ADC'],
'DET_IAMP': self.__hdr[0]['I_AMP'],
'DET_VTFL1': self.__hdr[0]['VTF_L1'],
'DET_VTFL2': self.__hdr[0]['VLOW2'],
'DET_VTFL3': self.__hdr[0]['VLOW3'],
'DET_VRSTL': self.__hdr[0]['VRES_LOW'],
'DET_VPRECH': self.__hdr[0]['V_PRECH'],
'DET_VREF': self.__hdr[0]['V_REF'],
'DET_VRAMP1': self.__hdr[0]['VRAMP1'],
'DET_VRAMP2': self.__hdr[0]['VRAMP2'],
'DET_OFFSET': convert_val('OFFSET'),
'DET_PGAGAIN': self.__hdr[0]['PGA_GAIN'],
'DET_ADCGAIN': self.__hdr[0]['ADC_GAIN'],
'DET_TDIG1': self.__hdr[0]['T_DIG1'],
'DET_TDIG2': self.__hdr[0]['T_DIG2'],
'DET_BITMODE': self.__hdr[0]['BIT_MODE'],
'DET_ADCRES': self.__hdr[0]['ADC_RESOLUTION'],
'DET_PLLENA': self.__hdr[0]['PLL_ENABLE'],
'DET_PLLINFRE': self.__hdr[0]['PLL_IN_FRE'],
'DET_PLLBYP': self.__hdr[0]['PLL_BYPASS'],
'DET_PLLRATE': self.__hdr[0]['PLL_RANGE'],
'DET_PLLLOAD': self.__hdr[0]['PLL_LOAD'],
'DET_DETDUM': self.__hdr[0]['DUMMY'],
'DET_BLACKCOL': self.__hdr[0]['BLACK_COL_EN'],
'DET_VBLACKSUN': self.__hdr[0]['V_BLACKSUN'],
'DET_T': convert_val('TEMP')
}
sci_hk = np.zeros((1,), dtype=np.dtype(tmtc_def(0x350)))
sci_hk[0]['REG_FULL_FRAME'] = 1
sci_hk[0]['REG_CMV_OUTPUTMODE'] = 3
for key, value in convert_det_params.items():
sci_hk[0][key] = value
return sci_hk
def get_data(self, numlines=None):
"""
Returns data of a detector frame (numpy uint16 array)
Parameters
----------
numlines : int, optional
Provide number of detector rows when no headerfile is present
"""
if numlines is None:
# obtain number of rows
numlines = self.number_lines
# Read binary big-endian data
return np.fromfile(self.bin_file, dtype='>u2').reshape(numlines, -1)
| 34.426778 | 79 | 0.49131 | 12,200 | 0.741371 | 0 | 0 | 2,219 | 0.134844 | 0 | 0 | 7,731 | 0.469798 |
7d7bdf74580e44ae7e0eab89dc294d34670eb290 | 7,827 | py | Python | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
]
| 10 | 2020-09-29T06:36:45.000Z | 2022-03-14T18:15:50.000Z | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
]
| 53 | 2020-10-08T10:05:00.000Z | 2022-03-29T14:21:18.000Z | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
]
| 5 | 2020-09-25T07:48:04.000Z | 2021-11-23T07:08:56.000Z | from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Union, Type, List, Optional, Any, Tuple
import pytest
from signal_ocean.util import parsing_helpers
@pytest.mark.parametrize("cap_words, snake_cased",
[('VesselTypeId', 'vessel_type_id'),
('Flag', 'flag'),
('Id', 'id'),
('VesselName', 'vessel_name'), ('IMO', 'imo'),
('teU14', 'te_u14')])
def test_to_snake_case(cap_words: str, snake_cased: str) -> None:
transformed = parsing_helpers._to_snake_case(cap_words)
assert transformed == snake_cased
@pytest.mark.parametrize("snake_cased, camel_cased",
[('vessel_type_id', 'VesselTypeId'),
('token', 'Token'),
('first_load_arrival_date_to', 'FirstLoadArrivalDateTo')])
def test_to_camel_case(snake_cased: str, camel_cased: str) -> None:
transformed = parsing_helpers._to_camel_case(snake_cased)
assert transformed == camel_cased
@pytest.mark.parametrize("value, cls, expected",
[(None, type(None), None),
('Abc', str, 'Abc'),
(1, int, 1),
("1", int, 1),
(1, float, 1.),
(1., float, 1.),
("1", float, 1.),
(1, bool, True),
('1909-07-01T00:00:00', datetime,
datetime(1909, 7, 1, 0, 0, 0,
tzinfo=timezone.utc))])
def test_parse_class(value: Union[str, int, float, bool, None],
cls: Type,
expected: Union[str, int, float, bool, None, datetime]) \
-> None:
transformed = parsing_helpers._parse_class(value, cls)
assert isinstance(transformed, cls)
assert transformed == expected
@pytest.mark.parametrize("value, cls, expected_error",
[(None, str, TypeError),
([], list, TypeError),
('Abc', int, ValueError)])
def test_parse_class_raises_error(value: Union[str, int, float, bool, None],
cls: Type,
expected_error: Type[BaseException]) -> None:
with pytest.raises(expected_error):
parsing_helpers._parse_class(value, cls)
@pytest.mark.parametrize("value, field_type, expected",
[(1, int, 1),
(1, Union[int, None], 1),
(None, Union[int, None], None),
(['a', 'b'], List, ['a', 'b']),
(['1', '2'], List, ['1', '2']),
(['1', '2'], Tuple, ('1', '2')),
(['1', '2'], Tuple[int, ...], (1, 2)),
(['1', '2'], List[int], [1, 2]),
(['1', '2'], Optional[List[int]], [1, 2]),
(['1', '2'], Optional[List], ['1', '2']),
(['1', '2'], Union[List[int], None], [1, 2]),
(['1', '2'], Union[List, None], ['1', '2']),
(['1', '2'], Optional[Tuple[str, ...]], ('1', '2'))])
def test_parse_model_field(value: Union[str, int, float, bool, None],
field_type: Type,
expected: Union[str, int, float, bool, None, datetime,
List[int], List[str], Tuple[int, ...],
Tuple[str, ...]]) \
-> None:
transformed = parsing_helpers.parse_model(value, field_type)
assert type(transformed) == type(expected)
assert transformed == expected
@pytest.mark.parametrize("value, field_type, expected_error",
[(['a', 'b'], List[int], ValueError),
(['a', 'b'], Optional[List[int]], ValueError),
(1, Type[Any], NotImplementedError)])
def test_parse_model_field_raises_error(value: Union[str, int, float, bool, None],
field_type: Type,
expected_error: Type[BaseException]) -> None:
with pytest.raises(expected_error):
parsing_helpers.parse_model(value, field_type)
def test_parse_model():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_name: str
model_score: float
touched_by: str
created_date: datetime
version: int = None
data = {'ModelID': 1, 'ModelName': 'model1', 'ModelScore': .97,
'TouchedBy': 'signal',
'CreatedDate': '2010-01-01T01:00:00'}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1',
model_score=.97,
touched_by='signal',
created_date=datetime(2010, 1, 1, 1, 0, 0,
tzinfo=timezone.utc))
def test_parse_nested_model():
@dataclass(frozen=True)
class TestNestedModel:
model_id: int
@dataclass(frozen=True)
class TestModel:
model_id: int
nested_model: TestNestedModel
data = {'ModelID': 1, 'nested_model': {'ModelID': 3}}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, nested_model=TestNestedModel(3))
def test_parse_model_rename_key():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_name: str
data = {'ModelID': 1, 'NAME': 'model1'}
rename_keys = {'NAME': 'model_name'}
parsed = parsing_helpers.parse_model(data, TestModel, rename_keys)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1')
def test_parse_model_extra_attributes_are_ignored():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_name: str
data = {'ModelID': 1, 'ModelName': 'model1', 'ModelScore': .97,
'TouchedBy': 'signal', 'CreatedDate': '2010-01-01'}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1')
def test_parse_model_default():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_name: str = 'a'
data = {'ModelID': 1}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='a')
def test_parse_model_default_factory():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_lists: List = field(default_factory=list)
data = {'ModelID': 1}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_lists=[])
def test_parse_model_missing_attribute_raises_type_error():
@dataclass(frozen=True)
class TestModel:
model_id: int
model_lists: Any
data = {'ModelID': 1}
with pytest.raises(TypeError):
parsing_helpers.parse_model(data, TestModel)
def test_parse_model_rename_key_extra_attribute_ignored():
@dataclass(frozen=True)
class TestModel:
model_id: int
data = {'ModelID': 1}
rename_keys = {'NAME': 'model_name'}
parsed = parsing_helpers.parse_model(data, TestModel, rename_keys)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1)
| 36.919811 | 84 | 0.54542 | 679 | 0.086751 | 0 | 0 | 5,140 | 0.656701 | 0 | 0 | 852 | 0.108854 |
7d7ca170be35a492481ffa204124b3d8dffb5cdc | 2,931 | py | Python | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
]
| null | null | null | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
]
| null | null | null | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
]
| null | null | null | import tensorflow as tf
import pandas as pd
import numpy as np
import sys
import time
from cflow import ConditionalFlow
from MoINN.modules.subnetworks import DenseSubNet
from utils import train_density_estimation, plot_loss, plot_tau_ratio
# import data
tau1_gen = np.reshape(np.load("../data/tau1s_Pythia_gen.npy"), (-1,1))
tau2_gen = np.reshape(np.load("../data/tau2s_Pythia_gen.npy"), (-1,1))
tau1_sim = np.reshape(np.load("../data/tau1s_Pythia_sim.npy"), (-1,1))
tau2_sim = np.reshape(np.load("../data/tau2s_Pythia_sim.npy"), (-1,1))
data_gen = tf.convert_to_tensor(np.concatenate([tau1_gen,tau2_gen], axis=-1), dtype=tf.float32)
data_sim = tf.convert_to_tensor(np.concatenate([tau1_sim,tau2_sim], axis=-1), dtype=tf.float32)
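# first half of each (gen, sim) sample set is used for training, the second
# half is held out for evaluation/unfolding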
train_gen, test_gen = np.split(data_gen, 2)
train_sim, test_sim = np.split(data_sim, 2)
# Get the flow
meta = {
"units": 16,
"layers": 4,
"initializer": "glorot_uniform",
"activation": "leakyrelu",
}
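# 12-block conditional flow over the 2-D gen-level (tau1, tau2), conditioned on
# the 2-D detector-level (tau1, tau2)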
cflow = ConditionalFlow(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta, subnet_constructor=DenseSubNet)
# train the network
EPOCHS = 50
BATCH_SIZE = 1000
LR = 5e-3
DECAY_RATE=0.1
ITERS = len(train_gen)//BATCH_SIZE
DECAY_STEP=ITERS
#Prepare the tf.dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_gen, train_sim))
train_dataset = train_dataset.shuffle(buffer_size=500000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(LR, DECAY_STEP, DECAY_RATE)
opt = tf.keras.optimizers.Adam(lr_schedule)
train_losses = []
#train_all = np.concatenate([train_gen, train_sim], axis=-1)
start_time = time.time()
for e in range(EPOCHS):
batch_train_losses = []
# Iterate over the batches of the dataset.
for step, (batch_gen, batch_sim) in enumerate(train_dataset):
batch_loss = train_density_estimation(cflow, opt, batch_gen, [batch_sim])
batch_train_losses.append(batch_loss)
train_loss = tf.reduce_mean(batch_train_losses)
train_losses.append(train_loss)
if (e + 1) % 1 == 0:
# Print metrics
print(
"Epoch #{}: Loss: {}, Learning_Rate: {}".format(
e + 1, train_losses[-1], opt._decayed_lr(tf.float32)
)
)
end_time = time.time()
print("--- Run time: %s hour ---" % ((end_time - start_time)/60/60))
print("--- Run time: %s mins ---" % ((end_time - start_time)/60))
print("--- Run time: %s secs ---" % ((end_time - start_time)))
# Make plots and sample
plot_loss(train_losses, name="Log-likelihood", log_axis=False)
detector = tf.constant(test_sim, dtype=tf.float32)
unfold_gen = cflow.sample(int(5e5),[detector])
plot_tau_ratio(test_gen, unfold_gen, detector, name="tau_ratio")
unfold_gen = {}
for i in range(10):
unfold_gen[i] = cflow.sample(int(5e5),[detector])
unfold_pythia = np.stack([unfold_gen[i] for i in range(10)])
np.save("inn_pythia",unfold_pythia) | 32.566667 | 113 | 0.702491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.189696 |
7d7cdf2a362ccd086f161b36591ea27b0857e365 | 2,408 | py | Python | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
]
| null | null | null | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
]
| null | null | null | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
"""
decoder file
decoder class
"""
import tensorflow as tf
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
"""
attention layer from Bahdanau paper
"""
super().__init__()
self.w1 = tf.keras.layers.Dense(units)
self.w2 = tf.keras.layers.Dense(units)
self.vector = tf.keras.layers.Dense(1)
def call(self, query, values):
"""
get context and weights given query and values
"""
query_with_time_axis = tf.expand_dims(query, 1)
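        # score every encoder position against the broadcast decoder state,
        # then normalise the scores over the time axis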
score = self.vector(tf.nn.tanh(
self.w1(query_with_time_axis) + self.w2(values)))
attention_weights = tf.nn.softmax(score, axis=1)
context_vector = tf.reduce_sum(attention_weights * values, axis=1)
return context_vector, attention_weights
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dimension, decoding_units, batch_size, gru: bool = True):
"""
decoder for attention model
"""
super().__init__()
self.batch_size = batch_size
self.decoding_units = decoding_units
self.embedding = tf.keras.layers.Embedding(
vocab_size, embedding_dimension)
if gru:
self.layer = tf.keras.layers.GRU(self.decoding_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
self.layer = tf.keras.layers.LSTM(self.decoding_units,
return_sequences=True,
return_state=True)
self.dense_layer = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.decoding_units)
def call(self, x, hidden, enc_output):
"""
given vector, hidden, and encoding, return new vector, state, and weights
"""
context_vector, attention_weights = self.attention(hidden, enc_output)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], -1)
output, state = self.layer(x)
output = tf.reshape(output, (-1, output.shape[2]))
x = self.dense_layer(output)
return x, state, attention_weights
| 31.684211 | 102 | 0.581395 | 2,318 | 0.962625 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.145349 |
7d7cfad6e60102e07f57c14396b2297a35ac5b1c | 2,203 | py | Python | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
]
| 1 | 2022-01-18T09:43:24.000Z | 2022-01-18T09:43:24.000Z | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
]
| null | null | null | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
import camos.model.image as img
from camos.utils.apptools import getGui
class InputData:
"""The InputData object.
This behaves as a container for the data, as a numpy array, and the main
properties of interest for the object to be handled in visualization and analysis.
"""
def __init__(self, file=None, memoryPersist=None, name="New Layer"):
"""Initialization of the object
Args:
file ([str, numpy.ndarray], optional): Can be a numpy array containing any numeric data, or a path to a file. The opening plugin must support this. Defaults to None.
            memoryPersist (bool, optional): whether the data must be loaded into memory at once, or can be loaded as required from disk. Defaults to None, in which case the value is read from the application configuration.
            stack (bool): the file must be interpreted as a stack (False), various files
            are interpreted as a single stack (True)
"""
self.file = file
self.name = name
self._image = None
self.frames = 0
self.data = None
if memoryPersist is None:
_persist = getGui().configuration.readConfiguration()[
"Performance/RAM_persistence"
]
self.memoryPersist = _persist
else:
self.memoryPersist = memoryPersist
self.max = 0
self.opacity = 50
self.brightness = 0
self.contrast = 0
self.colormap = "gray"
def image(self, index):
"""Returns the current frame for an image
Args:
index (int): index corresponding to the frame
Returns:
np.ndarray: current frame of the image, with shape (height, width, channels)
"""
return self._image[index]
def loadImage(self):
self._image = img.Stack(
self.file, dx=1, dz=1, units="nm", persistence=self.memoryPersist
)
self.frames = len(self._image)
self.max = self._image._imgs.max()
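
# A minimal usage sketch (the file path below is a placeholder, not a real asset;
# memoryPersist is passed explicitly so that no running GUI is required):
#
#   layer = InputData(file="recording.tif", memoryPersist=True, name="Calcium")
#   layer.loadImage()
#   first_frame = layer.image(0)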
| 34.968254 | 177 | 0.625057 | 1,897 | 0.861099 | 0 | 0 | 0 | 0 | 0 | 0 | 1,225 | 0.55606 |
7d7f83cb6c3e80ad4e030d0441da9a9587d821b7 | 10,462 | py | Python | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
]
| null | null | null | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
]
| null | null | null | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
]
| null | null | null | from __future__ import absolute_import
from .geometry_msgs import PoseStamped
from .moveit_msgs import Constraints
from .moveit_msgs import MoveItErrorCodes
from .moveit_msgs import PlannerParams
from .moveit_msgs import PlanningScene
from .moveit_msgs import PlanningSceneComponents
from .moveit_msgs import PositionIKRequest
from .moveit_msgs import RobotState
from .moveit_msgs import RobotTrajectory
from .moveit_msgs import TrajectoryConstraints
from .moveit_msgs import WorkspaceParameters
from .std_msgs import Header
from .std_msgs import ROSmsg
class GetPositionIKRequest(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/srv/GetPositionIK.html
Examples
--------
>>> import roslibpy
>>> base_link = 'base_link' # robot.get_base_link_name()
>>> planning_group = 'manipulator' # robot.main_planning_group
>>> pose = Pose([0.420, -0.025, 0.459], [1, 0, 0], [0, 1, 0])
>>> joint_names = ['shoulder_pan_joint', 'shoulder_lift_joint',
'elbow_joint', 'wrist_1_joint', 'wrist_2_joint',
'wrist_3_joint']
>>> joint_positions = [3.39, -1.47, -2.05, 0.38, -4.96, -6.28]
>>> header = Header(frame_id='base_link')
>>> pose_stamped = PoseStamped(header, pose)
>>> joint_state = JointState(name=joint_names, position=joint_positions,
header=header)
>>> multi_dof_joint_state = MultiDOFJointState(header=header)
>>> start_state = RobotState(joint_state, multi_dof_joint_state)
>>> ik_request = PositionIKRequest(group_name=planning_group,
robot_state=start_state,
pose_stamped=pose_stamped,
avoid_collisions=True)
>>> reqmsg = GetPositionIKRequest(ik_request)
>>> srv = roslibpy.Service(ros_client, '/compute_ik', 'GetPositionIK')
>>> request = roslibpy.ServiceRequest(reqmsg.msg)
>>> srv.call(request, GetPositionIKResponse.from_msg, GetPositionIKResponse.from_msg)
"""
def __init__(self, ik_request=None):
self.ik_request = ik_request or PositionIKRequest()
class GetPositionIKResponse(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/srv/GetPositionIK.html
"""
def __init__(self, solution=None, error_code=None):
self.solution = solution or RobotState() # moveit_msgs/RobotState
self.error_code = error_code or MoveItErrorCodes() # moveit_msgs/MoveItErrorCodes
@classmethod
def from_msg(cls, msg):
solution = RobotState.from_msg(msg['solution'])
error_code = MoveItErrorCodes.from_msg(msg['error_code'])
return cls(solution, error_code)
class GetPositionFKRequest(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/srv/GetPositionFK.html
"""
def __init__(self, header=None, fk_link_names=None, robot_state=None):
self.header = header or Header()
self.fk_link_names = fk_link_names or []
self.robot_state = robot_state or RobotState()
class GetPositionFKResponse(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/srv/GetPositionFK.html
"""
def __init__(self, pose_stamped=None, fk_link_names=None, error_code=None):
self.pose_stamped = pose_stamped or [] # PoseStamped[]
self.fk_link_names = fk_link_names or []
self.error_code = error_code or MoveItErrorCodes() # moveit_msgs/MoveItErrorCodes
@classmethod
def from_msg(cls, msg):
pose_stamped = [PoseStamped.from_msg(d) for d in msg['pose_stamped']]
fk_link_names = msg['fk_link_names']
error_code = MoveItErrorCodes.from_msg(msg['error_code'])
return cls(pose_stamped, fk_link_names, error_code)
class GetCartesianPathRequest(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/srv/GetCartesianPath.html
Examples
--------
>>> import roslibpy
>>> base_link = 'base_link' # robot.get_base_link_name()
>>> ee_link = 'ee_link' # robot.get_end_effector_link_name()
>>> main_planning_group = 'manipulator' # robot.main_planning_group
>>> joint_names = ['j0', 'j1', 'j2', 'j3', 'j4', 'j5']
>>> position = [0, 0, 0, 0, 0, 0] # robot.get_configurable_joint_names()
>>> header = Header(frame_id=base_link)
>>> joint_state = JointState(header=header, name=joint_names, position=position) # or: robot.get_joint_state()
>>> multi_dof_joint_state = MultiDOFJointState(header=header)
>>> start_state = RobotState(joint_state=joint_state, multi_dof_joint_state=multi_dof_joint_state)
>>> start_pose = Pose([0.1068, -0.1818, 0.5930], [1., 0., 0.], [-0., 0., 1.])
>>> end_pose = Pose([0.1041, -0.2946, 0.1843], [1., 0., 0.], [0., 1., 0.])
>>> waypoints = [start_pose, end_pose]
>>> reqmsg = GetCartesianPathRequest(header=header,
start_state=start_state,
group_name=main_planning_group,
link_name=ee_link,
waypoints=waypoints,
max_step=10,
avoid_collisions=True)
>>> srv = roslibpy.Service(ros_client, '/compute_cartesian_path', 'GetCartesianPath')
>>> request = roslibpy.ServiceRequest(reqmsg.msg)
>>> srv.call(request, GetCartesianPathResponse.from_msg, GetCartesianPathResponse.from_msg)
"""
def __init__(self, header=None, start_state=None, group_name='',
link_name='', waypoints=None, max_step=10., jump_threshold=0.,
avoid_collisions=True, path_constraints=None):
self.header = header or Header()
self.start_state = start_state or RobotState() # moveit_msgs/RobotState
self.group_name = group_name
self.link_name = link_name # ee_link
self.waypoints = waypoints if waypoints else [] # geometry_msgs/Pose[]
self.max_step = float(max_step)
self.jump_threshold = float(jump_threshold)
self.avoid_collisions = avoid_collisions
self.path_constraints = path_constraints or Constraints() # moveit_msgs/Constraints
class GetCartesianPathResponse(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/srv/GetCartesianPath.html
"""
def __init__(self, start_state=None, solution=None,
fraction=0., error_code=None):
self.start_state = start_state or RobotState() # moveit_msgs/RobotState
self.solution = solution or RobotTrajectory() # moveit_msgs/RobotTrajectory
self.fraction = fraction
self.error_code = error_code or MoveItErrorCodes() # moveit_msgs/MoveItErrorCodes
@classmethod
def from_msg(cls, msg):
start_state = RobotState.from_msg(msg['start_state'])
solution = RobotTrajectory.from_msg(msg['solution'])
error_code = MoveItErrorCodes.from_msg(msg['error_code'])
return cls(start_state, solution, msg['fraction'], error_code)
class SetPlannerParamsRequest(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/srv/SetPlannerParams.html
"""
def __init__(self, planner_config='', group='', params=None, replace=True):
self.planner_config = planner_config
self.group = group
self.params = params or PlannerParams()
self.replace = replace
class MotionPlanRequest(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/MotionPlanRequest.html
"""
def __init__(self, workspace_parameters=None, start_state=None,
goal_constraints=None, path_constraints=None,
trajectory_constraints=None, planner_id='',
group_name='', num_planning_attempts=8,
allowed_planning_time=2., max_velocity_scaling_factor=1.,
max_acceleration_scaling_factor=1.):
self.workspace_parameters = workspace_parameters or WorkspaceParameters() # moveit_msgs/WorkspaceParameters
self.start_state = start_state or RobotState() # moveit_msgs/RobotState
self.goal_constraints = goal_constraints or [] # moveit_msgs/Constraints[]
self.path_constraints = path_constraints or Constraints() # moveit_msgs/Constraints
self.trajectory_constraints = trajectory_constraints or TrajectoryConstraints() # moveit_msgs/TrajectoryConstraints
self.planner_id = planner_id # string
self.group_name = group_name # string
self.num_planning_attempts = int(num_planning_attempts) # int32
self.allowed_planning_time = float(allowed_planning_time) # float64
self.max_velocity_scaling_factor = float(max_velocity_scaling_factor) # float64
self.max_acceleration_scaling_factor = float(max_acceleration_scaling_factor) # float64
@property
def msg(self):
msg = super(MotionPlanRequest, self).msg
return {"motion_plan_request": msg}
class MotionPlanResponse(ROSmsg):
"""http://docs.ros.org/kinetic/api/moveit_msgs/html/msg/MotionPlanResponse.html
"""
def __init__(self, trajectory_start=None, group_name=None, trajectory=None,
planning_time=None, error_code=None):
self.trajectory_start = trajectory_start or RobotState()
self.group_name = group_name or ''
self.trajectory = trajectory or RobotTrajectory()
self.planning_time = planning_time or 3.
self.error_code = error_code or MoveItErrorCodes()
@classmethod
def from_msg(cls, msg):
msg = msg["motion_plan_response"]
trajectory_start = RobotState.from_msg(msg['trajectory_start'])
trajectory = RobotTrajectory.from_msg(msg['trajectory'])
error_code = MoveItErrorCodes.from_msg(msg['error_code'])
return cls(trajectory_start, msg['group_name'], trajectory, msg['planning_time'], error_code)
class GetPlanningSceneRequest(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/srv/GetPlanningScene.html
"""
def __init__(self, components=None):
self.components = components or PlanningSceneComponents()
class GetPlanningSceneResponse(ROSmsg):
"""http://docs.ros.org/melodic/api/moveit_msgs/html/srv/GetPlanningScene.html
"""
def __init__(self, scene=None):
self.scene = scene or PlanningScene()
@classmethod
def from_msg(cls, msg):
return PlanningScene.from_msg(msg['scene'])
| 45.290043 | 124 | 0.677882 | 9,874 | 0.943797 | 0 | 0 | 1,392 | 0.133053 | 0 | 0 | 4,591 | 0.438826 |
7d803a9aa0c5e2c7510ceac09d326b16dcb098e1 | 9,946 | py | Python | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
]
| null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
]
| null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
]
| null | null | null | #
# module forward.py
#
# forward chaining inference engine
# see holmes/forward.py and holmes.doc for more info;
#
# optimization: uses known fact and rule 'if' indexes to avoid:
# a) exhaustive fact list search when matching an 'if'
# b) exhaustive fact list scan when seeing if fact redundant
# c) exhaustive fact list scan when seeing if should ask user
# d) reselecting and refiring rule/binding on each iteration
#
# only tries rules suggested (triggered) by facts added
# during the last iteration (restarts from top again);
#
# could be made slightly faster by using '(x,y)' tree rep
# for lists (proof list, etc.), but the gain would be minor
# compared to the index tree improvement;
#
# known fact list is now an index tree (members() generates
# the old list, but it is no longer in deduction-order);
###########################################################################
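#
# usage sketch (the rule base and fact list here are hypothetical; the real
# rule/fact formats are documented in holmes.doc):
#
#   forward(kbase, [['man', 'socrates']])
#   which prints any facts deduced from the initial facts and the loaded rules
#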
from match import *
from index import Index
from kbase import external, internal
from time import time
stop_chaining = 'stop_chaining'
def forward(rules, facts, *pmode):
time1 = time()
global kbase # avoid extra args
kbase = rules
known = initialize(facts, kbase)
try:
chain(facts+[['true']], known, kbase) # adds to 'known'
except stop_chaining: pass # user can stop it
return report(known, pmode, time1)
def chain(newfacts, known, kbase):
global user_answers # avoid extra args
while 1:
user_answers = 0
rules = triggered(newfacts, kbase) # if part in new
if not rules:
break
solns = bindings(rules, known) # all 'if's matched
if not solns and not user_answers:
break
newfacts = fire(solns, known) # add 'then' to known
if not newfacts and not user_answers:
break # no new facts added, or
# ask_user added no facts
#######################################################
# create fact index and init iteration counts;
# store_unique would remove redundant initial facts;
#######################################################
def initialize(facts, kbase):
known = Index().init()
for fact in facts:
known.store(fact, (fact, 'initial')) # fact, proof
known.store(['true'], (['true'], 'atomic')) # if true then...
for rule in kbase.rules:
rule['trigger'] = 0
return known
#################################################
# add 'then' parts of matched rules/bindings
# store_unique() might speed finding duplicates;
#################################################
def fire(solns, known):
added = []
for (rule, dict, proof) in solns:
for then in rule['then']:
fact = substitute(then, dict)
if fact[0] == 'delete':
if known.search_unique(fact[1:]):
known.delete(fact[1:])
added.append(['not'] + fact)
else:
if not known.search_unique(fact):
known.store(fact, (fact, (rule['rule'], proof)) )
added.append(fact)
return added
#############################################
# pick rules with matched 'if' parts;
# returns list with no redundant rules;
#############################################
trigger_id = 1
def triggered(newfacts, kbase):
global trigger_id
res = []
for fact in newfacts:
for rule in kbase.match_if(fact):
if rule['trigger'] != trigger_id:
res.append(rule)
rule['trigger'] = trigger_id
trigger_id = trigger_id + 1
return res
#####################################################
# generate bindings for rule's 'if' conjunction,
# for all rules triggered by latest deductions;
# note: 'not' goals must match explicitly asserted
# 'not' facts: we just match the whole 'not';
#####################################################
def bindings(triggered, known):
solns = []
for rule in triggered:
for (dict, proof) in conjunct(rule['if'], known, {}, rule['rule']):
solns.append((rule, dict, proof))
return solns
def conjunct(ifs, known, dict, why):
if ifs == []:
return [(copy_dict(dict), [])]
res = []
head, tail = ifs[0], ifs[1:]
if head[0] == 'ask':
term = substitute(head[1:], dict)
if ask_user(term, known, why):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'told')] + proof2))
else:
for (fact, proof) in known.search(head, dict):
matched, changes = match(head, fact, dict, {})
if matched:
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(fact, proof)] + proof2))
for (var, env) in changes:
env[var] = '?'
return res
########################################################
# assorted stuff; dictionary copies should be built-in,
# since dictionary assignment 'shares' the same object;
########################################################
def copy_dict(dict):
res = {}
for f in dict.keys(): res[f] = dict[f]
return res
##########################################################
# the 'why' explanation in forward chaining just lists
# the rule containing the asked goal;
##########################################################
def ask_user(fact, known, why):
global user_answers
if known.search_unique(fact):
return 1
elif known.search_unique(['not'] + fact):
return 0
user_answers = 1
while 1:
ans = raw_input('is this true: ' + external([fact]) + ' ?')
if ans in ['y','Y','yes','YES']:
known.store(fact, (fact, 'told'))
return 1
elif ans in ['n','N','no','NO']:
known.store(['not']+fact, (['not']+fact, 'told'))
return 0
elif ans == 'why':
print 'to see if rule', why, 'can be applied'
elif ans == 'where':
print_solns(known, None)
elif ans == 'browse':
kbase.browse_pattern(raw_input('enter browse pattern: '))
elif ans == 'stop':
raise stop_chaining
else:
print 'what? ',
print '(expecting "y", "n", "why", "where", "browse", or "stop")'
######################################################
# 'how' explanations require us to construct proof
# trees for each fact added to the known facts list;
######################################################
def report(known, pmode, time1):
filter = None
if pmode:
if pmode[0] == None:
return known
else:
filter = pmode[0]
time2 = time() - time1
print_solns(known, filter)
print 'time: ', time2
show_proofs(known)
def print_solns(known, filter):
sources = {'rule':[], 'told':[], 'init':[], 'atom':[]}
for (fact, proof) in known.members():
if not filter or match(filter, fact, {}, {})[0]:
if type(proof) == type(()):
sources['rule'].append((fact, proof)) # deduced
elif proof == 'told' or proof == 'not':
sources['told'].append(fact)
elif proof == 'initial':
sources['init'].append(fact)
elif proof == 'atomic':
sources['atom'].append(fact)
if not sources['rule']:
print 'I have not deduced any new facts.'
else:
print 'I deduced these facts...'
for (fact, proof) in sources['rule']:
print ' ', external([fact]) #, '(by rule',proof[0]+')'
if sources['told']:
print 'You told me these facts...'
for fact in sources['told']:
print ' ', external([fact])
if sources['init']:
print 'I started with these facts...'
for fact in sources['init']:
print ' ', external([fact])
# ignore sources['atom']
def show_proofs(known):
while 1:
print
ans = raw_input('show proofs? ')
if ans in ['y','Y','yes','YES']:
[patt] = internal(raw_input('enter deductions pattern: '))
for (fact, proof) in known.members():
if match(patt, fact, {}, {})[0]:
trace_tree((fact, proof), 0)
elif ans in ['n','N','no','NO']:
break
elif ans == 'where':
print_solns(known, None)
elif ans == 'browse':
kbase.browse_pattern(raw_input('enter browse pattern: '))
else:
print 'what? (expecting "y", "n", "where", or "browse")'
def trace_tree((fact, proof), level):
print ' ' * level,
print '"' + external([fact]) + '"',
if proof == 'told':
print 'was your answer'
elif proof == 'initial':
print 'was on your initial facts list'
elif proof == 'atomic':
print 'is an absolute truth'
elif proof == 'not':
print 'was a negative answer, or was ommitted'
else:
rule, subproof = proof
print 'was deduced by firing rule', rule
for branch in subproof:
trace_tree(branch, level+3)
| 28.096045 | 81 | 0.478082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,646 | 0.36658 |
7d813b04029d3a96ccc4d0891812f3cdb0a8afd7 | 385 | py | Python | Photo.py | Matsoy/hashcode2019-qualificationRound | e663c93987dda07fafddf6ca006278d4aa0f1b26 | [
"Unlicense"
]
| null | null | null | Photo.py | Matsoy/hashcode2019-qualificationRound | e663c93987dda07fafddf6ca006278d4aa0f1b26 | [
"Unlicense"
]
| null | null | null | Photo.py | Matsoy/hashcode2019-qualificationRound | e663c93987dda07fafddf6ca006278d4aa0f1b26 | [
"Unlicense"
]
| null | null | null | class Photo:
def __init__(self, lid, tags_list, orientation):
"""
Constructor
:param lid: Photo identifier
:param tags_list: List of tags
:param orientation: Orientation. "H" for horizontal or "V" for vertical
"""
self.id = lid
self.tags_list = tags_list
self.orientation = orientation | 32.083333 | 79 | 0.566234 | 385 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.524675 |
7d8144c38e98997db49f5fa507e926dc5ff5e76c | 979 | py | Python | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
]
| null | null | null | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
]
| null | null | null | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
]
| null | null | null |
import tarfile
import tempfile
from . import Task, TaskVar
class TaskReadFile(Task, name="read-file"):
"""
Read contents of a file in the image into a variable.
"""
class Schema:
path = TaskVar(help="Container file path to read data from")
var = TaskVar(help="Destination variable name to write file contents to")
def run_with_values(self, job, *, var, path):
container = job.create({})
with tempfile.TemporaryFile() as tf:
tstream, tstat = container.get_archive(path)
for chunk in tstream:
tf.write(chunk)
tf.seek(0)
with tarfile.open(fileobj=tf, mode="r") as tar:
for item in tar.members:
data = tar.extractfile(item).read().decode('utf-8')
if data.endswith("\n"):
data = data[:-1]
job.set_var(var, data)
break
job.cancel()
| 27.971429 | 81 | 0.544433 | 916 | 0.935649 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.18999 |
7d8289a62a068949c34be79180a4077eeeb19299 | 8,610 | py | Python | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
]
| null | null | null | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
]
| null | null | null | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def project(img_feat, x, y, dim):
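    # Bilinear interpolation of image features at fractional pixel coordinates:
    # the four neighbouring integer pixels (Q11, Q12, Q21, Q22) are gathered and
    # blended with weights proportional to the opposite rectangle areas, producing
    # one dim-sized feature vector per sampled point.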
x1 = tf.floor(x)
x2 = tf.ceil(x)
y1 = tf.floor(y)
y2 = tf.ceil(y)
Q11 = tf.gather_nd(img_feat, tf.stack([tf.cast(x1,tf.int32), tf.cast(y1,tf.int32)],1))
Q12 = tf.gather_nd(img_feat, tf.stack([tf.cast(x1,tf.int32), tf.cast(y2,tf.int32)],1))
Q21 = tf.gather_nd(img_feat, tf.stack([tf.cast(x2,tf.int32), tf.cast(y1,tf.int32)],1))
Q22 = tf.gather_nd(img_feat, tf.stack([tf.cast(x2,tf.int32), tf.cast(y2,tf.int32)],1))
weights = tf.multiply(tf.subtract(x2,x), tf.subtract(y2,y))
Q11 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q11)
weights = tf.multiply(tf.subtract(x,x1), tf.subtract(y2,y))
Q21 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q21)
weights = tf.multiply(tf.subtract(x2,x), tf.subtract(y,y1))
Q12 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q12)
weights = tf.multiply(tf.subtract(x,x1), tf.subtract(y,y1))
Q22 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q22)
outputs = tf.add_n([Q11, Q21, Q12, Q22])
return outputs
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in list(kwargs.keys()):
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=False,
sparse_inputs=False, act=tf.nn.relu, bias=True, gcn_block_id=1,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
if gcn_block_id == 1:
self.support = placeholders['support1']
elif gcn_block_id == 2:
self.support = placeholders['support2']
elif gcn_block_id == 3:
self.support = placeholders['support3']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
        self.num_features_nonzero = 3  # placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
for i in range(len(self.support)):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
name='weights_' + str(i))
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
for i in range(len(self.support)):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
support = dot(self.support[i], pre_sup, sparse=True)
supports.append(support)
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphPooling(Layer):
"""Graph Pooling layer."""
def __init__(self, placeholders, pool_id=1, **kwargs):
super(GraphPooling, self).__init__(**kwargs)
self.pool_idx = placeholders['pool_idx'][pool_id-1]
def _call(self, inputs):
X = inputs
add_feat = (1/2.0) * tf.reduce_sum(tf.gather(X, self.pool_idx), 1)
outputs = tf.concat([X, add_feat], 0)
return outputs
class GraphProjection(Layer):
"""Graph Pooling layer."""
def __init__(self, placeholders, **kwargs):
super(GraphProjection, self).__init__(**kwargs)
self.img_feat = placeholders['img_feat']
'''
def _call(self, inputs):
coord = inputs
X = inputs[:, 0]
Y = inputs[:, 1]
Z = inputs[:, 2]
#h = (-Y)/(-Z)*248 + 224/2.0 - 1
#w = X/(-Z)*248 + 224/2.0 - 1 [28,14,7,4]
h = 248.0 * tf.divide(-Y, -Z) + 112.0
w = 248.0 * tf.divide(X, -Z) + 112.0
h = tf.minimum(tf.maximum(h, 0), 223)
w = tf.minimum(tf.maximum(w, 0), 223)
indeces = tf.stack([h,w], 1)
idx = tf.cast(indeces/(224.0/56.0), tf.int32)
out1 = tf.gather_nd(self.img_feat[0], idx)
idx = tf.cast(indeces/(224.0/28.0), tf.int32)
out2 = tf.gather_nd(self.img_feat[1], idx)
idx = tf.cast(indeces/(224.0/14.0), tf.int32)
out3 = tf.gather_nd(self.img_feat[2], idx)
idx = tf.cast(indeces/(224.0/7.00), tf.int32)
out4 = tf.gather_nd(self.img_feat[3], idx)
outputs = tf.concat([coord,out1,out2,out3,out4], 1)
return outputs
'''
def _call(self, inputs):
coord = inputs
X = inputs[:, 0]
Y = inputs[:, 1]
Z = inputs[:, 2]
h = 250 * tf.divide(-Y, -Z) + 112
w = 250 * tf.divide(X, -Z) + 112
h = tf.minimum(tf.maximum(h, 0), 223)
w = tf.minimum(tf.maximum(w, 0), 223)
x = h/(224.0/56)
y = w/(224.0/56)
out1 = project(self.img_feat[0], x, y, 64)
x = h/(224.0/28)
y = w/(224.0/28)
out2 = project(self.img_feat[1], x, y, 128)
x = h/(224.0/14)
y = w/(224.0/14)
out3 = project(self.img_feat[2], x, y, 256)
x = h/(224.0/7)
y = w/(224.0/7)
out4 = project(self.img_feat[3], x, y, 512)
outputs = tf.concat([coord,out1,out2,out3,out4], 1)
return outputs
| 32.126866 | 111 | 0.617073 | 5,818 | 0.675726 | 0 | 0 | 0 | 0 | 0 | 0 | 2,660 | 0.308943 |
7d82c9d35fc41989289ca1ca70bcd714b7bacd76 | 6,477 | py | Python | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
]
| 9 | 2019-10-29T13:30:57.000Z | 2022-01-30T14:23:26.000Z | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
]
| 2 | 2021-06-08T22:11:11.000Z | 2022-03-12T00:44:37.000Z | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
]
| 2 | 2020-02-11T09:26:48.000Z | 2020-05-11T17:47:22.000Z | from abc import ABC, abstractmethod
import numpy as np
class SwarmAlgorithm(ABC):
'''
A base abstract class for different swarm algorithms.
Parameters
----------
D : int
Search space dimension.
N : int
Population size.
fit_func : callable
Fitness (objective) function or a function returning multiple values
corresponding to different objectives (for multi-objective problems).
params : array_like
Model behavioral parameters.
bounds : ndarray
A 2 by D matrix containing lower and upper bounds of the search space
for each dimension.
seed : int, optional, default=None
Random generator seed.
max_iter : int, optional, default=100
Maximum number of iterations (generations).
stag_iter : int, optional, default=100
        Specifies the allowed number of iterations without the solution improving
        by at least a given tolerance. If this number is exceeded, the optimization
        process is considered stagnated and the algorithm stops.
e : float, optional, default=1e-5
Tolerance.
Attributes
----------
particles : ndarray
An N by D array representing the swarm of N particles.
scores : ndarray
An array of size N representing the value of the fitness function
for each particle.
gbest : ndarray
The D-dimensional vector representing the position of the current
global best particle.
gbest_score : float
The value of the fitness function for the current global best particle.
eval_num : int
The number of fitness function evaluations.
'''
def __init__(self, D, N, fit_func, params, bounds, seed=None, max_iter=100,
stag_iter=100, e=1e-5):
self.D = D
self.N = N
# Initialize problem parameters.
self.fit_func = fit_func
self.l_bounds = bounds[0]
self.u_bounds = bounds[1]
# Behavioural parameters' initialization.
self.set_params(params)
# Initializing the Numpy random numbers generator to reproduce results
# of the optimization processes.
self.seed = seed
# Stopping criteria.
self.max_iter = max_iter
self.stag_iter = stag_iter
self.e = e
self.reset()
@abstractmethod
def set_params(self, new_params):
'''
Initialize the algorithm with a strategy (vector of parameters).
Parameters
----------
new_params : array_like
Returns
-------
No value.
'''
pass
def reset(self):
'''
Resets the algorithm state.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
if self.seed is not None:
np.random.seed(self.seed)
# Generate initial population and particles' velocities.
self.set_population([self.generate_particle()
for _ in range(self.N)])
def generate_particle(self):
'''
Generates a swarm particle within bounds.
Parameters
----------
No parameters.
Returns
-------
ndarray
A vector of size D representing particle's coordinates.
'''
coords_range = self.u_bounds - self.l_bounds
return self.l_bounds + np.random.uniform(size=self.D) * coords_range
def set_population(self, new_population):
'''
Sets a population with a pre-generated one.
Parameters
----------
new_population: array_like
A matrix with dimensions N by D, which represents the coordinates
of each particle.
Returns
-------
No value.
'''
self.eval_num = self.N
self.N = len(new_population)
self.particles = np.copy(new_population)
self.scores = np.array([self.fit_func(p) for p in self.particles])
# Initializing current best.
        gbest_index = np.argmin(self.scores)
self.gbest = np.copy(self.particles[gbest_index])
self.gbest_score = self.scores[gbest_index]
@abstractmethod
def optimize(self):
'''
Main loop of the algorithm.
Parameters
----------
No parameters.
Returns
-------
ndarray
The coordinates of the global best particle at the end of
the optimization process.
'''
pass
def update_best(self):
'''
Updates global best particle if needed.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
current_best_index = np.argmin(self.scores)
current_best = self.particles[current_best_index]
current_best_score = self.scores[current_best_index]
if current_best_score < self.gbest_score:
self.gbest = np.copy(current_best)
self.gbest_score = current_best_score
def simplebounds(self, coords):
'''
Simple constraint rule for particles' positions
(in-place coordinate modification).
Parameters
----------
coords: ndarray
An array of particles to apply the rule to.
Returns
-------
No value.
'''
l_bounds_tiled = np.tile(self.l_bounds, [coords.shape[0], 1])
u_bounds_tiled = np.tile(self.u_bounds, [coords.shape[0], 1])
lower_bound_indexes = coords < self.l_bounds
upper_bound_indexes = coords > self.u_bounds
coords[lower_bound_indexes] = l_bounds_tiled[lower_bound_indexes]
coords[upper_bound_indexes] = u_bounds_tiled[upper_bound_indexes]
def info(self):
'''
Returns basic information about the algorithm state in a
human-readable representation.
Parameters
----------
No parameters.
Returns
-------
str
Information about current best position, score and
current number of fitness-function evaluations.
'''
info = f'Algorithm: {type(self).__name__}\n'
info += f'Best position: {self.gbest}\n'
info += f'Best score: {self.gbest_score}\n'
info += f'Fitness function evaluatiions number: {self.eval_num}'
return info
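
# A minimal concrete subclass sketch (the plain random-search update rule below
# is only an illustration of the required interface, not one of the real swarm
# algorithms built on top of this base class):
#
# class RandomSearch(SwarmAlgorithm):
#     def set_params(self, new_params):
#         pass  # no behavioural parameters for plain random search
#
#     def optimize(self):
#         for _ in range(self.max_iter):
#             self.particles = np.array(
#                 [self.generate_particle() for _ in range(self.N)])
#             self.scores = np.array([self.fit_func(p) for p in self.particles])
#             self.eval_num += self.N
#             self.update_best()
#         return self.gbest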
| 28.407895 | 79 | 0.589007 | 6,419 | 0.991045 | 0 | 0 | 626 | 0.09665 | 0 | 0 | 4,035 | 0.622974 |
7d8352a4615e2d80df5904ec6e1dc6850549b6ea | 1,376 | py | Python | Python-3/basic_examples/strings/python_str_to_datetime.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
]
| 1,139 | 2018-05-09T11:54:36.000Z | 2022-03-31T06:52:50.000Z | Python-3/basic_examples/strings/python_str_to_datetime.py | iamharshverma/journaldev | af24242a1ac1b7dc3e8e2404ec916b77ccf5044a | [
"MIT"
]
| 56 | 2018-06-20T03:52:53.000Z | 2022-02-09T22:57:41.000Z | Python-3/basic_examples/strings/python_str_to_datetime.py | iamharshverma/journaldev | af24242a1ac1b7dc3e8e2404ec916b77ccf5044a | [
"MIT"
]
| 2,058 | 2018-05-09T09:32:17.000Z | 2022-03-29T13:19:42.000Z | from datetime import datetime
# string to datetime object
datetime_str = '09/19/18 13:55:26'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')
print(type(datetime_object))
print(datetime_object) # printed in default format
# string to date object
date_str = '09-19-2018'
date_object = datetime.strptime(date_str, '%m-%d-%Y').date()
print(type(date_object))
print(date_object) # printed in default formatting
# string to time object
time_str = '13::55::26'
time_object = datetime.strptime(time_str, '%H::%M::%S').time()
print(type(time_object))
print(time_object)
# time module
import time
time_obj = time.strptime(time_str, '%H::%M::%S')
print(type(time_obj))
print(time_obj)
# default formatting - "%a %b %d %H:%M:%S %Y"
print(time.strptime('Wed Sep 19 14:55:02 2018'))
# exception handling example
datetime_str = '09/19/18 13:55:26'
try:
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y')
except ValueError as ve:
print('ValueError Raised:', ve)
time_str = '99::55::26'
try:
time_object = time.strptime(time_str, '%H::%M::%S')
except ValueError as e:
print('ValueError:', e)
# str to datetime with locale
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
date_str_de_DE = '10-Dezember-2018 Montag' # de_DE locale
datetime_object = datetime.strptime(date_str_de_DE, '%d-%B-%Y %A')
print(datetime_object)
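
# reverse conversion (added sketch): datetime object back to string via strftime;
# note that under the de_DE locale set above, month and weekday names are German
print(datetime_object.strftime('%d-%B-%Y %A'))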
| 24.571429 | 70 | 0.713663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.37282 |
7d85c7a93fbd0155d7bd1fe3e1af5e36cc75c497 | 484 | py | Python | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
]
| 5 | 2019-09-23T19:04:59.000Z | 2020-08-06T18:07:48.000Z | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
]
| 1 | 2020-08-08T12:41:35.000Z | 2020-08-10T18:21:48.000Z | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
]
| 4 | 2020-02-25T22:37:02.000Z | 2021-04-13T14:43:16.000Z | ###############################################################################
# Copyright (c) 2018, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# Written by Thomas Mendoza [email protected]
# LLNL-CODE-771750
# All rights reserved
#
# This file is part of SSHSpawner: https://github.com/LLNL/SSHSpawner
#
# SPDX-License-Identifier: BSD-3-Clause
###############################################################################
| 37.230769 | 79 | 0.520661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.975207 |
7d85e7f96f3d8e7fbfc3a65a4dfc184c2bae42cc | 7,697 | py | Python | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
]
| null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
]
| null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
]
| null | null | null | from datetime import time
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from vnpy.app.cta_strategy.base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
import numpy as np
class TSMyoBiasAccuStrategy(CtaTemplate):
""""""
author = "TheSuperMyo"
    # Intraday trading: exit all positions before this time
    exit_time = time(hour=14, minute=57)
    # Session boundaries for markets with different trading hours
    open_time_night = time(hour=21,minute=0)  # commodity night session open
    open_time_day_1 = time(hour=9,minute=0)  # commodity day session open
    open_time_day_2 = time(hour=9,minute=30)  # stock index futures open
    close_time_day = time(hour=15,minute=0)  # commodities/stock index close (except interest rate futures)
    close_time_night_1 = time(hour=23,minute=0)  # other night-session commodities close
    close_time_night_2 = time(hour=1,minute=0)  # industrial metals close
    close_time_night_3 = time(hour=2,minute=30)  # gold/silver/crude oil close
    break_time_start_1 = time(hour=10,minute=15)  # commodity morning break start
    break_time_start_2 = time(hour=11,minute=30)  # lunch break start (all markets)
    break_time_end_1 = time(hour=10,minute=30)  # commodity morning break end
    break_time_end_2 = time(hour=13,minute=0)  # stock index afternoon open
    break_time_end_3 = time(hour=13,minute=30)  # commodity afternoon open
    ma_len = 14  # moving-average length used to compute the bias
    accu_len = 8  # accumulation window for the bias
    accu_std_fliter = 2  # bias threshold as a multiple of its standard deviation
    trailing_stop = 0.5  # trailing stop, in percent
    fixed_size = 1  # fixed order size in lots
    bar_counter = 0  # minute-bar counter within the trading day
    signal = 0  # entry signal
stop_long = 0
stop_short = 0
hold_high = 0
hold_low = 0
parameters = ['ma_len','accu_len','accu_std_fliter','trailing_stop','fixed_size']
variables = ['bar_counter','signal','stop_long','stop_short']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TSMyoBiasAccuStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar)
self.am = TSMArrayManager()
        # Orders managed by this strategy instance
self.active_orderids = []
self.bars = []
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
        # Bars from previous days are not used by the signal
self.load_bar(5)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def tick_filter(self, tick: TickData):
"""
        Filter out ticks that fall outside regular trading hours
"""
tick_time = tick.datetime.time()
if tick_time < self.open_time_day_2:
return False
if tick_time > self.break_time_start_2 and tick_time < self.break_time_end_2:
return False
if tick_time > self.close_time_day:
return False
return True
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
if not self.tick_filter(tick):
return
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
        1. Count minute bars within the trading day
        2. Place and cancel orders
"""
self.bar_counter += 1
self.cancel_all()
am = self.am
am.update_bar(bar)
if not am.inited:
return
self.bars.append(bar)
if len(self.bars) <= 2:
return
else:
self.bars.pop(0)
last_bar = self.bars[-2]
if ( last_bar.datetime.date() != bar.datetime.date() ):
self.bar_counter = 1
        # Ensure the bias-accumulation signal only reflects the current trading day
if self.bar_counter < max(self.accu_len,self.ma_len):
return
self.signal = am.bias_SMA_Accumulated_signal(self.ma_len, self.accu_len, self.accu_std_fliter, False)
if self.pos == 0:
if self.signal == 1:
                # Entry: open long
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.buy(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.signal == -1:
                # Entry: open short
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.short(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.pos > 0:
self.hold_high = max(self.hold_high,bar.high_price)
self.stop_long = self.hold_high*(1-self.trailing_stop/100)
if bar.datetime.time() > self.exit_time or self.signal == -1:
                # Intraday exit: close the long position
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.sell(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
                # Trailing stop order to close the long position
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.sell(self.stop_long, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
if self.pos < 0:
self.hold_low = min(self.hold_low,bar.low_price)
self.stop_short = self.hold_low*(1+self.trailing_stop/100)
if bar.datetime.time() > self.exit_time or self.signal == 1:
                # Intraday exit: close the short position
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.cover(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
                # Trailing stop order to close the short position
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.cover(self.stop_short, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
        # Remove orders that are no longer active (filled or cancelled)
if not order.is_active() and order.vt_orderid in self.active_orderids:
self.active_orderids.remove(order.vt_orderid)
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
        # Email notification of the fill
self.send_email(f"{trade.vt_symbol}在{trade.time}成交,价格{trade.price},方向{trade.direction}{trade.offset},数量{trade.volume}")
if self.pos == 0:
self.stop_long = 0
self.stop_short = 0
if self.pos > 0:
self.hold_high = trade.price
if self.pos < 0:
self.hold_low = trade.price
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
        # A freshly created local stop order: nothing to do yet
if stop_order.status == StopOrderStatus.WAITING:
return
        # A cancelled local stop order: remove it from the active list
if stop_order.status == StopOrderStatus.CANCELLED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
        # A triggered local stop order: remove the stop order and track the spawned limit orders
if stop_order.status == StopOrderStatus.TRIGGERED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
self.active_orderids.extend(stop_order.vt_orderids)
            # Cancel any remaining stop orders
for other_orderids in self.active_orderids:
if other_orderids.startswith(STOPORDER_PREFIX):
self.cancel_order(other_orderids) | 31.545082 | 127 | 0.572951 | 7,985 | 0.954116 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.213885 |
7d86bb1a8869218343e11c5b17e9cc10ddeac450 | 4,249 | py | Python | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
]
| 1 | 2020-07-19T16:10:19.000Z | 2020-07-19T16:10:19.000Z | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
]
| 46 | 2019-11-01T08:53:32.000Z | 2022-01-15T10:27:56.000Z | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
]
| null | null | null | import sys
import os
import pathlib
import logging
import subprocess
import datetime
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
import src.preprocessing.preprocessing as pp
print(os.environ['PROJECT_ID'])
print(os.environ['BUCKET_NAME'])
print(os.environ['REGION'])
# define query table
def create_query():
query = """
SELECT
id,
title,
body,
tags
FROM
`bigquery-public-data.stackoverflow.stackoverflow_posts`
LIMIT 100
"""
return query
table_schema = {'fields': [
{'name': 'id', 'type': 'NUMERIC', 'mode': 'REQUIRED'},
{'name': 'title', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'text_body', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'code_body', 'type': 'STRING', 'mode': 'NULLABLE'},
{"fields": [
{"mode": "NULLABLE",
"name": "value",
"type": "STRING"}
],
"mode": "REPEATED",
"name": "tags",
"type": "RECORD"
}
]}
def preprocess():
"""
Arguments:
-RUNNER: "DirectRunner" or "DataflowRunner". Specfy to run the pipeline locally or on Google Cloud respectively.
Side-effects:
-Creates and executes dataflow pipeline.
See https://beam.apache.org/documentation/programming-guide/#creating-a-pipeline
"""
job_name = 'test-stackoverflow' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
project = os.environ['PROJECT_ID']
region = os.environ['REGION']
output_dir = "gs://{0}/stackoverflow/".format(os.environ['BUCKET_NAME'])
# options
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = project
google_cloud_options.job_name = job_name
google_cloud_options.region = region
google_cloud_options.staging_location = os.path.join(output_dir, 'tmp', 'staging')
google_cloud_options.temp_location = os.path.join(output_dir, 'tmp')
# done by command line
#options.view_as(StandardOptions).runner = RUNNER
options.view_as(SetupOptions).setup_file=os.environ['DIR_PROJ']+'/setup.py'
    # instantiate Pipeline object using PipelineOptions
print('Launching Dataflow job {} ... hang on'.format(job_name))
p = beam.Pipeline(options=options)
table = p | 'Read from BigQuery' >> beam.io.Read(beam.io.BigQuerySource(
# query
query=create_query(),
# use standard SQL for the above query
use_standard_sql=True)
)
clean_text = table | 'Clean Text' >> beam.ParDo(pp.NLPProcessing())
clean_text | 'Write to BigQuery' >> beam.io.WriteToBigQuery(
# The table name is a required argument for the BigQuery
table='test_stackoverflow_beam_nlp',
dataset='test',
project=project,
# Here we use the JSON schema read in from a JSON file.
# Specifying the schema allows the API to create the table correctly if it does not yet exist.
schema=table_schema,
# Creates the table in BigQuery if it does not yet exist.
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
# Deletes all data in the BigQuery table before writing.
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
# not needed, from with clause
if options.view_as(StandardOptions).runner == 'DataflowRunner':
print('DataflowRunner')
p.run()
else:
print('Default: DirectRunner')
result = p.run()
result.wait_until_finish()
print('Done')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
print('Starting main process ...')
preprocess()
# Usage
# python3 test-beam-dataflow.py --runner DataflowRunner
# python3 test-beam-dataflow.py
# python3 test-beam-dataflow.py --runner DataflowRunner --no_use_public_ips --subnetwork https://www.googleapis.com/compute/v1/projects/xxx/regions/europe-west1/subnetworks/yyyy --region=europe-west1 --zone=europe-west1-b
| 35.408333 | 221 | 0.676865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,989 | 0.46811 |
7d87158e11ce4ed100a35dda4334c28bbf1bf852 | 3,882 | py | Python | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
]
| 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
]
| 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
]
| 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z | # Slixmpp: The Slick XMPP Library
# Copyright (C) 2020 Mathieu Pasquet <[email protected]>
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from typing import (
List,
Optional,
Set,
Tuple,
)
from slixmpp import JID, Iq
from slixmpp.exceptions import IqError, IqTimeout
from slixmpp.plugins import BasePlugin
from slixmpp.stanza.roster import RosterItem
from slixmpp.plugins.xep_0405 import stanza
from slixmpp.plugins.xep_0369 import stanza as mix_stanza
BASE_NODES = [
'urn:xmpp:mix:nodes:messages',
'urn:xmpp:mix:nodes:participants',
'urn:xmpp:mix:nodes:info',
]
class XEP_0405(BasePlugin):
'''XEP-0405: MIX-PAM'''
name = 'xep_0405'
description = 'XEP-0405: MIX-PAM'
dependencies = {'xep_0369'}
stanza = stanza
namespace = stanza.NS
def plugin_init(self) -> None:
stanza.register_plugins()
async def check_server_capability(self) -> bool:
"""Check if the server is MIX-PAM capable"""
result = await self.xmpp.plugin['xep_0030'].get_info(jid=self.xmpp.boundjid.bare)
features = result['disco_info']['features']
return stanza.NS in features
async def join_channel(self, room: JID, nick: str, subscribe: Optional[Set[str]] = None, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Set[str]:
"""
Join a MIX channel.
:param JID room: JID of the MIX channel
:param str nick: Desired nickname on that channel
:param Set[str] subscribe: Set of nodes to subscribe to when joining.
If empty, all nodes will be subscribed by default.
:rtype: Set[str]
:return: The nodes that failed to subscribe, if any
"""
if subscribe is None:
subscribe = set(BASE_NODES)
if ito is None:
ito = self.xmpp.boundjid.bare
iq = self.xmpp.make_iq_set(ito=ito, ifrom=ifrom)
iq['client_join']['channel'] = room
iq['client_join']['mix_join']['nick'] = nick
for node in subscribe:
sub = mix_stanza.Subscribe()
sub['node'] = node
iq['client_join']['mix_join'].append(sub)
result = await iq.send(**iqkwargs)
result_nodes = {sub['node'] for sub in result['client_join']['mix_join']}
return subscribe.difference(result_nodes)
async def leave_channel(self, room: JID, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Iq:
""""
Leave a MIX channel
:param JID room: JID of the channel to leave
"""
if ito is None:
ito = self.xmpp.boundjid.bare
iq = self.xmpp.make_iq_set(ito=ito, ifrom=ifrom)
iq['client_leave']['channel'] = room
iq['client_leave'].enable('mix_leave')
return await iq.send(**iqkwargs)
async def get_mix_roster(self, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Tuple[List[RosterItem], List[RosterItem]]:
"""
Get the annotated roster, with MIX channels.
:return: A tuple of (contacts, mix channels) as RosterItem elements
"""
iq = self.xmpp.make_iq_get(ito=ito, ifrom=ifrom)
iq['roster'].enable('annotate')
result = await iq.send(**iqkwargs)
self.xmpp.event("roster_update", result)
contacts = []
mix = []
for item in result['roster']:
channel = item.get_plugin('channel', check=True)
if channel:
mix.append(item)
else:
contacts.append(item)
return (contacts, mix)
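
# A rough usage sketch inside an async slixmpp handler (the channel JID and nick
# below are placeholders, not part of this module):
#
#   failed_nodes = await xmpp['xep_0405'].join_channel(
#       JID('[email protected]'), nick='somenick')
#   contacts, channels = await xmpp['xep_0405'].get_mix_roster()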
| 34.660714 | 95 | 0.580629 | 3,249 | 0.83694 | 0 | 0 | 0 | 0 | 2,961 | 0.762751 | 1,221 | 0.314529 |
7d872614c5ec53276181d661d5d56268e35d080a | 1,360 | py | Python | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
]
| null | null | null | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
]
| null | null | null | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
]
| null | null | null | from control.controller_veiculos import ControllerVeiculos
from control.controller_proprietario import ControllerProprietario
from control.controller_area import ControllerAreaEstacionamento
from model.constants import *
controller_veiculo = ControllerVeiculos()
controller_proprietario = ControllerProprietario()
controller_areas = ControllerAreaEstacionamento()
def cadastrar_area_especial():
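    """Prompt the operator (console text in Portuguese) and register a new special parking area."""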
print("\n====== CADASTRAR AREA ESPECIAL ======")
nome = input("Nome: ")
try:
capacidade = int(input("Capacidade: "))
print("[1] Carro; [2] Motocicleta; [3] Onibus")
tipo = TIPO_VEICULO[int(input("Tipo de veiculo (1, 2, ou 3): "))]
controller_areas.register_area(nome, tipo, capacidade)
except:
print("Input invalido")
def remover_area_especial():
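    """List the registered special areas (console text in Portuguese) and remove the one chosen by index."""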
print("\n====== REMOVER AREA ESPECIAL ======")
areas = controller_areas.find_special_areas()
areas_str = ""
if len(areas) <= 0:
print("Nao existem areas especiais cadastradas")
return
for i in range(len(areas)):
areas_str += "[{}] {} ".format((i + 1), areas[i].get_nome())
print(areas_str)
try:
area_nome = areas[(int(input("Area (indice): ")) - 1)].get_nome()
controller_areas.remove_area(area_nome)
except:
print("Input invalido. Voce precisa inserir um indice valido")
| 32.380952 | 73 | 0.675735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.232353 |
7d889fb0ab0b91db363297f53747bd0adaa5fe54 | 2,811 | py | Python | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
]
| null | null | null | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
]
| null | null | null | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
]
| null | null | null | '''
Test HTTP/2 with h2spec
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test HTTP/2 with httpspec
'''
Test.SkipUnless(
    Condition.HasProgram("h2spec", "h2spec needs to be installed on the system for this test to work"),
)
Test.ContinueOnFail = True
# ----
# Setup httpbin Origin Server
# ----
httpbin = Test.MakeHttpBinServer("httpbin")
# ----
# Setup ATS. Disable the cache to simplify the test.
# ----
ts = Test.MakeATSProcess("ts", enable_tls=True, enable_cache=False)
# add ssl materials like key, certificates for the server
ts.addDefaultSSLFiles()
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port)
)
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.records_config.update({
'proxy.config.http.insert_request_via_str': 1,
'proxy.config.http.insert_response_via_str': 1,
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'http',
})
# ----
# Test Cases
# ----
# In case you need to disable some of the tests, you can specify sections like http2/6.4.
h2spec_targets = "http2/1 http2/2 http2/3 http2/4 http2/5 http2/6 http2/7 http2/8 hpack"
test_run = Test.AddTestRun()
test_run.Processes.Default.Command = 'h2spec {0} -t -k --timeout 10 -p {1}'.format(h2spec_targets, ts.Variables.ssl_port)
test_run.Processes.Default.ReturnCode = 0
test_run.Processes.Default.StartBefore(httpbin, ready=When.PortOpen(httpbin.Variables.Port))
test_run.Processes.Default.StartBefore(Test.Processes.ts)
test_run.Processes.Default.Streams.stdout = "gold/h2spec_stdout.gold"
test_run.StillRunningAfter = httpbin
# Over riding the built in ERROR check since we expect some error cases
ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR: HTTP/2", "h2spec tests should have error log")
| 37.48 | 121 | 0.743863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,829 | 0.650658 |
7d8a92045f001897812e0811e27aaab163f27e32 | 576 | py | Python | examples/02/client.py | cjrh/aiosmartsock | a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f | [
"Apache-2.0"
]
| 9 | 2019-03-25T23:25:08.000Z | 2022-01-17T00:49:26.000Z | examples/02/client.py | cjrh/aiomsg | 74b646675e3d7296f0334d3e17c1be0370c5d852 | [
"Apache-2.0"
]
| 33 | 2019-04-13T02:31:07.000Z | 2022-03-21T19:12:14.000Z | examples/02/client.py | cjrh/aiosmartsock | a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f | [
"Apache-2.0"
]
| 1 | 2021-04-26T09:07:36.000Z | 2021-04-26T09:07:36.000Z | import logging
import itertools
import asyncio
import random
import aiomsg
import aiorun
logging.basicConfig(level="DEBUG")
async def main():
s = aiomsg.Søcket(send_mode=aiomsg.SendMode.ROUNDROBIN)
await s.connect()
async def receiver():
while True:
msg = await s.recv_string()
print("Got back: ", msg)
loop = aiorun.asyncio.get_running_loop()
loop.create_task(receiver())
for i in itertools.count():
await s.send_string(f"{i}")
await asyncio.sleep(random.randint(0, 30) / 6)
aiorun.run(main())
| 19.2 | 59 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.740035 | 25 | 0.043328 |
7d8b956b2e624082889be95139c9c63feed50163 | 1,901 | py | Python | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
]
| 14 | 2020-02-07T21:36:39.000Z | 2022-03-12T22:37:04.000Z | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
]
| 19 | 2019-05-18T23:58:30.000Z | 2022-01-09T16:45:35.000Z | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
]
| 5 | 2020-10-06T06:10:27.000Z | 2021-07-08T12:58:46.000Z | # Dependency injection:
# Technique where one object (or static method) supplies the dependencies of another object.
# The objective is to decouple objects to the extent that no client code has to be changed
# simply because an object it depends on needs to be changed to a different one.
# Dependency injection is one form of the broader technique of inversion of control.
# Theoretically, the client is not allowed to call the injector code; it is the injecting code
# that constructs the services and calls the client to inject them. This means the client code
# does not need to know about the injecting code, just the interfaces. This separates the
# responsibilities of use and construction.
# In Python there are not many frameworks for dependency injection: https://stackoverflow.com/questions/2461702/why-is-ioc-di-not-common-in-python
#
# source code: http://stackoverflow.com/a/3076636/5620182
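# A more typical illustrative sketch of dependency injection (not from the linked
# answer): the collaborator is passed in through the constructor, so the caller or
# a test decides which implementation is used.
class _Engine(object):
    def start(self):
        return "engine started"
class _Car(object):
    def __init__(self, engine):
        # the dependency is supplied from outside instead of being created in here
        self.engine = engine
    def drive(self):
        return self.engine.start()
# e.g. _Car(_Engine()).drive(); a test could inject a stub engine instead.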
class Shape(object):
def __new__(cls, *args, **kwargs):
# required because Line's __new__ method is the same as Shape's
if cls is Shape:
description, args = args[0], args[1:]
if description == "It's flat":
new_cls = Line
else:
raise ValueError(
"Invalid description: {}.".format(description))
else:
new_cls = cls
return super(Shape, cls).__new__(new_cls, *args, **kwargs)
def number_of_edges(self):
return "A shape can have many edges..."
class Line(Shape):
def number_of_edges(self):
return 1
class SomeShape(Shape):
pass
if __name__ == "__main__":
l1 = Shape("It's flat")
print(l1.number_of_edges()) # 1
l2 = Line()
print(l2.number_of_edges()) # 1
u = SomeShape()
print(u.number_of_edges()) # A shape can have many edges...
s = Shape("Hexagon") # ValueError: Invalid description: Hexagon.
| 35.203704 | 146 | 0.678064 | 685 | 0.360337 | 0 | 0 | 0 | 0 | 0 | 0 | 1,136 | 0.59758 |
7d8c2a23670b05afd3505faf37ad0aff75f308fd | 5,073 | py | Python | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
]
| 7 | 2019-08-01T14:57:34.000Z | 2019-11-26T12:12:17.000Z | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
]
| null | null | null | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
]
| 2 | 2019-08-16T04:52:50.000Z | 2019-11-26T12:12:25.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Author : Virink <[email protected]>
Date : 2019/04/18, 14:49
"""
import string
import re
L = string.ascii_lowercase
U = string.ascii_uppercase
A = string.ascii_letters
def func_atbash(*args):
    """Atbash cipher decode."""
arg = args[0]
arg = arg.lower().replace(' ', 'vvvzzzvvv')
res = [L[25 - j] for i in arg for j in range(26) if i == L[j]]
return ''.join(res).replace('eeeaaaeee', ' ')
def __caesar(offset, arg):
    """Caesar cipher: internal helper."""
result = ""
for ch in arg:
if ch.isupper():
result += U[((U.index(ch) + offset) % 26)]
elif ch.islower():
result += L[((L.index(ch) + offset) % 26)]
elif ch.isdigit():
result += ch
else:
result += ch
return result
def func_caesar(*args):
    """Caesar cipher: try all 26 offsets."""
res = []
for offset in range(26):
res.append("[+] offset : %d\tresult : %s" %
(offset, __caesar(offset, args[0])))
return "\r\n".join(res)
def func_rot13(*args):
"""rot13"""
return __caesar(13, args[0])
def func_mpkc(*args):
    """Mobile Phone Keyboard Cipher."""
T = {
'A': 21, 'B': 22, 'C': 23, 'D': 31, 'E': 32, 'F': 33,
'G': 41, 'H': 42, 'I': 43, 'J': 51, 'K': 52, 'L': 53,
'M': 61, 'N': 62, 'O': 63, 'P': 71, 'Q': 72, 'R': 73, 'S': 74,
'T': 81, 'U': 82, 'V': 83, 'W': 91, 'X': 92, 'Y': 93, 'Z': 94
}
arg = args[0].upper()
if arg[0] in U:
return ','.join([str(T.get(i, i)) for i in arg])
else:
T = {str(T[k]): k for k in T}
if ',' in arg:
arg = arg.split(',')
elif ' ' in arg:
arg = arg.split(' ')
return ''.join([T.get(i, i) for i in arg])
def func_morse(*args):
    """Morse code."""
T = {
'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.',
',': '--..--', '.': '.-.-.-', ':': '---...', ';': '-.-.-.',
'?': '..--..', '=': '-...-', "'": '.----.', '/': '-..-.',
'!': '-.-.--', '-': '-....-', '_': '..--.-', '(': '-.--.',
')': '-.--.-', '$': '...-..-', '&': '. . . .', '@': '.--.-.',
'{': '----.--', '}': '-----.-'
}
arg = args[0]
if re.match(r'^[\.\-\/ ]+$', arg):
T = {str(T[k]): k for k in T}
if len(args) > 1:
arg = ' '.join(args)
arg = arg.replace('/', ' ').split(' ')
# TODO: morse auto decode when it is not sep
# p = 0
# res = ''
# d = 5
# while p < (len(arg)+7) and d > 0:
# print("[D] len : %d p : %d" % (len(arg), p))
# for j in [6, 5, 4, 3, 2, 1, 0]:
# tmp = T.get(arg[p:p+j], None)
# print("[D] tmp = arg[%d:%s] = %s => %s" %
# (p, j, arg[p:p+j], tmp))
# if tmp:
# p = p+j
# res += tmp
# break
# # p = p+j-1
# # break
# d -= 1
# print("[D] Result : %s" % res)
return ''.join([T.get(i) for i in arg])
else:
return '/'.join([str(T.get(i, '?')) for i in arg.upper()])
def func_peigen(*args):
    """Bacon cipher."""
T = {
'H': 'aabbb', 'G': 'aabba', 'R': 'baaab', 'Q': 'baaaa',
'Z': 'bbaab', 'Y': 'bbaaa', 'N': 'abbab', 'M': 'abbaa',
'U': 'babaa', 'V': 'babab', 'I': 'abaaa', 'J': 'abaab',
'F': 'aabab', 'E': 'aabaa', 'A': 'aaaaa', 'B': 'aaaab',
'T': 'baabb', 'S': 'baaba', 'C': 'aaaba', 'D': 'aaabb',
'P': 'abbbb', 'O': 'abbba', 'K': 'ababa', 'L': 'ababb',
'W': 'babba', 'X': 'babbb'
}
arg = args[0]
if re.match(r'^[ab]+$', arg):
T = {str(T[k]): k for k in T}
return ''.join([T.get(arg[i:i+5]) for i in range(0, len(arg), 5)])
else:
return ''.join([T.get(i.upper()) for i in arg])
def __vigenere(s, key='virink', de=0):
    """Vigenère cipher."""
s = str(s).replace(" ", "").upper()
key = str(key).replace(" ", "").upper()
res = ''
i = 0
while i < len(s):
j = i % len(key)
k = U.index(key[j])
m = U.index(s[i])
if de:
if m < k:
m += 26
res += U[m - k]
else:
res += U[(m + k) % 26]
i += 1
return res
def func_vigenere(*args):
    """Vigenère cipher."""
if len(args) < 2:
return '[-] Vigenere Usage : command key text [isdecode]'
return __vigenere(args[1], args[0], 1 if len(args) >= 3 else 0)
| 30.196429 | 74 | 0.350089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,886 | 0.365291 |
7d8c33c577dc39007eec8277d366b069630608c1 | 1,773 | py | Python | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
]
| 24 | 2021-09-13T06:16:44.000Z | 2022-01-08T08:56:04.000Z | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
]
| 32 | 2021-09-28T05:33:00.000Z | 2021-12-12T09:51:09.000Z | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
]
| 1 | 2021-10-04T21:52:15.000Z | 2021-10-04T21:52:15.000Z | from .utils import (get_prescription, get_attributes, get_group)
from .models import Disease, Result, Score, Question, SurveyResponse
from .analysis import cardio_risk_group, diabetes_risk_group, stroke_risk_group
from statistics import mean
from celery import shared_task
@shared_task
def worker(session_id):
df, attributes = get_attributes(session_id)
diseases = list(Disease.objects.all())
supported_methods = {
'cardiovascular disease': cardio_risk_group,
'diabetes': diabetes_risk_group,
'stroke': stroke_risk_group
}
question_region = Question.objects.get(label='region')
session_region = (list(SurveyResponse.objects.filter(
session_id=session_id,
question_id=question_region.id))[0]).answer
results = []
for disease in diseases:
illness = disease.illness
result_kwargs = {
'session_id': session_id,
'disease': disease,
'region': session_region
}
if illness not in supported_methods:
result_kwargs['risk_factor'] = 0
result_kwargs['prescription'] = 'Method is currently not supported'
else:
method = supported_methods[illness]
score = method(df, attributes[illness])
result_kwargs['risk_factor'] = float(score)
result_kwargs['label'] = get_group(score)
result_kwargs['prescription'] = get_prescription(score)
result_obj = Result.objects.update_or_create(
session_id=session_id, disease=disease,
defaults=result_kwargs
)
results.append(result_obj[0])
score = (1 - mean([res.risk_factor for res in results])) * 100
Score.objects.create(session_id=session_id, score=score)
| 34.764706 | 79 | 0.668359 | 0 | 0 | 0 | 0 | 1,497 | 0.844332 | 0 | 0 | 175 | 0.098703 |
7d8c64c1f1dba35610d7552ede42b4b2192a13c9 | 419 | py | Python | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
]
| 3 | 2019-10-31T19:07:48.000Z | 2019-11-20T23:14:15.000Z | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
]
| 3 | 2021-03-09T22:54:52.000Z | 2021-05-10T19:19:00.000Z | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
]
| 4 | 2019-11-05T20:22:12.000Z | 2019-12-12T18:08:30.000Z | import importlib
import os
import glob
from .user import create_user_routes
from .repo import create_repo_routes
from .broker import create_broker_routes
def create_routes(server):
create_user_routes(server)
create_repo_routes(server)
create_broker_routes(server)
# for plugin_name in server._augur._loaded_plugins:
# module = server._augur[plugin_name]
# module.create_routes(server)
| 26.1875 | 55 | 0.778043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.300716 |
7d8fe3a63259aba89e6864813dbcb43ee8122092 | 2,117 | py | Python | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
]
| 4 | 2020-03-10T15:28:17.000Z | 2021-10-02T11:41:17.000Z | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
]
| 1 | 2020-03-25T11:31:44.000Z | 2020-03-25T11:31:44.000Z | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
]
| 9 | 2020-02-25T18:43:42.000Z | 2021-08-10T17:08:42.000Z | import json
import random
import subprocess
from stests.core.logging import log_event
from stests.chain.utils import execute_cli
from stests.chain.utils import DeployDispatchInfo
from stests.core.types.chain import Account
from stests.core.types.infra import Network
from stests.core.types.infra import Node
from stests.core.utils import paths
from stests.events import EventType
# Method upon client to be invoked.
_CLIENT_METHOD = "transfer"
# Maximum value of a transfer ID.
_MAX_TRANSFER_ID = (2 ** 63) - 1
@execute_cli(_CLIENT_METHOD, EventType.WFLOW_DEPLOY_DISPATCH_FAILURE)
def execute(info: DeployDispatchInfo, cp2: Account, amount: int, verbose: bool = True) -> str:
"""Executes a transfer between 2 counter-parties & returns resulting deploy hash.
:param info: Standard information required to dispatch deploy.
:param cp2: Account information of counter party 2.
:param amount: Amount (in motes) to be transferred.
:param verbose: Flag inidcating whether event will be logged.
:returns: Dispatched deploy hash.
"""
binary_path = paths.get_path_to_client(info.network)
cp1 = info.dispatcher
cli_response = subprocess.run([
binary_path, _CLIENT_METHOD,
"--target-account", cp2.account_key,
"--amount", str(amount),
"--chain-name", info.network.chain_name,
"--gas-price", str(info.gas_price),
"--node-address", info.node_address,
"--payment-amount", str(info.fee),
"--secret-key", info.dispatcher.get_private_key_pem_filepath(),
"--transfer-id", str(random.randint(1, _MAX_TRANSFER_ID)),
"--ttl", str(info.time_to_live),
],
stdout=subprocess.PIPE,
)
deploy_hash = json.loads(cli_response.stdout)['result']['deploy_hash']
if verbose:
log_event(
EventType.WFLOW_DEPLOY_DISPATCHED,
f"{info.node.address} :: {deploy_hash} :: transfer (native) :: {amount} CSPR :: from {cp1.account_key[:8]} -> {cp2.account_key[:8]} ",
info.node,
deploy_hash=deploy_hash,
)
return deploy_hash
| 34.704918 | 146 | 0.687293 | 0 | 0 | 0 | 0 | 1,599 | 0.755314 | 0 | 0 | 731 | 0.3453 |
7d90aa90743d9451f50ce626438114785520c9d1 | 1,143 | py | Python | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
]
| 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
]
| null | null | null | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
]
| 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z | """
235. Lowest Common Ancestor of a Binary Search Tree
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
minn = min(p.val, q.val)
maxx = max(p.val,q.val)
while root.val < minn or root.val>maxx:
if root.val < minn: root = root.right
else: root = root.left
return root
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if (p.val-root.val)*(q.val-root.val) <= 0:
return root
elif p.val < root.val:
return self.lowestCommonAncestor(root.left,p,q)
else:
return self.lowestCommonAncestor(root.right,p,q)
| 28.575 | 61 | 0.523185 | 888 | 0.776903 | 0 | 0 | 0 | 0 | 0 | 0 | 483 | 0.422572 |
7d9246bc05b6e5994b39b6b9455b5e82dd240f3c | 3,494 | py | Python | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
]
| 324 | 2015-01-02T20:48:33.000Z | 2021-12-11T14:44:34.000Z | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
]
| 103 | 2015-01-02T03:01:34.000Z | 2020-04-02T19:03:53.000Z | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
]
| 84 | 2015-01-07T08:53:05.000Z | 2021-01-04T00:26:38.000Z | from functools import wraps
from collections import Iterable
from django.conf import settings
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six import string_types
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import resolve_url
from waliki.utils import is_authenticated
from .models import ACLRule
from .settings import (WALIKI_ANONYMOUS_USER_PERMISSIONS,
WALIKI_LOGGED_USER_PERMISSIONS,
WALIKI_RENDER_403)
def check_perms(perms, user, slug, raise_exception=False):
    """a helper used to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analog to django's builtin ``permission_required`` decorator, but
improved to check per slug ACLRules and default permissions for
anonymous and logged in users
if there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, defaults permissions
apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
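# Illustrative usage of the decorator above (the permission codename and the view
# are hypothetical, not part of this module):
#
#     @permission_required('change_page')
#     def edit(request, slug):
#         ...
#
# The wrapped view must receive the page slug kwarg, since the ACL rules are
# looked up per slug.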
| 39.704545 | 111 | 0.690326 | 0 | 0 | 0 | 0 | 1,349 | 0.38609 | 0 | 0 | 790 | 0.226102 |
7d9293e84f4a03376c976e40854cc463c3d0b2fe | 529 | py | Python | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
]
| 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
]
| null | null | null | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
]
| 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z | def conv(s):
if s[0] == 'a': v = '1'
elif s[0] == 'b': v = '2'
elif s[0] == 'c': v = '3'
elif s[0] == 'd': v = '4'
elif s[0] == 'e': v = '5'
elif s[0] == 'f': v = '6'
elif s[0] == 'g': v = '7'
elif s[0] == 'h': v = '8'
v += s[1]
return v
e = str(input()).split()
a = conv(e[0])
b = conv(e[1])
ax = int(a[0])
ay = int(a[1])
bx = int(b[0])
by = int(b[1])
if (abs(ax - bx) == 1 and abs(ay - by) == 2) or (abs(ax - bx) == 2 and abs(ay - by) == 1):
print('VALIDO')
else: print('INVALIDO')
| 23 | 90 | 0.404537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.124764 |
7d92e1048d2857d5559e9d7bb1d06d56001488c0 | 4,095 | py | Python | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
]
| null | null | null | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
]
| null | null | null | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
]
| 1 | 2021-03-01T22:20:26.000Z | 2021-03-01T22:20:26.000Z | #!/usr/bin/env python
import sys
import os
import glob
import re
from datetime import date
import argparse
import subprocess
from pathlib import Path
def main():
local_path = os.path.dirname(os.path.realpath(__file__))
#print(local_path)
data_path = f"{local_path}"
scaffold_helper = f"{local_path}/scaffold_cutter.R"
gapfixer_helper = f"{local_path}/gapfixer.R"
now = date.today()
home = str(Path.home())
cli = argparse.ArgumentParser()
cli.add_argument('-i', '--InputFolder', help="Folder containing barcoded fastq", required=True)
cli.add_argument('-o', '--OutputFolder', help=f"Output Folder. Default is {home}/rabiesrefnaap_results/output_{now}", required=False, default=f"{home}/rabiesrefnaap_results/output_{now}")
cli.add_argument('--TopN', help="The top N reference sequences with the most depth are analyzed. Default is 1.", type=int, required=False, default=1)
cli.add_argument('--MinCov', help="Amplicon regions need a minimum of this average coverage number. Default is 5.", type=int, required=False, default=5)
cli.add_argument('--threads', help="Number of threads. More is faster if your computer supports it. Default is 4.", type=int, required=False, default=4)
    cli.add_argument('--verbose', help="Keep intermediate files. Default is false.", required=False, action='store_true', default=False)
cli.add_argument('--model', help="Basecall Model", required=False, type=str, default='r10_min_high_g303')
args = cli.parse_args()
#Run fastqc and multiqc on all the fastq/fastq.gz files in the folder
subprocess.check_output(['python', local_path+'/fastqc_multiqc.py', '-i', args.InputFolder, '-o', args.OutputFolder+'/multiqc'])
subprocess.check_output(['cp', args.OutputFolder+'/multiqc/multiqc_report.html', args.OutputFolder+'/multiqc_report.html'])
#Interate over all the fastq/fastq.gz files
files = sorted([f for f in glob.glob(args.InputFolder+"/**", recursive = True) if re.search(r'(.*)\.((fastq|fq)(|\.gz))$', f)])
print(files)
OutputFolder = os.path.expanduser(args.OutputFolder)
f = open(f"{args.OutputFolder}/coverage_summary.txt", "w")
f.writelines(["filename", "\t", "reads", "\t", "mapped", "\t", "ncov", "\t", "gcov", "\t", "avelength"])
f.flush()
for i in range(0, len(files)):
filec = files[i]
base = os.path.splitext(os.path.basename(filec))[0]
base = os.path.splitext(base)[0]
print(base)
filec2 = args.OutputFolder+'/'+"filtered/"+base+"_filtered.fastq"
#Trim and filter the reads
subprocess.check_output(['python', local_path+'/seqtk_sizefilter_trim.py', '-i', filec, '-o', filec2])
#Get coverage
subprocess.check_output(['python', local_path+'/rabiescoverage.py', '-i', filec2, '-o', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt", '-t', str(args.threads)])
#subprocess.check_output(['cp', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt", args.OutputFolder+'/'+base+"_coverage.txt"])
subprocess.Popen(['tail', '-n 1', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt"], stdout=f)
f.write("\n")
f.flush()
#Get assembly
subprocess.check_output(['python', local_path+'/refnaap_cli.py', '-i', filec2, '-o', args.OutputFolder+'/assembly/'+base+"_assembly/", '-t', str(args.threads), '--TopN', str(args.TopN), '--MinCov', str(args.MinCov)])
subprocess.check_output(['cp', args.OutputFolder+'/assembly/'+base+"_assembly/final_scaffold.fasta", args.OutputFolder+"/"+base+"_final_scaffold.fasta"])
print("progress: {}/{}".format(i+1, len(files)))
f.close()
if not args.verbose:
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/coverage'])
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/assembly'])
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/filtered'])
subprocess.check_output(['rm', '-rf', args.OutputFolder+'/multiqc'])
if __name__ == "__main__":
sys.exit(main())
| 47.616279 | 224 | 0.668132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,694 | 0.413675 |
7d93db8015155beda4e7ca3caccf0926ce883652 | 8,887 | py | Python | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
]
| 4 | 2016-01-05T12:21:39.000Z | 2016-12-22T15:56:37.000Z | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
]
| 132 | 2015-06-10T09:53:14.000Z | 2022-02-01T17:35:54.000Z | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
]
| 3 | 2015-07-07T14:40:33.000Z | 2021-04-11T06:20:14.000Z | import datetime
from django.test import SimpleTestCase
from django.urls import reverse
from django.utils.html import strip_tags
import responses
from cashbook.tests import MTPBaseTestCase, api_url
from disbursements.forms import SearchForm
class DisbursementSearchViewTextCase(MTPBaseTestCase):
user = {'username': 'abc123', 'first_name': 'A', 'last_name': 'User'}
@property
def url(self):
return reverse('disbursements:search')
def test_no_disbursements_response(self):
self.login()
with responses.RequestsMock() as rsps:
rsps.add(rsps.GET, api_url('/disbursements/?resolution=pending&limit=1',),
json={'count': 0, 'results': []}, match_querystring=True)
rsps.add(rsps.GET, api_url('/disbursements/',),
json={'count': 0, 'results': []})
response = self.client.get(self.url)
self.assertContains(response, 'No disbursements found')
form = response.context_data['form']
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['page'], 1)
self.assertEqual(form.cleaned_data['ordering'], '-created')
self.assertEqual(form.cleaned_data['date_filter'], 'confirmed')
content = response.content.decode()
self.assertNotIn('This service is currently unavailable', content)
def test_disbursements_listed(self):
self.login()
with responses.RequestsMock() as rsps:
rsps.add(rsps.GET, api_url('/disbursements/?resolution=pending&limit=1',),
json={'count': 1, 'results': []}, match_querystring=True)
rsps.add(rsps.GET, api_url('/disbursements/',),
json={'count': 1, 'results': [{
'id': 100, 'amount': 1250, 'invoice_number': 'PMD1000100',
'method': 'cheque', 'resolution': 'sent', 'nomis_transaction_id': '123-1',
'prisoner_name': 'JOHN HALLS', 'prisoner_number': 'A1409AE',
'recipient_is_company_name': False,
'recipient_first_name': 'FN', 'recipient_last_name': 'SN', 'recipient_email': '',
'address_line1': '102 Petty France', 'address_line2': '',
'city': 'London', 'postcode': 'SW1H 9AJ', 'country': 'UK',
'sort_code': '', 'account_number': '', 'roll_number': '',
'log_set': [{'action': 'created', 'created': '2018-01-10T08:00:00Z',
'user': self.user},
{'action': 'confirmed', 'created': '2018-01-10T09:00:00Z',
'user': self.user},
{'action': 'sent', 'created': '2018-01-10T10:00:00Z',
'user': self.user}],
}]})
response = self.client.get(self.url)
content = response.content.decode()
self.assertNotIn('This service is currently unavailable', content)
self.assertIn('Cheque', content)
self.assertNotIn('Bank transfer', content)
self.assertIn('Confirmed 10/01/2018', content)
self.assertIn('Sent to SSCL', content)
self.assertIn('PMD1000100', content)
self.assertIn('£12.50', content)
self.assertIn('123-1', content)
self.assertIn('JOHN HALLS', content)
self.assertIn('A1409AE', content)
self.assertIn('FN SN', content)
self.assertIn('102 Petty France', content)
self.assertIn('London', content)
self.assertIn('SW1H 9AJ', content)
self.assertIn('Page 1 of 1', content)
def test_disbursements_search(self):
self.login()
with responses.RequestsMock() as rsps:
rsps.add(rsps.GET, api_url('/disbursements/?offset=10&limit=10&ordering=-created&resolution=confirmed',),
match_querystring=True,
json={'count': 11, 'results': [{
'id': 99, 'amount': 25010, 'invoice_number': '1000099',
'method': 'bank_transfer', 'resolution': 'confirmed', 'nomis_transaction_id': None,
'prisoner_name': 'JOHN HALLS', 'prisoner_number': 'A1409AE',
'recipient_is_company_name': False,
'recipient_first_name': 'FN', 'recipient_last_name': 'SN', 'recipient_email': 'email@local',
'address_line1': '13 Place Vendôme', 'address_line2': '',
'city': 'Paris', 'postcode': '75001', 'country': 'France',
'sort_code': '000000', 'account_number': '1234567890', 'roll_number': '',
'log_set': [{'action': 'created', 'created': '2018-01-10T08:00:00Z',
'user': self.user},
{'action': 'confirmed', 'created': '2018-01-10T09:00:00Z',
'user': self.user}],
}]})
response = self.client.get(self.url + '?page=2&resolution=confirmed')
form = response.context_data['form']
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['page'], 2)
self.assertEqual(form.cleaned_data['resolution'], 'confirmed')
content = response.content.decode()
self.assertNotIn('This service is currently unavailable', content)
self.assertNotIn('Cheque', content)
self.assertIn('Account 1234567890', content)
self.assertIn('Confirmed 10/01/2018', content)
self.assertNotIn('Sent by SSCL', content)
self.assertNotIn('1000099', content)
self.assertIn('£250.10', content)
self.assertIn('France', content)
self.assertIn('00-00-00', content)
self.assertIn('1234567890', content)
self.assertIn('email@local', content)
self.assertIn('Page 2 of 2', content)
class DisbursementSearchFormTextCase(SimpleTestCase):
def test_blank_form_valid(self):
form = SearchForm(request=None, data={})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['page'], 1)
description = form.search_description
self.assertFalse(description['has_filters'])
self.assertIn('Showing all disbursements', description['description'])
def test_invalid_options(self):
form = SearchForm(request=None, data={
'page': 0,
'ordering': 'date',
'method': 'cash',
'resolution': 'preconfirmed',
})
self.assertFalse(form.is_valid())
errors = form.errors.as_data()
self.assertIn('page', errors)
self.assertIn('ordering', errors)
self.assertIn('method', errors)
self.assertIn('resolution', errors)
def test_date_options(self):
form = SearchForm(request=None, data={
'date_filter': 'created',
'date__gte': '10/1/18',
'date__lt': '11/01/2018',
})
self.assertTrue(form.is_valid())
query_params = form.get_api_request_params()
query_params.pop('resolution', None)
self.assertDictEqual(query_params, {
'ordering': '-created',
'log__action': 'created',
'logged_at__gte': datetime.date(2018, 1, 10),
'logged_at__lt': datetime.date(2018, 1, 12),
})
description = form.search_description
self.assertTrue(description['has_filters'])
self.assertIn('date entered between 10 Jan 2018 and 11 Jan 2018', strip_tags(description['description']))
form = SearchForm(request=None, data={
'ordering': '-amount',
'date_filter': 'confirmed',
'date__lt': '2018-01-10',
})
self.assertTrue(form.is_valid())
query_params = form.get_api_request_params()
query_params.pop('resolution', None)
self.assertDictEqual(query_params, {
'ordering': '-amount',
'log__action': 'confirmed',
'logged_at__lt': datetime.date(2018, 1, 11),
})
description = form.search_description
self.assertTrue(description['has_filters'])
self.assertIn('date confirmed before 10 Jan 2018', strip_tags(description['description']))
def test_invalid_date_options(self):
form = SearchForm(
request=None,
data={
'date_filter': 'created',
'date__gte': '11/01/18',
'date__lt': '10/01/2018',
}
)
self.assertFalse(form.is_valid())
errors = form.errors.as_data()
self.assertEqual(
[error.message for error in errors['date__lt']],
['Must be after the ‘from’ date']
)
| 47.271277 | 117 | 0.567008 | 8,647 | 0.972228 | 0 | 0 | 75 | 0.008433 | 0 | 0 | 2,713 | 0.305037 |
7d93e9d98b1bfee0032c7712ee1027aadf9abac0 | 620 | py | Python | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
]
| null | null | null | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
]
| 10 | 2018-03-05T17:56:11.000Z | 2018-03-13T16:50:51.000Z | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
]
| 1 | 2021-04-10T19:50:33.000Z | 2021-04-10T19:50:33.000Z | '''
Utility for extracting data from the graphite API
'''
import os
from urllib.parse import urlencode
import pandas as pd
class GraphiteExtractUtility:
def __init__(self, default_url):
url = os.environ.get('GRAPHITE_URL', default_url)
self.render_url = url + '/render/?format=csv'
def get_csv(self, params, metric_name='count'):
'''
Fetch a CSV from the graphite API
'''
query_params = urlencode(params)
full_url = self.render_url + '&' + query_params
return pd.read_csv(full_url, header=None, names=('timestamp', metric_name), usecols=(1,2))
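# Illustrative usage (the Graphite URL, target metric and time range below are
# placeholder values, not part of this module):
#
#     util = GraphiteExtractUtility(default_url='http://graphite.example.com')
#     df = util.get_csv({'target': 'stats.web.requests', 'from': '-24h'},
#                       metric_name='requests')
#
# 'target' and 'from' are standard parameters of the Graphite render API.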
| 28.181818 | 98 | 0.662903 | 494 | 0.796774 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.274194 |
7d953acfe0d26007513dac6a05f6317497155128 | 712 | py | Python | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
]
| null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
]
| null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.1 on 2020-09-01 17:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('streetsignup', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='segment',
name='street',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='segments', to='streetsignup.street'),
),
migrations.AlterField(
model_name='subscription',
name='street',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='streetsignup.street'),
),
]
| 28.48 | 132 | 0.634831 | 586 | 0.823034 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.233146 |
7d9767476bcf26c64a3560357db2dd0c005504a9 | 9,830 | py | Python | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
]
| 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
]
| 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
]
| 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | """
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of
a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
`N X N` matrix where each element gives the strength of the
electrostatic interaction between two atoms. The method is described
in more detail in [1]_.
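  For reference, the matrix built by `coulomb_matrix` below uses
  M[i][j] = Z_i * Z_j / d_ij for i != j (nuclear charges divided by the
  interatomic distance in atomic units), and M[i][i] = 0.5 * Z_i ** 2.4
  on the diagonal.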
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
upper_tri: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule.
The default shape is `(num_confs, max_atoms, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
features = self.coulomb_matrix(datapoint)
if self.upper_tri:
features = [f[np.triu_indices_from(f)] for f in features]
features = np.asarray(features)
if features.shape[0] == 1:
# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
features = np.squeeze(features, axis=0)
return features
def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# Check whether num_confs >=1 or not
num_confs = len(mol.GetConformers())
if num_confs == 0:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
return np.asarray(rval)
def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m: np.ndarray
Coulomb matrix.
Returns
-------
List[np.ndarray]
List of the random coulomb matrix
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf: Any) -> np.ndarray:
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf: rdkit.Chem.rdchem.Conformer
Molecule conformer.
Returns
-------
np.ndarray
The distances matrix for all atoms in a molecule
"""
n_atoms = conf.GetNumAtoms()
coords = [
# Convert AtomPositions from Angstrom to bohr (atomic units)
conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
]
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The eigenvalues of Coulomb matrix for molecules.
The default shape is `(num_confs, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms,)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
cmat = self.coulomb_matrix(datapoint)
features_list = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features_list.append(f)
features = np.asarray(features_list)
if features.shape[0] == 1:
# `(1, max_atoms)` -> `(max_atoms,)`
features = np.squeeze(features, axis=0)
return features
| 31.812298 | 88 | 0.653713 | 9,499 | 0.966131 | 0 | 0 | 737 | 0.074959 | 0 | 0 | 5,757 | 0.585537 |
7d9822ec626534a501f48b72a69df1f8b8c72c49 | 2,882 | py | Python | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
]
| 32 | 2019-06-28T06:04:30.000Z | 2022-03-11T10:44:44.000Z | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
]
| 107 | 2019-07-10T19:09:51.000Z | 2022-03-10T22:52:58.000Z | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
]
| 26 | 2019-07-24T03:27:14.000Z | 2022-03-11T10:44:49.000Z | ## @file
# Module that encodes and decodes a FMP_PAYLOAD_HEADER with a payload.
# The FMP_PAYLOAD_HEADER is processed by the FmpPayloadHeaderLib in the
# FmpDevicePkg.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
FmpPayloadHeader
'''
import struct
def _SIGNATURE_32(A, B, C, D):
return struct.unpack('=I', bytearray(A + B + C + D, 'ascii'))[0]
def _SIGNATURE_32_TO_STRING(Signature):
return struct.pack("<I", Signature).decode()
class FmpPayloadHeaderClass (object):
#
# typedef struct {
# UINT32 Signature;
# UINT32 HeaderSize;
# UINT32 FwVersion;
# UINT32 LowestSupportedVersion;
# } FMP_PAYLOAD_HEADER;
#
# #define FMP_PAYLOAD_HEADER_SIGNATURE SIGNATURE_32 ('M', 'S', 'S', '1')
#
_StructFormat = '<IIII'
_StructSize = struct.calcsize(_StructFormat)
_FMP_PAYLOAD_HEADER_SIGNATURE = _SIGNATURE_32('M', 'S', 'S', '1')
def __init__(self):
self.Signature = self._FMP_PAYLOAD_HEADER_SIGNATURE
self.HeaderSize = self._StructSize
self.FwVersion = 0x00000000
self.LowestSupportedVersion = 0x00000000
self.Payload = b''
def Encode(self):
FmpPayloadHeader = struct.pack(
self._StructFormat,
self.Signature,
self.HeaderSize,
self.FwVersion,
self.LowestSupportedVersion
)
return FmpPayloadHeader + self.Payload
def Decode(self, Buffer):
if len(Buffer) < self._StructSize:
raise ValueError
(Signature, HeaderSize, FwVersion, LowestSupportedVersion) = struct.unpack(
self._StructFormat,
Buffer[0:self._StructSize]
)
if Signature != self._FMP_PAYLOAD_HEADER_SIGNATURE:
raise ValueError
if HeaderSize < self._StructSize:
raise ValueError
self.Signature = Signature
self.HeaderSize = HeaderSize
self.FwVersion = FwVersion
self.LowestSupportedVersion = LowestSupportedVersion
self.Payload = Buffer[self.HeaderSize:]
return self.Payload
def DumpInfo(self):
print('FMP_PAYLOAD_HEADER.Signature = {Signature:08X} ({SignatureString})'
.format(Signature=self.Signature, SignatureString=_SIGNATURE_32_TO_STRING(self.Signature)))
print('FMP_PAYLOAD_HEADER.HeaderSize = {HeaderSize:08X}'.format(HeaderSize=self.HeaderSize))
print('FMP_PAYLOAD_HEADER.FwVersion = {FwVersion:08X}'.format(FwVersion=self.FwVersion))
print('FMP_PAYLOAD_HEADER.LowestSupportedVersion = {LowestSupportedVersion:08X}'
.format(LowestSupportedVersion=self.LowestSupportedVersion))
print('sizeof (Payload) = {Size:08X}'.format(Size=len(self.Payload)))
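# Illustrative round-trip use of the class above (the version numbers and payload
# bytes are made-up example values, not part of the module):
if __name__ == '__main__':
    FmpHeader = FmpPayloadHeaderClass()
    FmpHeader.FwVersion = 0x00010002
    FmpHeader.LowestSupportedVersion = 0x00010000
    FmpHeader.Payload = b'example capsule payload'
    Buffer = FmpHeader.Encode()
    Decoded = FmpPayloadHeaderClass()
    Decoded.Decode(Buffer)
    Decoded.DumpInfo()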
| 33.905882 | 112 | 0.651631 | 2,360 | 0.818876 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.308813 |
7d9846b8c90e6af12c68768b068248c24ba1f30a | 1,580 | py | Python | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
]
| 8 | 2020-03-17T21:12:18.000Z | 2021-12-12T15:55:54.000Z | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
]
| 2 | 2021-07-19T06:18:43.000Z | 2022-02-10T12:17:58.000Z | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
]
| 25 | 2020-03-20T09:32:45.000Z | 2021-07-18T18:12:59.000Z | from Sender import Sender
from Receiver import Receiver
import scipy
import numpy as np
import scipy.io
import scipy.io.wavfile
import matplotlib.pyplot as plt
from scipy import signal
def readFromFile(path):
file = open(path, "rb")
data = file.read()
file.close()
return data
def readWav(file_name) -> np.ndarray:
rate, data = scipy.io.wavfile.read(file_name)
if data.dtype == np.int16:
return data.astype(np.float32, order='C') / 32768.0
return data
testData = readWav('testbitsnopilots.wav')
subset = readWav('wrongbitstest.wav')
r = Receiver()
rate = 160
corr = 235292
offset = r.findOffsetToFirstChange(testData)
truncated = r.truncateToTauS(testData, offset)
plt.plot(testData[corr - len(subset)//2:corr + len(subset)//2])
plt.show()
plt.plot(subset)
plt.show()
plt.plot(truncated)
plt.show()
demod = r.demodulate(truncated, 1/16, 1/40)
result = []
start = 0
for i in range(20):
if i == 2:
a = 5
plt.plot(truncated[start: start + 10 * 36 * 160])
    plt.show()
a = 6
#part_demod = r.demodulate(truncated[start: start + 10*36 * 160], 1/16, 1/40)
#result.append(list(r.repdecode(part_demod, 10)))
start = start + 10*36*160
print('result', result)
print(demod)
print(len(demod[1:]))
print(r.repdecode(demod[1:], 10))
sender = Sender()
demod = r.repdecode(demod, 10)
expected = sender.getTestDataAsBits()
error_sum = np.sum(np.abs(expected - demod))
print('error sum', error_sum)
print('error weight', np.sum(expected - demod))
print('error percentage', error_sum / len(expected) * 100) | 21.944444 | 81 | 0.68038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.142405 |
7d984b4f33bcef674a43431532ba484ab9af642d | 615 | py | Python | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
]
| null | null | null | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
]
| null | null | null | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
]
| null | null | null | """A simple wrapper around contextlib.suppress"""
import contextlib
from functools import wraps
__version__ = "0.1.1"
def suppress(*exceptions):
def wrap(func):
@wraps(func)
def inner(*args, **kwargs):
with contextlib.suppress(exceptions):
return func(*args, **kwargs)
return inner
return wrap
def async_suppress(*exceptions):
def wrap(func):
@wraps(func)
async def inner(*args, **kwargs):
with contextlib.suppress(exceptions):
return await func(*args, **kwargs)
return inner
return wrap
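# Illustrative usage (not part of the original module):
#
#     @suppress(FileNotFoundError)
#     def read_config(path):
#         with open(path) as fh:
#             return fh.read()
#
# If the file is missing, the exception is swallowed and the call returns None;
# async_suppress works the same way for coroutine functions.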
| 21.964286 | 50 | 0.604878 | 0 | 0 | 0 | 0 | 298 | 0.484553 | 134 | 0.217886 | 56 | 0.091057 |
7d9928a0889c40b5a6ffd1d19e7ea9f5236cde32 | 7,015 | py | Python | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
]
| null | null | null | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
]
| null | null | null | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""Conda-env-related requirements."""
from __future__ import absolute_import, print_function
from os.path import join
from anaconda_project.requirements_registry.requirement import EnvVarRequirement, RequirementStatus
from anaconda_project.conda_manager import new_conda_manager, CondaManagerError
from anaconda_project.internal import conda_api
class CondaEnvRequirement(EnvVarRequirement):
"""A requirement for CONDA_PREFIX to point to a conda env."""
_provider_class_name = 'CondaEnvProvider'
def __init__(self, registry, env_specs=None, env_var=None):
"""Extend superclass to default to CONDA_PREFIX and carry environment information.
Args:
registry (RequirementsRegistry): plugin registry
env_specs (dict): dict from env name to ``CondaEnvironment``
"""
if env_var is None:
env_var = conda_api.conda_prefix_variable()
super(CondaEnvRequirement, self).__init__(registry=registry, env_var=env_var)
self.env_specs = env_specs
self._conda = new_conda_manager()
@property
def title(self):
"""Override superclass to provide our title."""
return "A Conda environment"
@property
def description(self):
"""Override superclass to provide our description."""
return "The project needs a Conda environment containing all required packages."
@property
def ignore_patterns(self):
"""Override superclass with our ignore patterns."""
return set(['/envs/'])
def _status_from_analysis(self, environ, local_state_file, analysis):
config = analysis.config
assert 'source' in config
assert config['source'] != 'default'
assert config['source'] != 'unset'
prefix = None
if 'value' in config and config['source'] in ('variables', 'project', 'inherited', 'environ'):
prefix = config['value']
assert prefix is not None
env_name = config.get('env_name', None)
if env_name is not None:
environment_spec = self.env_specs[env_name]
try:
deviations = self._conda.find_environment_deviations(prefix, environment_spec)
if not deviations.ok:
return (False, deviations.summary)
except CondaManagerError as e:
return (False, str(e))
current_env_setting = environ.get(self.env_var, None)
if current_env_setting is None:
# this is our vaguest / least-descriptionful message so only if we didn't do better above
return (False, "%s is not set." % self.env_var)
else:
return (True, "Using Conda environment %s." % prefix)
def check_status(self, environ, local_state_file, default_env_spec_name, overrides, latest_provide_result=None):
"""Override superclass to get our status."""
return self._create_status_from_analysis(environ,
local_state_file,
default_env_spec_name,
overrides=overrides,
provider_class_name=self._provider_class_name,
status_getter=self._status_from_analysis,
latest_provide_result=latest_provide_result)
class CondaBootstrapEnvRequirement(CondaEnvRequirement):
"""A requirement for CONDA_PREFIX to point to a conda env."""
_provider_class_name = 'CondaBootstrapEnvProvider'
def __init__(self, registry, env_specs=None):
"""Extend superclass to default to CONDA_PREFIX and carry environment information.
Args:
registry (RequirementsRegistry): plugin registry
env_specs (dict): dict from env name to ``CondaEnvironment``
"""
super(CondaBootstrapEnvRequirement, self).__init__(registry=registry, env_var="BOOTSTRAP_ENV_PREFIX")
self.env_specs = env_specs
self._conda = new_conda_manager()
@property
def description(self):
"""Override superclass to provide our description."""
return "The project needs a Conda bootstrap environment containing all required packages."
@property
def title(self):
"""Override superclass to provide our title."""
return "Anaconda-project bootstrap environment"
def _status_from_analysis(self, environ, local_state_file, analysis):
config = analysis.config
assert 'source' in config
# we expect the bootstrap env to not be the env running the cmd
assert config['source'] in ['unset', 'environ', 'project']
env_name = 'bootstrap-env'
prefix = join(environ['PROJECT_DIR'], 'envs', env_name)
if config['source'] == 'environ':
assert config['value'] == prefix
environment_spec = self.env_specs[env_name]
try:
deviations = self._conda.find_environment_deviations(prefix, environment_spec)
if not deviations.ok:
return (False, deviations.summary)
except CondaManagerError as e:
return (False, str(e))
current_env_setting = environ.get(self.env_var, None)
if current_env_setting is None:
# this is our vaguest / least-descriptionful message so only if we didn't do better above
return (False, "%s is not set." % self.env_var)
else:
return (True, "Using Conda environment %s." % prefix)
def _create_status_from_analysis(self, environ, local_state_file, default_env_spec_name, overrides,
latest_provide_result, provider_class_name, status_getter):
provider = self.registry.find_provider_by_class_name(provider_class_name)
analysis = provider.analyze(self, environ, local_state_file, default_env_spec_name, overrides)
(has_been_provided, status_description) = status_getter(environ, local_state_file, analysis)
# hardcode bootstrap env name since it's a very especial case
env_spec_name = 'bootstrap-env'
return RequirementStatus(self,
has_been_provided=has_been_provided,
status_description=status_description,
provider=provider,
analysis=analysis,
latest_provide_result=latest_provide_result,
env_spec_name=env_spec_name)
| 41.264706 | 116 | 0.623236 | 6,282 | 0.89551 | 0 | 0 | 780 | 0.11119 | 0 | 0 | 2,222 | 0.31675 |
7d9a43e7079b4241b2e56a68cd01b2edf6c43289 | 1,697 | py | Python | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
]
| null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
]
| null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 Hieu Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from glob import glob
from PIL import Image
from torch.utils.data import Dataset
from ..transforms import get_transforms
from .build import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class KodakDataset(Dataset):
def __init__(self, data_folder, mode, cfg, **kwargs):
"""
"""
super().__init__()
self.cfg = cfg
self.paths = sorted(glob(f"{data_folder}/*"))
print(f"There are {len(self)} image in {mode} dataset")
self.transforms = get_transforms(cfg, mode)
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
"""
"""
path = self.paths[idx]
image_id = os.path.split(path)[-1].replace(".png", "")
img = self._load_img(idx)
img = self.transforms(img)
return image_id, img
def _load_img(self, idx):
"""
        args: idx - index of the image path in self.paths
return: pillow image
"""
image = Image.open(self.paths[idx]).convert('RGB')
return image
| 26.936508 | 80 | 0.61815 | 832 | 0.490277 | 0 | 0 | 861 | 0.507366 | 0 | 0 | 820 | 0.483206 |
7d9a756d138cef5d7f938318a3b5d1bd98451587 | 1,055 | py | Python | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
]
| 4 | 2021-02-13T03:39:38.000Z | 2022-01-30T19:41:43.000Z | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
]
| null | null | null | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import os
domain_home = os.environ['DOMAIN_HOME']
node_manager_name = os.environ['NODE_MANAGER_NAME']
component_name = os.environ['COMPONENT_NAME']
component_admin_listen_address = os.environ['COMPONENT_ADMIN_LISTEN_ADDRESS']
component_admin_listen_port = os.environ['COMPONENT_ADMIN_LISTEN_PORT']
component_listen_address = os.environ['COMPONENT_LISTEN_ADDRESS']
component_listen_port = os.environ['COMPONENT_LISTEN_PORT']
component_ssl_listen_port = os.environ['COMPONENT_SSL_LISTEN_PORT']
######################################################################
readDomain(domain_home)
cd('/')
create(component_name, 'SystemComponent')
cd('/SystemComponent/' + component_name)
cmo.setComponentType('OHS')
set('Machine', node_manager_name)
cd('/OHS/' + component_name)
cmo.setAdminHost(component_admin_listen_address)
cmo.setAdminPort(component_admin_listen_port)
cmo.setListenAddress(component_listen_address)
cmo.setListenPort(component_listen_port)
cmo.setSSLListenPort(component_ssl_listen_port)
updateDomain()
closeDomain()
exit()
| 31.969697 | 77 | 0.777251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.318483 |
7d9ad66a69e3d43361db2e0fdcc4e1f1ce926057 | 1,965 | py | Python | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
]
| null | null | null | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
]
| null | null | null | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional import base
class DriverSanityTestIronicClient(base.FunctionalTestBase):
"""Sanity tests for testing actions with driver.
Smoke test for the Ironic CLI commands which checks basic actions with
driver command like driver-show, driver-properties.
"""
def test_driver_show(self):
"""Test steps:
1) get drivers names
2) check that each driver exists in driver-show output
"""
drivers_names = self.get_drivers_names()
for driver in drivers_names:
driver_show = self.show_driver(driver)
self.assertEqual(driver, driver_show['name'])
def test_driver_properties(self):
"""Test steps:
1) get drivers names
2) check that each driver has some properties
"""
drivers_names = self.get_drivers_names()
for driver in drivers_names:
driver_properties = self.properties_driver(driver)
self.assertNotEqual([], [x['Property'] for x in driver_properties])
def test_driver_list(self):
"""Test steps:
1) get list of drivers
2) check that list of drivers is not empty
"""
driver = 'fake-hardware'
available_drivers = self.get_drivers_names()
self.assertGreater(len(available_drivers), 0)
self.assertIn(driver, available_drivers)
| 34.473684 | 79 | 0.686514 | 1,336 | 0.679898 | 0 | 0 | 0 | 0 | 0 | 0 | 1,122 | 0.570992 |
7d9bd1161fcdf87364f5ca0317aac04cfac291b2 | 380 | py | Python | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
]
| null | null | null | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
]
| 40 | 2021-12-30T15:57:10.000Z | 2022-01-26T16:44:24.000Z | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
]
| 1 | 2022-03-12T19:17:26.000Z | 2022-03-12T19:17:26.000Z | # https://github.com/ArtemNikolaev/gb-hw/issues/18
seasons = [
    'WINTER',
    'SPRING',
    'SUMMER',
    'AUTUMN'
]
month = int(input('Enter the month number: '))
if month < 1 or month > 12:
    print('There are only 12 months, so the minimum value is 1 and the maximum is 12')
else:
seasonInt = (month % 12) // 3
    print('The season of the chosen month: ' + seasons[seasonInt])
7d9be08030c54e953623ba6d26f1efa4c9f9a3bb | 414 | py | Python | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
]
| null | null | null | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
]
| null | null | null | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
]
| null | null | null | """Modoboa admin signals."""
import django.dispatch
use_external_recipients = django.dispatch.Signal(providing_args=["recipients"])
extra_domain_actions = django.dispatch.Signal(
providing_args=["user", "domain"])
extra_domain_dashboard_widgets = django.dispatch.Signal(
providing_args=["user", "domain"])
extra_account_dashboard_widgets = django.dispatch.Signal(
providing_args=["user", "account"])
| 34.5 | 79 | 0.772947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.200483 |
7d9c78ce7d3a0631fc266360f9979634e2fb0ff2 | 1,401 | py | Python | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
]
| 2 | 2020-02-12T15:10:02.000Z | 2021-07-02T18:35:34.000Z | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
]
| 2 | 2019-10-29T18:59:26.000Z | 2019-12-28T15:43:19.000Z | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
]
| 4 | 2019-10-04T00:41:27.000Z | 2021-04-28T13:25:37.000Z | from django.urls import reverse
from rest_framework import status
from .base import APITestCaseExtended
from mock import patch
from restapi import models
class HealthCheckTest(APITestCaseExtended):
"""
Test for health check
"""
    def test_put_healthcheck(self):
"""
Tests PUT method on healthcheck
"""
url = reverse('healthcheck')
data = {}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_post_healthcheck(self):
"""
Tests POST method on healthcheck
"""
url = reverse('healthcheck')
data = {}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_delete_healthcheck(self):
"""
Tests DELETE method on healthcheck
"""
url = reverse('healthcheck')
data = {}
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_get_healthcheck(self):
"""
Tests GET method on healthcheck
"""
url = reverse('healthcheck')
data = {}
response = self.client.get(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 20.910448 | 82 | 0.631692 | 1,241 | 0.885796 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.223412 |
7d9d90a49a7ce7f5c4dc585757591fb9e4a928b7 | 1,217 | py | Python | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
]
| null | null | null | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
]
| null | null | null | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
]
| null | null | null | import asyncio
import contextlib
import glob
import itertools
import logging
import os
import pytest
import uvloop
try:
import tracemalloc
tracemalloc.start()
except ImportError:
# Not available in pypy
pass
# clear compiled cython tests
for path in itertools.chain(
glob.glob(os.path.join('tests', '*.so')),
glob.glob(os.path.join('tests', '*.c'))):
os.unlink(path)
@pytest.fixture(params=[
asyncio,
uvloop
])
def loop_mod(request):
return request.param
@pytest.fixture(autouse=True)
def foo():
print('TEST IS', os.environ.get('PYTEST_CURRENT_TEST'))
# assumption: this is intended to override the pytest event_loop fixture (e.g. pytest-asyncio), so mark it as a fixture
@pytest.fixture
def event_loop(loop_mod):
loop = loop_mod.new_event_loop()
asyncio.set_event_loop(loop)
if loop_mod != uvloop:
# uvloop in debug mode calls extract_stack, which results in "ValueError: call stack is not deep enough"
# for Cython code
loop.set_debug(True)
with contextlib.closing(loop):
yield loop
def pytest_configure(config):
if config.getoption('verbose') > 0:
h = logging.StreamHandler()
h.setLevel(logging.DEBUG)
logger = logging.getLogger('portaudio')
logger.addHandler(h)
logger.setLevel(logging.DEBUG) | 21.350877 | 112 | 0.676253 | 0 | 0 | 344 | 0.282662 | 199 | 0.163517 | 0 | 0 | 248 | 0.20378 |
7d9edb01d9ce450078aba93d6df890971eee58cc | 3,297 | py | Python | tests/test_storage.py | angru/datamodel | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
]
| 2 | 2020-06-17T21:00:09.000Z | 2020-07-07T15:49:00.000Z | tests/test_storage.py | angru/datamodel | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
]
| 14 | 2020-06-17T14:39:19.000Z | 2020-12-25T17:05:43.000Z | tests/test_storage.py | angru/corm | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
]
| null | null | null | from corm import Entity, Field, Storage, RelationType
def test_add_by_primary_key():
class User(Entity):
id: int = Field(pk=True)
storage = Storage()
john = User(
data={'id': 1},
storage=storage,
)
assert storage.get(User.id, 1) == john
def test_make_relation():
class User(Entity):
id: int
class Address(Entity):
id: int
storage = Storage()
user = User(data={'id': 1}, storage=storage)
address1 = Address(data={'id': 1}, storage=storage)
address2 = Address(data={'id': 2}, storage=storage)
storage.make_relation(
from_=user,
to_=address1,
relation_type=RelationType.RELATED,
)
storage.make_relation(
from_=user,
to_=address2,
relation_type=RelationType.CHILD,
)
assert storage.get_one_related_entity(
user,
Address,
RelationType.RELATED,
) == address1
assert storage.get_related_entities(
user,
Address,
RelationType.RELATED,
) == [address1]
def test_remove_relation():
class User(Entity):
id: int
class Address(Entity):
id: int
storage = Storage()
user = User(data={'id': 1}, storage=storage)
address1 = Address(data={'id': 1}, storage=storage)
address2 = Address(data={'id': 2}, storage=storage)
address3 = Address(data={'id': 3}, storage=storage)
storage.make_relation(
from_=user,
to_=address1,
relation_type=RelationType.RELATED,
)
storage.make_relation(
from_=user,
to_=address2,
relation_type=RelationType.RELATED,
)
storage.make_relation(
from_=user,
to_=address3,
relation_type=RelationType.PARENT,
)
assert storage.get_related_entities(
user,
Address,
RelationType.RELATED,
) == [
address1,
address2,
]
storage.remove_relation(user, address1, RelationType.RELATED)
assert storage.get_related_entities(
user,
Address,
RelationType.RELATED,
) == [address2]
def test_remove_relations():
class User(Entity):
id: int
class Address(Entity):
id: int
storage = Storage()
user = User(data={'id': 1}, storage=storage)
address1 = Address(data={'id': 1}, storage=storage)
address2 = Address(data={'id': 2}, storage=storage)
address3 = Address(data={'id': 3}, storage=storage)
storage.make_relation(
from_=user,
to_=address1,
relation_type=RelationType.RELATED,
)
storage.make_relation(
from_=user,
to_=address2,
relation_type=RelationType.RELATED,
)
storage.make_relation(
from_=user,
to_=address3,
relation_type=RelationType.PARENT,
)
assert storage.get_related_entities(
user,
Address,
RelationType.RELATED,
) == [
address1,
address2,
]
storage.remove_relations(user, Address, RelationType.RELATED)
assert storage.get_related_entities(
user,
Address,
RelationType.RELATED,
) == []
assert storage.get_related_entities(
user,
Address,
RelationType.PARENT,
) == [address3]
| 22.127517 | 65 | 0.59721 | 271 | 0.082196 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.014559 |
7da0f8191abd59b72b6876b877822726d97f2ede | 2,268 | py | Python | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
]
| null | null | null | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
]
| null | null | null | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
]
| null | null | null | from TestHelperSuperClass import testHelperAPIClient, env
import unittest
import json
from appObj import appObj
import pytz
import datetime
serverInfoWithoutAnyPictures = {
'Server': {
'Version': env['APIAPP_VERSION']
},
'Pictures': []
}
samplePictureIdentifier = 'ABC123'
samplePictureContent = { 'SomeContent': 'abc' }
serverInfoWithSamplePictureContent = {
'Server': {
'Version': env['APIAPP_VERSION']
},
'Pictures': [{
'Identifier': samplePictureIdentifier,
'Expires': "2018-11-22T14:16:00+00:00",
'Contents': samplePictureContent
}]
}
class test_api(testHelperAPIClient):
def test_getServerInfo(self):
result = self.testClient.get('/api/serverinfo/')
self.assertEqual(result.status_code, 200)
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, serverInfoWithoutAnyPictures)
def test_swaggerJSONProperlyShared(self):
result = self.testClient.get('/api/swagger.json')
self.assertEqual(result.status_code, 200)
result = self.testClient.get('/apidocs/swagger.json')
self.assertEqual(result.status_code, 200)
def test_getAddPicture(self):
appObj.setTestingDateTime(pytz.timezone('Europe/London').localize(datetime.datetime(2018,11,22,13,46,0,0)))
result = self.testClient.get('/api/serverinfo/')
self.assertEqual(result.status_code, 200)
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, serverInfoWithoutAnyPictures)
result = self.testClient.post('/api/serverinfo/' + samplePictureIdentifier, json=samplePictureContent)
self.assertEqual(result.status_code, 201)
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, serverInfoWithSamplePictureContent)
def test_getAddedPictureExpires(self):
self.test_getAddPicture()
appObj.setTestingDateTime(pytz.timezone('Europe/London').localize(datetime.datetime(2018,11,22,14,36,0,0)))
result = self.testClient.get('/api/serverinfo/')
self.assertEqual(result.status_code, 200)
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, serverInfoWithoutAnyPictures)
| 36 | 111 | 0.738536 | 1,640 | 0.723104 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.138448 |
7da3966430bc2a6549730b528f313eb6f4d29793 | 7,990 | py | Python | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
]
| null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
]
| null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# coding: utf-8
# This script generates a zone plate pattern (based on partial filling) given the material, energy, grid size and number of zones as input
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from joblib import Parallel, delayed
from tqdm import tqdm, trange
import urllib,os,pickle
from os.path import dirname as up
# Importing all the required libraries. Numba is used to optimize functions.
# In[2]:
def repeat_pattern(X,Y,Z):
flag_ = np.where((X>0)&(Y>0))
flag1 = np.where((X>0)&(Y<0))
flag1 = tuple((flag1[0][::-1],flag1[1]))
Z[flag1] = Z[flag_]
flag2 = np.where((X<0)&(Y>0))
flag2 = tuple((flag2[0],flag2[1][::-1]))
Z[flag2] = Z[flag_]
flag3 = np.where((X<0)&(Y<0))
flag3 = tuple((flag3[0][::-1],flag3[1][::-1]))
Z[flag3] = Z[flag_]
return Z
# *repeat_pattern* : produces the full zone plate pattern given the pattern in only one quadrant (X, Y > 0) as input.
# * *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.
# * *Outputs* : Z itself is modified in place to reflect the repetition.
# In[3]:
def get_property(mat,energy):
url = "http://henke.lbl.gov/cgi-bin/pert_cgi.pl"
data = {'Element':str(mat), 'Energy':str(energy), 'submit':'Submit Query'}
data = urllib.parse.urlencode(data)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
resp = urllib.request.urlopen(req)
respDat = resp.read()
response = respDat.split()
d = b'g/cm^3<li>Delta'
i = response.index(d)
delta = str(response[i+2])[:str(response[i+2]).index('<li>Beta')][2:]
beta = str(response[i+4])[2:-1]
return float(delta),float(beta)
# *get_property* : gets delta and beta for a given material at the specified energy from Henke et al.
# * *Inputs* : mat - material, energy - energy in eV
# * *Outputs* : delta, beta
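# For context (standard X-ray optics convention, not stated in the original): delta and beta are the
# refractive index decrement and the absorption index in n = 1 - delta + i*beta, as tabulated by Henke et al.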
# In[4]:
@njit # equivalent to "jit(nopython=True)".
def partial_fill(x,y,step,r1,r2,n):
x_ = np.linspace(x-step/2,x+step/2,n)
y_ = np.linspace(y-step/2,y+step/2,n)
cnts = 0
for i in range(n):
for j in range(n):
z = (x_[i] * x_[i] + y_[j] * y_[j])
if r1*r1 < z < r2*r2:
cnts += 1
fill_factor = cnts/(n*n)
return fill_factor
# *partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.
# * *Inputs* : x,y - coordinates of the point, step - step size, r1,r2 - inner and outer radii of ring, n - resolution
# * *Outputs* : fill_factor - value of the pixel based on amount of ring passing through it
# In[5]:
#find the radius of the nth zone
def zone_radius(n,f,wavel):
return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)
# *zone_radius* : function to find the radius of a zone given the zone number, focal length and wavelength
# * *Inputs* : n - zone number, f - focal length, wavel - wavelength
# * *Outputs* : radius of the zone as specified by the inputs
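# Side note (added, not in the original notebook): the expression above is the exact Fresnel zone plate
# radius r_n = sqrt(n*wavel*f + (n*wavel/2)**2); differentiating it gives an estimate of the local zone width.
def approx_zone_width(n, f, wavel):
    # dr/dn evaluated at zone n; for n*wavel << f this reduces to the familiar wavel*f/(2*r_n)
    return (wavel*f + n*wavel**2/2) / (2*zone_radius(n, f, wavel))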
# In[6]:
def make_quadrant(X,Y,flag,r1,r2,step,n,zone_number):
z = np.zeros(np.shape(X))
Z = np.sqrt(X**2+Y**2)
for l in range(len(flag[0])):
i = flag[0][l]
j = flag[1][l]
if 0.75*r1< Z[i][j] < 1.25*r2:
x1 = X[i][j]
y1 = Y[i][j]
z[i][j] = partial_fill(x1,y1,step,r1,r2,n)
z[tuple((flag[1],flag[0]))] = z[tuple((flag[0],flag[1]))]
return z
# *make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number
# * *Inputs* : X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0), r1,r2 - inner and outer radii, n - parameter for the partial_fill function
# * *Outputs* : z - output pattern with one quadrant filled.
# In[7]:
#2D ZP
def make_ring(i):
print(i)
r1 = radius[i-1]
r2 = radius[i]
n = 250
ring = make_quadrant(X,Y,flag,r1,r2,step_xy,n,zone_number = i)
ring = repeat_pattern(X,Y,ring)
ring_ = np.where(ring!=0)
vals_ = ring[ring_]
np.save('ring_locs_'+str(i)+'.npy',ring_)
np.save('ring_vals_'+str(i)+'.npy',vals_)
return
# *make_ring* : function used to create a ring given the relevant parameters
# * *Inputs* : i-zone number,radius - array of radii ,X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0),n - parameter for the partial_fill function
# * *Outputs* : None. Saves the rings to memory.
# In[8]:
mat = 'Au'
energy = 10000 #Energy in eV
f = 10e-3 #focal length in meters
wavel = (1239.84/energy)*10**(-9) #Wavelength in meters
delta,beta = get_property(mat,energy)
zones = 700 #number of zones
radius = np.zeros(zones)
# Setting up the parameters and initializing the variables.
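# Added note: for these values, wavel = 1239.84/10000 nm ~ 0.124 nm at 10 keV (using hc ~ 1239.84 eV*nm),
# with a focal length of 10 mm and 700 zones (so zone radii for n = 0..699 are filled below).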
# In[9]:
for k in range(zones):
radius[k] = zone_radius(k,f,wavel)
# Filling the radius array with the radius of zones for later use in making the rings.
# In the next few code blocks, we check if the parameters of the simulation make sense. First we print out the input and output pixel sizes assuming we will be using the 1FT propagator. Then we see if the pixel sizes are small enough compared to the outermost zone width. Finally we check if the focal spot can be contained for the given amount of tilt angle.
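# Rough numbers for the sanity checks below (added for clarity, assuming the single-FT propagator relation
# L_out = wavel*f/step_xy): with grid_size = 55296 and input_xrange = 262 um the input pixel is ~4.7 nm,
# so L_out ~ 0.124 nm * 10 mm / 4.7 nm ~ 262 um and the output pixel L_out/grid_size is again ~4.7 nm.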
# In[10]:
grid_size = 55296
input_xrange = 262e-6
step_xy = input_xrange/grid_size
L_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)
step_xy_output = L_out/grid_size
print(' Output L : ',L_out)
print(' output pixel size(nm) : ',step_xy_output*1e9)
print(' input pixel size(nm) : ',step_xy*1e9)
# In[11]:
drn = radius[-1]-radius[-2]
print(' maximum radius(um) : ',radius[-1]*1e6)
print(' outermost zone width(nm) :',drn*1e9)
# In[12]:
print(' max shift of focal spot(um) : ',(L_out/2)*1e6)
# invert the following to get max tilt allowance
# after which the focal spot falls off the
# simulation plane
# np.sin(theta*(np.pi/180))*f = (L_out/2)
theta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)
print(' max wavefield aligned tilt(deg) : ',theta_max)
# In[13]:
if step_xy > 0.25*drn :
    print(' WARNING ! input pixel size too large relative to the outermost zone width')
print(' ratio of input step size to outermost zone width', step_xy/drn)
if step_xy_output > 0.25*drn :
    print(' WARNING ! output pixel size too large relative to the outermost zone width')
print(' ratio of output step size to outermost zone width', step_xy_output/drn)
# In[14]:
zones_to_fill = []
for i in range(zones):
if i%2 == 1 :
zones_to_fill.append(i)
zones_to_fill = np.array(zones_to_fill)
# Making a list of zones to fill. (Since only alternate zones are filled in our case. This can be modified as per convenience)
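# Equivalently (added note): only odd-numbered zones are kept, i.e. zones_to_fill == np.arange(1, zones, 2).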
# In[ ]:
try :
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
except :
os.mkdir(up(os.getcwd())+str('/hard_xray_zp'))
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
# Store the location of each ring of the zone plate separately in a sub directory. This is more efficient than storing the whole zone plate array !
# In[ ]:
x1 = input_xrange/2
x = np.linspace(-x1,x1,grid_size)
step_xy = x[-1]-x[-2]
zp_coords =[-x1,x1,-x1,x1]
# In[ ]:
X,Y = np.meshgrid(x,x)
flag = np.where((X>0)&(Y>0)&(X>=Y))
# Creating the input 1D array and setting the parameters for use by the make ring function.
# Note that X,Y,flag and step_xy will be read by multiple processes which we will spawn using joblib.
# In[ ]:
get_ipython().run_cell_magic('capture', '', 'from joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)')
# Creating the rings ! (Adjust the number of jobs depending on CPU cores.)
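# If this is run outside IPython/Jupyter (where get_ipython() is undefined), the cell above is equivalent
# to the plain call below; the '%%capture' magic only suppressed the printed output:
#   results = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)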
# In[ ]:
params = {'grid_size':grid_size,'step_xy':step_xy,'energy(in eV)':energy,'wavelength in m':wavel,'focal_length':f,'zp_coords':zp_coords,'delta':delta,'beta':beta}
pickle.dump(params,open('parameters.pickle','wb'))
# Pickling and saving all the associated parameters along with the rings for use in simulation!
| 29.592593 | 359 | 0.659324 | 0 | 0 | 0 | 0 | 388 | 0.048561 | 0 | 0 | 4,321 | 0.540801 |
7da45f218ab8516fdf8f91e39f9a7c42a449c690 | 1,740 | py | Python | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
]
| null | null | null | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
]
| null | null | null | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.base import (
NamedModelElement,
ModelBase,
)
class KubernetesConfig(NamedModelElement):
def _required_attributes(self):
return {
'kubeconfig',
'version',
'monitoring',
}
def kubeconfig(self):
return self.raw.get('kubeconfig')
def cluster_version(self):
return self.raw.get('version')
def monitoring(self):
return MonitoringConfig(self.raw.get('monitoring'))
class MonitoringConfig(ModelBase):
def _required_attributes(self):
return {
'namespace',
'kube_state_metrics_namespaces_to_monitor',
'kube_state_metrics_collectors',
}
def namespace(self):
return self.raw.get('namespace')
def kube_state_metrics_namespaces_to_monitor(self):
return self.raw.get('kube_state_metrics_namespaces_to_monitor')
def kube_state_metrics_collectors(self):
return self.raw.get('kube_state_metrics_collectors')
| 31.071429 | 99 | 0.7 | 933 | 0.536207 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.548851 |
7da75a749aad9d8e1c359fa964268c99722cc54e | 180 | py | Python | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
]
| null | null | null | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
]
| null | null | null | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
]
| null | null | null | from pycoingecko import CoinGeckoAPI
number = int(input('Enter the number of coins: '))
# fetch the top coins by market cap; get_coins_markets() is part of pycoingecko's client API
coins = CoinGeckoAPI().get_coins_markets(vs_currency='usd', per_page=number)
listOfNames, listOfMarketCaps = [c['name'] for c in coins], [c['market_cap'] for c in coins]
for i in range(len(coins)):
    print(i + 1, ':', listOfNames[i], listOfMarketCaps[i])
| 22.5 | 62 | 0.672222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.177778 |
7da76f883c897444204f5a70123af7ff361ec610 | 2,528 | py | Python | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
]
| 1,520 | 2018-03-01T13:37:49.000Z | 2022-03-25T11:40:20.000Z | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
]
| 87 | 2018-03-03T15:12:50.000Z | 2022-02-21T15:24:12.000Z | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
]
| 121 | 2018-03-03T08:40:53.000Z | 2022-03-16T05:19:38.000Z | # pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers import SnliReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestSnliReader(object):
@pytest.mark.parametrize(u"lazy", (True, False))
def test_read_from_file(self, lazy):
reader = SnliReader(lazy=lazy)
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'snli.jsonl')
instances = ensure_list(instances)
instance1 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"training", u"his", u"horse", u"for", u"a",
u"competition", u"."],
u"label": u"neutral"}
instance2 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"at", u"a", u"diner", u",", u"ordering", u"an",
u"omelette", u"."],
u"label": u"contradiction"}
instance3 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"outdoors", u",", u"on", u"a", u"horse", u"."],
u"label": u"entailment"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields[u"premise"].tokens] == instance1[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance1[u"hypothesis"]
assert fields[u"label"].label == instance1[u"label"]
fields = instances[1].fields
assert [t.text for t in fields[u"premise"].tokens] == instance2[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance2[u"hypothesis"]
assert fields[u"label"].label == instance2[u"label"]
fields = instances[2].fields
assert [t.text for t in fields[u"premise"].tokens] == instance3[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance3[u"hypothesis"]
assert fields[u"label"].label == instance3[u"label"]
| 52.666667 | 109 | 0.560918 | 2,244 | 0.887658 | 0 | 0 | 2,210 | 0.874209 | 0 | 0 | 822 | 0.325158 |
7da9d5721ae20d0a2dd2bfb648ef9c35e133f2d4 | 4,362 | py | Python | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
]
| null | null | null | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
]
| null | null | null | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py file for Libgexf
"""
from setuptools import Extension, setup
#from distutils.core import Extension, setup
libgexf_module = Extension(
    '_libgexf', # generates a _libgexf.so
include_dirs=['/usr/include/libxml2'],
sources=[
#        'libgexf.i', # generates a libgexf.py (only works for C sources, not C++)
        # C sources: the .o files will be generated automatically
        # and automatically linked with the module
#io::input
'../../libgexf/filereader.cpp',
'../../libgexf/abstractparser.cpp',
'../../libgexf/gexfparser.cpp',
'../../libgexf/legacyparser.cpp',
'../../libgexf/rngvalidator.cpp',
'../../libgexf/schemavalidator.cpp',
#io::output
'../../libgexf/filewriter.cpp',
'../../libgexf/legacywriter.cpp',
#io::utils
'../../libgexf/conv.cpp',
#db::topo
'../../libgexf/graph.cpp',
'../../libgexf/dynamicgraph.cpp',
'../../libgexf/directedgraph.cpp',
'../../libgexf/undirectedgraph.cpp',
'../../libgexf/nodeiter.cpp',
'../../libgexf/edgeiter.cpp',
#db::data
'../../libgexf/data.cpp',
'../../libgexf/metadata.cpp',
'../../libgexf/attributeiter.cpp',
'../../libgexf/attvalueiter.cpp',
#main
'../../libgexf/gexf.cpp',
'../../libgexf/memoryvalidator.cpp',
        # path to the wrapper generated automatically by SWIG (so this wrapper must already exist)
'libgexf_wrap.cpp',
],
    # optionally, the libraries to link against
    # for example, if libxml2 is needed, this is where you tell the compiler about it
    # a note for people used to gcc and command-line compilation:
    # there is no need to use the gcc-specific form ("-lpthread") or the Visual Studio-specific one, etc.
    # just put "pthread" and the python script will prepend the "-l" where necessary
libraries=[
'stdc++',
'xml2' #see xml2-config --libs to get the linker flags
        #'z', # zlib (compression) (not needed on e.g. ubuntu, since it already ships with the base development packages)
        #'pthread' # Posix Threads (posix multithreading) (not needed on linux, since posix is already part of the system)
]
)
setup (
    name='libgexf', # important: this is the real module name, which will be used when doing "import libgexf;" for example
    # miscellaneous metadata
version='0.1.2',
author="Sebastien Heymann",
author_email="[email protected]",
url="http://gexf.net",
description="""Toolkit library for GEXF file format.""",
long_description="""""",
    # list of modules to compile.
    # the "libgexf_module" module was defined on line 12
#
ext_modules=[ libgexf_module, ],
    # to add a python package
    # for example
    # packages = ["monpackage"]
    # would add the package
    # monpackage/
    # since in python packages are in fact simply directories containing
    # a "constructor" file __init__.py (somewhat like an object-oriented file system)
    # this recursively adds
    # monpackage/__init__.py
    # monpackage/sous/sous/sous/package/fichier.py
    # etc..
    #packages= ["monpackage", ], #
    # to add extra python scripts
    # for example
    # py_modules = ["monmodule"]
    # would add the file
    # monmodule.py (in the current directory)
    # to the package
py_modules = ["libgexf"], # UNCOMMENT TO USE THE SWIG WRAPPER
    # miscellaneous files can also be added (readme, examples, licenses, html doc, etc..)
#data_files = [('share/libgexf-python/',['readme.txt']),],
    # more metadata, for the online python module database (python.org)
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: Free for non-commercial use",
"Operating System :: POSIX :: Linux",
"Topic :: Software Development :: Libraries :: Python Modules", ],
)
| 36.049587 | 128 | 0.618294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,480 | 0.791269 |
7da9f98f6db4dd526d7eaf26e1220f285a37877a | 7,933 | bzl | Python | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
]
| 1 | 2017-06-12T02:10:48.000Z | 2017-06-12T02:10:48.000Z | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
]
| null | null | null | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
]
| null | null | null | """
@generated
cargo-raze generated Bazel file.
DO NOT EDIT! Replaced on runs of cargo-raze
"""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") # buildifier: disable=load
def rules_rust_util_import_fetch_remote_crates():
"""This function defines a collection of repos and should be called in a WORKSPACE file"""
maybe(
http_archive,
name = "rules_rust_util_import__aho_corasick__0_7_15",
url = "https://crates.io/api/v1/crates/aho-corasick/0.7.15/download",
type = "tar.gz",
sha256 = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5",
strip_prefix = "aho-corasick-0.7.15",
build_file = Label("//util/import/raze/remote:BUILD.aho-corasick-0.7.15.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__cfg_if__1_0_0",
url = "https://crates.io/api/v1/crates/cfg-if/1.0.0/download",
type = "tar.gz",
sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd",
strip_prefix = "cfg-if-1.0.0",
build_file = Label("//util/import/raze/remote:BUILD.cfg-if-1.0.0.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__env_logger__0_8_4",
url = "https://crates.io/api/v1/crates/env_logger/0.8.4/download",
type = "tar.gz",
sha256 = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3",
strip_prefix = "env_logger-0.8.4",
build_file = Label("//util/import/raze/remote:BUILD.env_logger-0.8.4.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__getrandom__0_2_3",
url = "https://crates.io/api/v1/crates/getrandom/0.2.3/download",
type = "tar.gz",
sha256 = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753",
strip_prefix = "getrandom-0.2.3",
build_file = Label("//util/import/raze/remote:BUILD.getrandom-0.2.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__lazy_static__1_4_0",
url = "https://crates.io/api/v1/crates/lazy_static/1.4.0/download",
type = "tar.gz",
sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646",
strip_prefix = "lazy_static-1.4.0",
build_file = Label("//util/import/raze/remote:BUILD.lazy_static-1.4.0.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__libc__0_2_112",
url = "https://crates.io/api/v1/crates/libc/0.2.112/download",
type = "tar.gz",
sha256 = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125",
strip_prefix = "libc-0.2.112",
build_file = Label("//util/import/raze/remote:BUILD.libc-0.2.112.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__log__0_4_14",
url = "https://crates.io/api/v1/crates/log/0.4.14/download",
type = "tar.gz",
sha256 = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710",
strip_prefix = "log-0.4.14",
build_file = Label("//util/import/raze/remote:BUILD.log-0.4.14.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__memchr__2_4_1",
url = "https://crates.io/api/v1/crates/memchr/2.4.1/download",
type = "tar.gz",
sha256 = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a",
strip_prefix = "memchr-2.4.1",
build_file = Label("//util/import/raze/remote:BUILD.memchr-2.4.1.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__proc_macro2__1_0_33",
url = "https://crates.io/api/v1/crates/proc-macro2/1.0.33/download",
type = "tar.gz",
sha256 = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a",
strip_prefix = "proc-macro2-1.0.33",
build_file = Label("//util/import/raze/remote:BUILD.proc-macro2-1.0.33.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__quickcheck__1_0_3",
url = "https://crates.io/api/v1/crates/quickcheck/1.0.3/download",
type = "tar.gz",
sha256 = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6",
strip_prefix = "quickcheck-1.0.3",
build_file = Label("//util/import/raze/remote:BUILD.quickcheck-1.0.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__quote__1_0_10",
url = "https://crates.io/api/v1/crates/quote/1.0.10/download",
type = "tar.gz",
sha256 = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05",
strip_prefix = "quote-1.0.10",
build_file = Label("//util/import/raze/remote:BUILD.quote-1.0.10.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__rand__0_8_4",
url = "https://crates.io/api/v1/crates/rand/0.8.4/download",
type = "tar.gz",
sha256 = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8",
strip_prefix = "rand-0.8.4",
build_file = Label("//util/import/raze/remote:BUILD.rand-0.8.4.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__rand_core__0_6_3",
url = "https://crates.io/api/v1/crates/rand_core/0.6.3/download",
type = "tar.gz",
sha256 = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7",
strip_prefix = "rand_core-0.6.3",
build_file = Label("//util/import/raze/remote:BUILD.rand_core-0.6.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__regex__1_4_6",
url = "https://crates.io/api/v1/crates/regex/1.4.6/download",
type = "tar.gz",
sha256 = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759",
strip_prefix = "regex-1.4.6",
build_file = Label("//util/import/raze/remote:BUILD.regex-1.4.6.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__regex_syntax__0_6_25",
url = "https://crates.io/api/v1/crates/regex-syntax/0.6.25/download",
type = "tar.gz",
sha256 = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b",
strip_prefix = "regex-syntax-0.6.25",
build_file = Label("//util/import/raze/remote:BUILD.regex-syntax-0.6.25.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__syn__1_0_82",
url = "https://crates.io/api/v1/crates/syn/1.0.82/download",
type = "tar.gz",
sha256 = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59",
strip_prefix = "syn-1.0.82",
build_file = Label("//util/import/raze/remote:BUILD.syn-1.0.82.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__unicode_xid__0_2_2",
url = "https://crates.io/api/v1/crates/unicode-xid/0.2.2/download",
type = "tar.gz",
sha256 = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3",
strip_prefix = "unicode-xid-0.2.2",
build_file = Label("//util/import/raze/remote:BUILD.unicode-xid-0.2.2.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__wasi__0_10_2_wasi_snapshot_preview1",
url = "https://crates.io/api/v1/crates/wasi/0.10.2+wasi-snapshot-preview1/download",
type = "tar.gz",
sha256 = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6",
strip_prefix = "wasi-0.10.2+wasi-snapshot-preview1",
build_file = Label("//util/import/raze/remote:BUILD.wasi-0.10.2+wasi-snapshot-preview1.bazel"),
)
| 41.103627 | 103 | 0.658767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,891 | 0.616539 |
7dab84050bffe62a65b369edcbc5f292e22e4734 | 747 | py | Python | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
]
| null | null | null | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
]
| null | null | null | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
## This program creates several threads,
## explicitly names two of them and
## prints each thread's name
import threading
import time
def ThreadFunction():
print(threading.currentThread().getName(), "Starting")
time.sleep(2)
print(threading.currentThread().getName(), "Exiting")
def ServiceFunction():
print(threading.currentThread().getName(), "Starting")
time.sleep(3)
print(threading.currentThread().getName(), "Exiting")
def Main():
myThread = threading.Thread(
name='Service Function',
target=ServiceFunction
)
w = threading.Thread(
name='Thread function',
target=ThreadFunction
)
w2 = threading.Thread(
target=ThreadFunction
)
w.start()
w2.start()
myThread.start()
if __name__ == "__main__":
Main()
| 16.977273 | 55 | 0.710843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.250335 |
7dac2231269fa172423e388357c676a691296ba3 | 6,241 | py | Python | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
]
| 9 | 2019-09-23T16:21:50.000Z | 2021-11-23T13:14:27.000Z | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
]
| null | null | null | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
]
| 7 | 2019-07-12T06:30:23.000Z | 2021-11-23T13:14:29.000Z | import os.path
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from keras.losses import categorical_crossentropy
import tensorflow as tf
import heapq
import re
modelName = 'CW_validation.h5'
successResultsNPY = []
############################################################################################################
# #
# this test was designed to measure the first attempt success rate of classification, and thus of keybyte #
# recovery from a single trace. It plots this in terms of keybyte values to investigate if there is a #
# difference in performance depending on the value of the Sbox output. #
# #
############################################################################################################
Sbox = np.array([
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
])
def load_sca_model(model_file):
try:
model = load_model(model_file)
except:
print("Error: can't load Keras model file '%s'" % model_file)
sys.exit(-1)
return model
#create a (256, 2) shaped matrix with "number of checks for each keybyte" as [:,0] and
#"number of successes" for [:,1]
def keytest(model, traces, plaintext, keys):
results = np.zeros((256, 2))
input_layer_shape = model.get_layer(index=0).input_shape
if len(input_layer_shape) == 3:
traces = traces.reshape((traces.shape[0], traces.shape[1], 1))
predictions = model.predict(traces)
maxindices = np.argmax(predictions, axis = 1)
for i in range(traces.shape[0]):
if Sbox[plaintext[i]^keys[i]] == maxindices[i]:
results[maxindices[i], 1] += 1
results[Sbox[plaintext[i]^keys[i]], 0] += 1
return results
#check first try accuracy of model against XMega2 test data
def check_model(model_file, traces, plaintext, keys):
global successResultsNPY
#Load model
model = load_sca_model(model_file)
#calculate first guess performance on random dataset and give results for each keybyte value
performance = keytest(model, traces, plaintext, keys)
index = np.arange(performance.shape[0])
successRate = performance[:,1]/performance[:,0]
filename = re.search('([^/]+$)', model_file).group(0)[:-3]
successResultsNPY += [(filename, np.mean(successRate))]
print("*"*30, "\n")
print(filename)
# print("best Sbox values: ", heapq.nlargest(9, range(len(successRate)), successRate.take))
print("mean success rate", np.mean(successRate))
print("_"*30)
#todo: label keybyte value charts
plt.xlabel('label value')
plt.ylabel('success rate')
plt.title(filename)
plt.bar(index, successRate)
filename = 'results/pdfresults/' + filename + '_first_try_keybyte#' + sys.argv[5] + '.pdf'
plt.savefig(filename)
plt.show(block=False)
plt.figure()
def load_traces(tracefile, ptfile, keyfile):
traces = np.load(tracefile)
plaintext = np.load(ptfile)
keys = np.load(keyfile)
return traces, plaintext, keys
############################
#CODE STARTS EXECUTING HERE#
############################
#=========================================#
#the interval size is by default set to 96
#which corresponds to the interval size
#of an ATxmega128D4 traces captured using
#ChipWhisperer. Analyze the trace if you
#are using something different and change
#this value!
#=========================================#
#******************
INTERVAL_SIZE = 96
#******************
#model can be hard coded here, but I recommend using the terminal instead
to_check_all = []
# the block below reads sys.argv[1] through sys.argv[8] plus the model files, so require enough arguments
if len(sys.argv) >= 9:
numtraces = int(sys.argv[1])
numiter = int(sys.argv[2])
tracestart = int(sys.argv[3])
traceend = int(sys.argv[4])
keybytepos = int(sys.argv[5])
tracefile = sys.argv[6]
ptfile = sys.argv[7]
keyfile = sys.argv[8]
to_check_all = [i for i in sys.argv][9:]
to_check_all = [i for i in to_check_all if i[-3:] == ".h5"]
traces, plaintext, keys = load_traces(tracefile, ptfile, keyfile)
interval = slice(tracestart+INTERVAL_SIZE*keybytepos, traceend+INTERVAL_SIZE*keybytepos)
print(traces.shape)
print(plaintext.shape)
print(keys.shape)
traces = traces[:,interval]
plaintext = plaintext[:,keybytepos]
keys = keys[:,keybytepos]
# No argument: check all the trained models
for m in to_check_all:
check_model(m, traces, plaintext, keys)
try:
np.save("results/npyresults/first_trace_success_rates.npy",np.array(successResultsNPY))
print("results stored in the ./results folder")
input("Test finished, press enter to continue ...")
except SyntaxError:
pass
| 39.751592 | 108 | 0.628425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,921 | 0.307803 |
7dacf9f865f47f80badfe339d0f2b8574ea5fb66 | 360 | py | Python | raptrcontainer/appropriated/admin.py | richard-parks/RAPTR | ff1342af4ee6447ab9cc21735e79efb7623df805 | [
"Unlicense"
]
| null | null | null | raptrcontainer/appropriated/admin.py | richard-parks/RAPTR | ff1342af4ee6447ab9cc21735e79efb7623df805 | [
"Unlicense"
]
| 2 | 2018-11-29T21:03:54.000Z | 2018-12-02T04:41:36.000Z | raptrcontainer/appropriated/admin.py | NOAA-PMEL/Admin_RAPTR | 2353aaa9500dce2e2e65a8d21e802b37c6990054 | [
"Unlicense"
]
| null | null | null | from django.contrib import admin
from .models import AppropriatedHistory
@admin.register(AppropriatedHistory)
class AppropriatedHistoryAdmin(admin.ModelAdmin):
list_display = [
'fiscal_year',
'source',
'dollars_received'
]
list_editable = [
'dollars_received',
]
ordering = [
'fiscal_year',
]
| 18.947368 | 49 | 0.644444 | 246 | 0.683333 | 0 | 0 | 283 | 0.786111 | 0 | 0 | 70 | 0.194444 |
7dad1f1269de17e831dca23ea74171d92ef7258b | 34,626 | py | Python | ringallreduce_simulator.py | hgao10/horovod_simulation | 3678a7d1d424931f48af4b53ef3293073af71c2e | [
"Apache-2.0"
]
| null | null | null | ringallreduce_simulator.py | hgao10/horovod_simulation | 3678a7d1d424931f48af4b53ef3293073af71c2e | [
"Apache-2.0"
]
| null | null | null | ringallreduce_simulator.py | hgao10/horovod_simulation | 3678a7d1d424931f48af4b53ef3293073af71c2e | [
"Apache-2.0"
]
| null | null | null | import collections
import time
import heapq
from horovod_simulator_config import SimulatorConfig, SchedulingDisc
from utils.logger import get_logger
import typing
from queue import PriorityQueue
class Packet():
def __init__(self, iteration_idx, layer_idx, packet_idx, packet_size_MB):
# global packet_size_MB
self.logger = get_logger("Packet", "DEBUG")
self.iteration_idx = iteration_idx
self.layer_idx = layer_idx
self.packet_idx = packet_idx
self.priority = self.layer_idx
self.size = packet_size_MB
def __lt__(self, other):
return self.priority < other.priority or ((self.priority == other.priority) and self.packet_idx < other.packet_idx)
def __str__(self):
return (f'Packet.priority, {self.priority}, Packet.id, {self.packet_idx}, Packet.iteration, {self.iteration_idx}, Packet.layer, {self.layer_idx}')
def set_priority(self, priority):
self.priority = priority
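# Illustrative example (not part of the original module): Packet.__lt__ makes heapq order packets by
# (priority, packet_idx), e.g.
#   pq = []
#   heapq.heappush(pq, Packet(0, layer_idx=3, packet_idx=0, packet_size_MB=0.1))
#   heapq.heappush(pq, Packet(0, layer_idx=1, packet_idx=0, packet_size_MB=0.1))
#   heapq.heappop(pq).layer_idx  # -> 1, i.e. the lower layer (higher priority) comes out first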
class Event():
def __init__(self, name, end_time, start_time):
self.name = name
self.time = end_time
self.start_time = start_time
self.duration = self.time - self.start_time
def __lt__(self, other):
return self.time < other.time
def __str__(self):
return (f'Time_ms, {self.time}, Event, {self.name}')
class Compute_Event(Event):
def __init__(self, time, start_time, direction, layer, iteration, state):
# Forward or Backward
name = direction + '_computation_' + state
super().__init__(name, time, start_time)
self.direction = direction
self.iteration = iteration
self.layer = layer
# start or done
self.state = state
def __str__(self):
return (f'Time_ms, {self.time}, Event, {self.name}, Iter, {self.iteration}, Layer, {self.layer}')
class Transmit_Event(Event):
def __init__(self, time,start_time, state, iteration, layer, packet_idx):
# start or done
self.state = state
name = 'Tensor_transimission_' + state
super().__init__(name, time, start_time)
self.iteration = iteration
self.layer = layer
self.packet_idx = packet_idx
def __str__(self):
return (f'Time_ms, {self.time}, Event, {self.name}, Iter, {self.iteration}, Layer, {self.layer}, Packet_idx, {self.packet_idx}')
class RingAllReduce_Event(Event):
def __init__(self, time, start_time, state, iteration, priority):
self.state = state
name = "RingAllReduce_" + state
super().__init__(name, time, start_time)
self.iteration = iteration
self.priority = priority
self.layer = priority
def __str__(self):
return (f'Time_ms, {self.time}, Event, {self.name}, Iter, {self.iteration}, Priority, {self.priority}')
class Gradients_Event(Event):
def __init__(self, time, start_time, iteration, layer):
super().__init__("Gradients_received", time, start_time)
self.iteration = iteration
self.layer = layer
def __str__(self):
return (f'Time_ms, {self.time}, Event, {self.name}, Iter, {self.iteration}, Layer, {self.layer}')
class Tensor():
def __init__(self, layer, size):
self.layer = layer
self.size = size
def __lt__(self, other):
return self.layer < other.layer
class SingleReduce():
def __init__(self):
self.tensors = [] # a list of tensors
self.priority = 0
self.size = 0
self.progress = 0 # 2(n-1) times where n is the number of workers
self.gradient_computed_status = {}
self.logger = get_logger("SingleReduce", "DEBUG")
self.iteration = 0
self.allreduce_time = 0
def add_tensor(self, tensor):
self.tensors.append(tensor)
self.size += tensor.size
# update priority
self.priority = min(self.tensors, key=lambda k:k.layer).layer
self.logger.debug(f"add tensor {tensor.layer}")
self.logger.debug(f"SingleReduce priority: {self.priority}")
self.gradient_computed_status[tensor.layer] = False
def set_gradient_available(self, layer):
self.gradient_computed_status[layer] = True
def ready_to_be_sent(self):
if sum(self.gradient_computed_status.values()) == len(self.gradient_computed_status):
return True
return False
def set_allreduce_time(self, time):
self.allreduce_time = time
def clear_compute_status(self):
for key in self.gradient_computed_status.keys():
self.gradient_computed_status[key] = False
self.logger.debug("Clearing gradient compute status")
def __lt__(self, other):
return self.priority < other.priority
class RingAllReduce():
# tensors: key layer, value = size in MB
def __init__(self, num_partitions) -> None:
self.num_partitions = num_partitions #num of workers
# key: priority of each reduce operation, value: layer of tensors
self.reducelists = {}
self.logger = get_logger("RingAllReduce", "DEBUG")
self.fusion_reduce_lookup = {}
def map_tensors(self, tensors: typing.List, fusion_buffer_size_MB):
# map tensor i to one ring allreduce operation j
one_reduce = SingleReduce()
added_t_count = 0
# TODO: refactor to be more straightforward!
while added_t_count < len(tensors):
tensor = tensors[added_t_count]
if one_reduce.size + tensor.size < fusion_buffer_size_MB:
one_reduce.add_tensor(tensor)
added_t_count += 1
if added_t_count < len(tensors) - 1:
                    # keep filling this buffer unless this was the last tensor, in which case fall through and flush it
continue
self.logger.debug(f"one_reduce size {one_reduce.size} tensor.size {tensor.size}")
if one_reduce.size == 0 or one_reduce.size == 0.0:
self.logger.error("Reduce operation is of size ZERO")
for t in one_reduce.tensors:
self.logger.debug(f"Tensor {t.layer} in reduce {one_reduce.priority} ")
self.fusion_reduce_lookup[t.layer] = one_reduce
self.reducelists[one_reduce.priority] = one_reduce
one_reduce = SingleReduce()
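# Illustrative sketch (not part of the original module) of how tensors are grouped into fusion buffers:
#   tensors = [Tensor(layer, 0.5) for layer in range(8)]        # one 0.5 MB tensor per layer
#   allreduce = RingAllReduce(num_partitions=4)
#   allreduce.map_tensors(tensors, fusion_buffer_size_MB=2.0)
#   # allreduce.reducelists then maps each fused buffer's priority (its lowest layer index) to a SingleReduce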
class HorovodSimulator():
def __init__(self, config):
self.logger = get_logger("HorovodSimulator", "DEBUG")
# key: event name value: event obj
self.record = collections.defaultdict(list)
self.config = config
# FIFO queue
self.transmission_queue = collections.deque()
# minheap sorted by priority
self.PerfectPQ_transmission_queue = []
# event queue is used as a minheap sorted by timestamp
self.event_queue = []
self.curr_time = 0
self.fp_total_time_ms = (1/3) * self.config.compute_time_per_iteration_ms
self.bp_total_time_ms = (2/3) * self.config.compute_time_per_iteration_ms
        # To simplify computation time in FP: assume each layer takes d ms less to compute than the previous layer and the last layer takes ~0 ms
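        # Derivation: with per-layer times t_k = first - k*d for k = 0..n-1 and the
        # constraint sum(t_k) = fp_total, choosing first = 2*fp_total/n and
        # d = 2*fp_total/(n*(n-1)) satisfies the sum and drives the last layer's
        # time to ~0 (it is clamped to d a few lines below).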
self.fp_diff_per_layer_ms = 2 * self.fp_total_time_ms / (self.config.num_layers * (self.config.num_layers-1))
self.logger.debug(f"self.fp_diff_per_layer_ms: {self.fp_diff_per_layer_ms}")
self.fp_first_layer_ms = 2 * self.fp_total_time_ms / self.config.num_layers
        # The same simplification applies to BP, except the per-layer times are in ascending order
self.bp_diff_per_layer_ms = 2 * self.bp_total_time_ms / (self.config.num_layers * (self.config.num_layers-1))
self.logger.debug(f"self.bp_diff_per_layer_ms: {self.bp_diff_per_layer_ms}")
self.fp_layers = {layer: self.fp_first_layer_ms - layer * self.fp_diff_per_layer_ms for layer in range(self.config.num_layers)}
self.fp_layers[self.config.num_layers -1] = self.fp_diff_per_layer_ms
self.logger.debug(f"self.fp_layers: {self.fp_layers}")
self.bp_layers = {layer: layer * self.bp_diff_per_layer_ms for layer in range(self.config.num_layers)}
self.bp_layers[0] = self.bp_diff_per_layer_ms
self.logger.debug(f"self.bp_layers:{self.bp_layers}")
self.check_computation_time_per_layer()
        # To simplify layer sizes: the first half of the layers have size s, the next quarter have size 4s, and the last quarter have size 12s
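        # Derivation of the minimum layer size s (assuming exact halves/quarters):
        # (n/2)*s + (n/4)*4s + (n/4)*12s = 4.5*n*s = model_size_MB, hence
        # s = 2*model_size_MB/(9*n), which is the expression used below.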
self.min_layer_size_MB = 2 * self.config.model_size_MB / ( 9 * self.config.num_layers)
if self.min_layer_size_MB == 0.0:
self.logger.warn("Min layer size in MB is zero")
self.config.packet_size_MB = self.min_layer_size_MB/self.config.min_packet_per_layer
self.logger.debug(f"min_layers in MB: {self.min_layer_size_MB}, packet_size_MB: {self.config.packet_size_MB}")
self.layer_size = {}
# number of packets to be sent/received per layer
self.layer_size_in_packets = {}
self.calculate_layer_size()
self.tensors = []
self.construct_tensors()
self.curr_fusion = []
# self._init_layer_size_in_packets()
self.logger.debug(f"layer_size_in packets: {self.layer_size_in_packets}")
self.check_layer_size_in_packets()
# initialize ring all reduce operations
self.ringallreduce = RingAllReduce(self.config.num_workers)
self.ringallreduce.map_tensors(self.tensors, self.config.fusion_buffer_size_MB)
self.calculate_single_ringallreduce_time(self.ringallreduce.reducelists)
self.ringallreduce_pq = PriorityQueue()
self.ringallreduce_fifo = collections.deque()
# Test run specs
self.config.TotalIteration = 2
# horovod simulator status registers
self.gradient_received = {layer: False for layer in range(self.config.num_layers)}
self.received_tensor_count = {layer: 0 for layer in range(self.config.num_layers)}
# tracks computation completion status per layer according to data dependency
# the first layer is always true as it doesn't depend on any previous layer
self.previous_FP_layer_status = {layer: False for layer in range(self.config.num_layers)}
self.previous_FP_layer_status[0] = True
self.increment_iteration_status = {i: False for i in range(self.config.TotalIteration+1)}
self.InTransit = False
# non-essential compute time
self.allReduceComputeTime_ms = 0
self.ApplyLayerGradient_ms = 0
# The transmission delay is the amount of time required for the router to push out the packet.
# The propagation delay, is the time it takes a bit to propagate from one router to the next.
self.tensor_transmittion_time_ms = self.config.packet_size_MB * 8 /self.config.transmission_rate_Gbit_per_sec
self.logger.debug(f"tensor transmission time: {self.tensor_transmittion_time_ms}")
        # TODO simplified version: each worker sends the entire gradient of a layer at once instead of gradient/num_workers for num_workers times; refer to the ring allreduce paper
        # parameter server model: send all tensors at once and wait for the PS to respond back, thus * 2
self.TotalAllReduceTime = self.allReduceComputeTime_ms + self.ApplyLayerGradient_ms + 2* (self.tensor_transmittion_time_ms + self.config.propagation_delay_ms) # compute + network roundtrip time
self.logger.debug(f"totalallreducetime: {self.TotalAllReduceTime}")
# TODO future feature: multiple priority queues
self.config.num_priority_queues = 1
self.priority_queues = {}
        # TODO incorporate credit_size in the non-perfect priority queue case, where packets can only be pre-empted if there is enough credit left
self.config.credit_size = 1
def calculate_single_ringallreduce_time(self, reducelists):
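        # Ring allreduce timing model used below: each fused reduce is split into
        # num_workers partitions; a full allreduce takes 2*(num_workers-1) steps
        # (reduce-scatter followed by allgather), and each step ships one partition
        # plus one propagation delay.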
for fusion_reduce in reducelists.values():
            partition_size_MB = fusion_reduce.size/self.config.num_workers
            one_iteration_transmit_partition_duration_ms = partition_size_MB * 8/self.config.transmission_rate_Gbit_per_sec + self.config.propagation_delay_ms
total_transmit_fusion_time_ms = 2 * (self.config.num_workers - 1) * one_iteration_transmit_partition_duration_ms
fusion_reduce.set_allreduce_time(total_transmit_fusion_time_ms)
def check_layer_size_in_packets(self):
for layer, num in self.layer_size_in_packets.items():
if num == 0 or num == 0.0:
self.logger.warn(f"Layer {layer} contains zero transmission packets")
def check_computation_time_per_layer(self):
for layer, time in self.fp_layers.items():
if time == 0.0:
self.logger.warn(f"FP layer[{layer}] is zero")
for layer, time in self.bp_layers.items():
if time == 0.0:
self.logger.warn(f"BP layer[{layer}] is zero")
def set_model_compute_time_per_iteration_ms(self, time):
self.config.compute_time_per_iteration_ms = time
self.fp_total_time_ms = (1/3) * self.config.compute_time_per_iteration_ms
self.bp_total_time_ms = (2/3) * self.config.compute_time_per_iteration_ms
        # To simplify computation time in FP: assume each layer takes d ms less to compute than the previous layer and the last layer takes ~0 ms
        self.fp_diff_per_layer_ms = 2 * self.fp_total_time_ms / (self.config.num_layers * (self.config.num_layers-1))
        self.fp_first_layer_ms = 2 * self.fp_total_time_ms / self.config.num_layers
        # The same simplification applies to BP, except the per-layer times are in ascending order
        self.bp_diff_per_layer_ms = 2 * self.bp_total_time_ms / (self.config.num_layers * (self.config.num_layers-1))
self.fp_layers = {layer: self.fp_first_layer_ms - layer * self.fp_diff_per_layer_ms for layer in range(self.config.num_layers)}
self.bp_layers = {layer: layer * self.bp_diff_per_layer_ms for layer in range(self.config.num_layers)}
def remove_iteration_barrier(self):
self.config.iteration_barrier = False
def calculate_layer_size(self):
for layer in range(self.config.num_layers):
if layer <= self.config.num_layers//2:
self.layer_size[layer] = self.min_layer_size_MB
self.layer_size_in_packets[layer] = self.config.min_packet_per_layer
elif self.config.num_layers//2 <layer <= 3*self.config.num_layers//4:
self.layer_size[layer] = 4 * self.min_layer_size_MB
self.layer_size_in_packets[layer] = 4 * self.config.min_packet_per_layer
else:
self.layer_size[layer] = 12 * self.min_layer_size_MB
self.layer_size_in_packets[layer] = 12 * self.config.min_packet_per_layer
def construct_tensors(self):
for layer, tensor_size in self.layer_size.items():
self.logger.debug(f"construct tensor of layer {layer}, size {tensor_size}")
t = Tensor(layer, tensor_size)
self.tensors.append(t)
def _init_priority_queues(self):
for i in range(self.config.num_priority_queues):
self.priority_queues[i] = collections.deque()
def enque_FP(self, curr_time, iteration):
for layer, compute_time in self.fp_layers.items():
next_event = Compute_Event(compute_time + curr_time, curr_time, "FP", layer, iteration, "done")
heapq.heappush(self.event_queue, next_event)
self.gradient_received[layer] = False
curr_time += compute_time
def transmit_tensor_fusion(self, fusion_reduce):
self.logger.debug(f"fusion_reduce.size {fusion_reduce.size}")
        partition_size_MB = fusion_reduce.size/self.config.num_workers
        self.logger.debug(f"going to send fusion reduce with priority {fusion_reduce.priority}, p size: {partition_size_MB}")
        # assuming there is zero computation time to apply the partial gradients
        # the propagation delay is artificially folded into the transmission time to indicate that the next transmission
        # can't start until gradients from neighbors have been received, which includes propagation time
        one_iteration_transmit_partition_duration_ms = partition_size_MB * 8/self.config.transmission_rate_Gbit_per_sec + self.config.propagation_delay_ms
# receive_one_partition_time_ms = one_iteration_transmit_partition_duration_ms + self.config.propagation_delay_ms
total_transmit_fusion_time_ms = 2 * (self.config.num_workers - 1) * one_iteration_transmit_partition_duration_ms
self.logger.debug(f"total tranmist fusion time ms: {total_transmit_fusion_time_ms}")
self.logger.debug(f"add next ringallreduce event {total_transmit_fusion_time_ms + self.curr_time}")
next_event = RingAllReduce_Event(total_transmit_fusion_time_ms + self.curr_time, self.curr_time, "done", fusion_reduce.iteration, fusion_reduce.priority )
heapq.heappush(self.event_queue, next_event)
self.InTransit = True
self.logger.debug(f"transmit_tensor in RingAllReduce")
return
def transmit_packet(self, packet):
# self.logger.debug(f'transimitting packet: iter:{packet.iteration_idx}, layer: {packet.layer_idx}, id: {packet.packet_idx}')
next_event = Transmit_Event(self.tensor_transmittion_time_ms + self.curr_time, self.curr_time,"done", packet.iteration_idx, packet.layer_idx, packet.packet_idx)
heapq.heappush(self.event_queue, next_event)
        if packet.packet_idx == self.layer_size_in_packets[packet.layer_idx] - 1: # last packet in the layer; assume there is no out-of-order transmission
            if not self.increment_iteration_status[packet.iteration_idx+1]: # any layer that finishes transmitting all of its gradients will increment the iteration for that layer
packet.iteration_idx += 1
next_event = Gradients_Event(self.TotalAllReduceTime + self.curr_time, self.curr_time,packet.iteration_idx, packet.layer_idx)
heapq.heappush(self.event_queue, next_event)
self.InTransit = True
return
# transmission queue: comprised of packet_id (iteration_idx, layer_idx, packet_idx)
def transmit_tensor(self):
# if self.FIFO_set and self.transmission_queue:
if self.config.qdisc == SchedulingDisc.FIFO and self.transmission_queue:
packet = self.transmission_queue.popleft()
self.transmit_packet(packet)
elif self.config.qdisc == SchedulingDisc.PerfectPQ and self.PerfectPQ_transmission_queue:
packet = heapq.heappop(self.PerfectPQ_transmission_queue)
self.logger.debug(f"Debug, pop packet off PerfectPQ_transmission_queue: {packet}")
self.transmit_packet(packet)
elif self.config.qdisc == SchedulingDisc.RingAllReducePQ and not self.ringallreduce_pq.empty():
self.logger.debug(f"transmit tensor for RingallReducePQ")
# if not self.ringallreduce_pq.empty():
self.logger.debug(f"ringallreduce_pq is not empty: {self.ringallreduce_pq}")
fusion_reduce = self.ringallreduce_pq.get(block=False)
self.transmit_tensor_fusion(fusion_reduce)
elif self.config.qdisc == SchedulingDisc.RingAllReduceFIFO and self.ringallreduce_fifo:
fusion_reduce = self.ringallreduce_fifo.popleft()
self.transmit_tensor_fusion(fusion_reduce)
def add_to_transmission_queue(self, num_packets, layer, iteration):
for i in range(num_packets):
p = Packet(iteration, layer, i, self.config.packet_size_MB)
# if self.FIFO_set:
if self.config.qdisc == SchedulingDisc.FIFO:
self.logger.debug(f'self.FIFO_set: add packets to transmission queue')
self.transmission_queue.append(p)
elif self.config.qdisc == SchedulingDisc.PerfectPQ:
# self.logger.debug(f'PerfectPQ: add packets to transmission queue')
heapq.heappush(self.PerfectPQ_transmission_queue, p)
else:
self.logger.error(f'Packets are not being added to the transmission queue')
def run(self):
# enque all FP events for the first iteration where there is no blocking
self.curr_time = 0
self.record["Start FP"].append(Event("Start FP", self.curr_time, self.curr_time))
self.enque_FP(self.curr_time, 0)
''' main event loop '''
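        # The loop repeatedly pops the earliest event off the min-heap and dispatches
        # on its name: "FP_computation_done", "BP_computation_done",
        # "Tensor_transimission_done" (spelling as used in the dispatch below),
        # "RingAllReduce_done" and "Gradients_received". It breaks out once
        # config.TotalIteration iterations have been simulated.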
while self.event_queue:
event = heapq.heappop(self.event_queue)
timestamp, layer, iteration = event.time, event.layer, event.iteration
self.record[event.name].append(event)
self.logger.debug(f'event: {event}')
self.curr_time = timestamp
if event.name == "FP_computation_done":
# if self.PerfectPQ_set:
if self.config.iteration_barrier == False:
if self.config.qdisc == SchedulingDisc.PerfectPQ or self.config.qdisc == SchedulingDisc.RingAllReducePQ:
if iteration != 0: # all FP events have been pushed for iteration 0
# 2nd iteration onwards
# restore previous FP compute status to not ready for next iteration
                            if layer != 0: # the first layer is excluded because it's always ready to compute once gradients are received
self.previous_FP_layer_status[layer] = False
if layer < self.config.num_layers-1: # unblock the compute for next FP layer
self.logger.debug(f"FP layer {layer} done, check if gradients received for {layer+1}")
self.previous_FP_layer_status[layer+1] = True
if self.gradient_received[layer+1]:
self.logger.debug(f"gradient_received[{layer+1}]: {self.gradient_received[layer+1]}")
next_event = Compute_Event(self.fp_layers[layer+1] + self.curr_time, self.curr_time, "FP", layer+1, iteration, "done")
heapq.heappush(self.event_queue, next_event)
# heapq.heappush(self.event_queue, [self.fp_layers[layer+1] + self.curr_time, "FP_computation_done", layer+1, iteration])
self.gradient_received[layer] = False
                # no need to handle the self.FIFO_set case because all FP events have already been pushed at the start of the new iteration
if layer == self.config.num_layers - 1: #last layer
# self.record.append([self.curr_time, "Start BP"])
self.record["Start BP"].append(Event("Start BP", self.curr_time, self.curr_time))
next_event = Compute_Event(self.bp_layers[layer]+self.curr_time, self.curr_time,"BP", layer, iteration, "done")
heapq.heappush(self.event_queue, next_event)
# heapq.heappush(self.event_queue,[self.bp_layers[layer]+self.curr_time,"BP_computation_done", layer, iteration] )
elif (event.name == "BP_computation_done"):
# ready to send gradient
# look up which reduce operation this layer belongs to
if self.config.qdisc == SchedulingDisc.RingAllReducePQ or self.config.qdisc == SchedulingDisc.RingAllReduceFIFO:
if iteration == self.config.TotalIteration - 1 :
self.logger.debug(f'break out of while loop : iteration: {iteration}')
# exit while loops
break
fusion_reduce = self.ringallreduce.fusion_reduce_lookup[layer]
fusion_reduce.iteration = iteration
self.logger.debug(f"BP computation done for ring all reduce {fusion_reduce.priority} layer {layer}")
fusion_reduce.set_gradient_available(layer)
# if all tensors in the reduce operation are computed, move it to allreduce priority queue
if fusion_reduce.ready_to_be_sent():
if self.config.qdisc == SchedulingDisc.RingAllReducePQ:
self.ringallreduce_pq.put(fusion_reduce)
else:
self.ringallreduce_fifo.append(fusion_reduce)
fusion_reduce.clear_compute_status()
elif self.config.qdisc == SchedulingDisc.PerfectPQ or self.config.qdisc == SchedulingDisc.FIFO:
num_packets = self.layer_size_in_packets[layer]
self.add_to_transmission_queue(num_packets, layer, iteration)
                if not self.InTransit: # nothing is being transmitted
self.transmit_tensor()
# start BP for next layer
if layer > 0:
self.logger.debug(f"Debug: add next BP layer to the queue: {self.bp_layers[layer-1]+self.curr_time}")
next_event = Compute_Event(self.bp_layers[layer-1]+self.curr_time, self.curr_time, "BP", layer-1, iteration, "done")
heapq.heappush(self.event_queue, next_event)
elif event.name == "Tensor_transimission_done":
self.InTransit = False
self.transmit_tensor()
elif event.name == "RingAllReduce_done":
self.InTransit = False
# set gradients received for layers included in the ringallreduce to ready
received_layers = []
for tensor in self.ringallreduce.reducelists[event.priority].tensors:
self.logger.debug(f"RingAllReduce done: set gradient received [{tensor.layer}]")
self.gradient_received[tensor.layer] = True
received_layers.append(tensor.layer)
received_layers.sort()
                # only need to kick-start the lowest layer of FP in the received fusion; the rest will be triggered once the lower FP layers are done
layer = received_layers[0]
if self.config.iteration_barrier == True:
                    if sum(self.gradient_received.values()) == self.config.num_layers: # all gradients have been received
self.logger.debug(f'{self.curr_time},Start FP computation in new iteration in RingAllReduce mode,{iteration}')
self.record["Start FP computation in new iteration in RingAllReduce mode"].append(Event("Start FP computation in new iteration in RingAllReduce mode", self.curr_time, self.curr_time))
self.enque_FP(self.curr_time, iteration+1)
else:
self.logger.debug(f'Have not received all gradients')
                else: # start FP as soon as the previous FP layer has finished and this layer's gradients have been received and applied
if self.previous_FP_layer_status[layer]:
self.logger.debug(f"start FP layer computation: {layer}")
compute_time = self.fp_layers[layer]
if layer == 0:
                            self.logger.debug(f'{self.curr_time},Start FP computation in new iteration in RingAllReduce mode,{iteration}')
self.record["Start FP computation in new iteration in RingAllReduce mode"].append(Event("Start FP computation in new iteration in RingAllReduce mode", self.curr_time,self.curr_time))
next_event = Compute_Event(compute_time+self.curr_time, self.curr_time,"FP", layer, iteration + 1, "done")
heapq.heappush(self.event_queue, next_event)
self.transmit_tensor()
elif event.name == "Gradients_received":
self.gradient_received[layer] = True
# Barrier between each iteration, current implementation
if iteration == self.config.TotalIteration:
self.logger.debug(f'break out of while loop : iteration: {iteration}')
# exit while loops
break
if self.config.iteration_barrier == True:
                if sum(self.gradient_received.values()) == self.config.num_layers: # all gradients have been received
self.logger.debug(f'{self.curr_time},Start FP computation in new iteration in FIFO mode,{iteration}')
self.record["Start FP computation in new iteration in FIFO mode"].append(Event("Start FP computation in new iteration in FIFO mode", self.curr_time, self.curr_time))
self.enque_FP(self.curr_time, iteration)
else:
self.logger.debug(f'Have not received all gradients')
            else: # start FP as soon as the previous FP layer has finished and this layer's gradients have been received and applied
self.logger.debug(f'self.previous_FP_layer_status[{layer}]: {self.previous_FP_layer_status[layer]}')
if self.previous_FP_layer_status[layer]:
# start computation of FP layer
self.logger.debug(f"start FP layer computation: {layer}")
compute_time = self.fp_layers[layer]
if layer == 0:
self.logger.debug(f'{self.curr_time},Start FP computation in new iteration in Perfect PQ mode,{iteration}')
self.record["Start FP computation in new iteration in Perfect PQ mode"].append(Event("Start FP computation in new iteration in Perfect PQ mode", self.curr_time,self.curr_time))
next_event = Compute_Event(compute_time+self.curr_time, self.curr_time,"FP", layer, iteration, "done")
heapq.heappush(self.event_queue, next_event)
# heapq.heappush(self.event_queue, [compute_time+self.curr_time, "FP_computation_done", layer, iteration])
else:
self.logger.error(f"Error: Non-existing Event: {event}")
break
# self.logger.debug(self.record)
# compute iteration time from records
def compute_iteration_time(record, simulator):
logger = get_logger("compute_iteration_time", "DEBUG")
iteration_time_ms = 0
iteration_start_time = 0
for event in record["FP_computation_done"]:
if event.layer == simulator.config.num_layers -1:
if event.iteration == 0:
iteration_start_time = event.time
if event.iteration == 1:
iteration_time_ms = event.time - iteration_start_time
break
logger.debug(f'iteration_time_ms: {iteration_time_ms}')
return iteration_time_ms
def compute_slack_time_FIFO(record, simulator):
'''
compute slack per layer for FIFO
Time difference between when gradients are computed to when gradients are needed
Gradients computed timestamp @ layer i = BP computation time done @ layer i
Gradients consumed timestamp @ layer i = FP computation start @ layer i
= FP computation done @ layer i - FP computation duration @ layer i
'''
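    # Worked example (illustrative numbers, not taken from a real run): if BP for
    # layer i finishes at t = 12 ms in iteration 0 and FP for layer i in iteration 1
    # finishes at t = 34 ms with a 4 ms FP duration, the slack is 34 - 4 - 12 = 18 ms.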
logger = get_logger("compute_slack_time_FIFO", "DEBUG")
slack_per_layer_in_ms = {layer: 0 for layer in range(simulator.config.num_layers)}
BP_computation_done_timestamp = {layer: 0 for layer in range(simulator.config.num_layers)}
for event in record["BP_computation_done"]:
if event.iteration == 0:
BP_computation_done_timestamp[event.layer] = event.time
for event in record["FP_computation_done"]:
if event.iteration == 1:
# print(f'layer: {event.layer}, FP_computation_done, {event.time}, fp_layers, {fp_layers[event.layer]}, BP compute done: { BP_computation_done_timestamp[event.layer]}')
slack_per_layer_in_ms[event.layer] = event.time - simulator.fp_layers[event.layer] - BP_computation_done_timestamp[event.layer]
logger.debug(f'slack_per_layer_in_ms: {slack_per_layer_in_ms}')
return slack_per_layer_in_ms
def compute_iteration_and_slack(record, simulator):
compute_iteration_time(record, simulator)
compute_slack_time_FIFO(record, simulator)
def test_run(config):
horovod_simulator = HorovodSimulator(config)
horovod_simulator.run()
compute_iteration_and_slack(horovod_simulator.record, horovod_simulator)
if __name__ == "__main__":
def test1():
test_FIFO_s = SimulatorConfig(**{"num_layers":10, "propagation_delay_ms":5})
horovod_simulator = HorovodSimulator(test_FIFO_s)
horovod_simulator.run()
compute_iteration_and_slack(horovod_simulator.record, horovod_simulator)
def test2():
test_PerfectPQ_s = SimulatorConfig(**{"iteration_barrier": False, "qdisc": SchedulingDisc.PerfectPQ, "num_layers":10, "propagation_delay_ms":5})
horovod_simulator = HorovodSimulator(test_PerfectPQ_s)
horovod_simulator.run()
compute_iteration_and_slack(horovod_simulator.record, horovod_simulator)
def test3():
network_bd = 50
test_FIFO_s = SimulatorConfig(**{"qidsc": SchedulingDisc.FIFO, "transmission_rate_Gbit_per_sec": network_bd})
horovod_simulator = HorovodSimulator(test_FIFO_s)
horovod_simulator.run()
compute_iteration_and_slack(horovod_simulator.record, horovod_simulator)
def test4():
network_bd = 50
test_PerfectPQ_s = SimulatorConfig(**{"iteration_barrier": False, "qdisc": SchedulingDisc.PerfectPQ, "transmission_rate_Gbit_per_sec": network_bd })
horovod_simulator = HorovodSimulator(test_PerfectPQ_s)
horovod_simulator.run()
compute_iteration_and_slack(horovod_simulator.record, horovod_simulator)
def test_ring_allreduce_pq():
config = SimulatorConfig(**{"iteration_barrier": False, "qdisc": SchedulingDisc.RingAllReducePQ,"num_layers":10, "propagation_delay_ms":5})
test_run(config)
def test_ring_allreduce_fifo():
# fifo explicitly has iteration barrier in place
config = SimulatorConfig(**{"iteration_barrier": True, "qdisc": SchedulingDisc.RingAllReduceFIFO,"num_layers":10, "propagation_delay_ms":5})
test_run(config)
# test1()
test_ring_allreduce_fifo()
| 55.313099 | 210 | 0.6591 | 30,188 | 0.87183 | 0 | 0 | 0 | 0 | 0 | 0 | 10,040 | 0.289956 |
7daef8b7f43d19ad4b4a4241d53911344a3bad74 | 675 | py | Python | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
]
| null | null | null | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
]
| null | null | null | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
]
| null | null | null | __author__ = 'alejandroaguado'
from xml.etree import ElementTree
class ABNOParameters:
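    # The parser below assumes an XML layout along these lines (illustrative only,
    # not taken from the original repository):
    #   <config>
    #     <abnoconfig address="127.0.0.1" port="9000"/>
    #     <pceconfig address="127.0.0.1" port="9001"/>
    #     <pmconfig address="127.0.0.1" port="9002"/>
    #   </config>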
def __init__(self, filename):
self.document = ElementTree.parse(filename)
root = self.document.getroot()
tag = self.document.find('abnoconfig')
self.address=tag.attrib['address']
self.port = int(tag.attrib['port'])
tag = self.document.find('pceconfig')
self.pceaddress = tag.attrib['address']
self.pceport = int(tag.attrib['port'])
tag = self.document.find('pmconfig')
self.pmaddress = tag.attrib['address']
self.pmport = int(tag.attrib['port'])
#tag = self.document.find('properties') | 35.526316 | 51 | 0.638519 | 607 | 0.899259 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.198519 |
7daf7e347025b6adafd5d8ff1bdd20e4296b68c6 | 15,533 | py | Python | gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
]
| null | null | null | gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
]
| null | null | null | gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
]
| 2 | 2017-07-30T16:18:23.000Z | 2020-10-14T11:24:18.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service_messages.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\nDgoogle/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\"\x86\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x34\n\x05table\x18\x03 \x01(\x0b\x32%.google.bigtable.admin.table.v1.Table\x12\x1a\n\x12initial_split_keys\x18\x04 \x03(\t\"!\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"K\n\x12ListTablesResponse\x12\x35\n\x06tables\x18\x01 \x03(\x0b\x32%.google.bigtable.admin.table.v1.Table\"\x1f\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"2\n\x12RenameTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06new_id\x18\x02 \x01(\t\"\x88\x01\n\x19\x43reateColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_family_id\x18\x02 \x01(\t\x12\x43\n\rcolumn_family\x18\x03 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily\")\n\x19\x44\x65leteColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\tBI\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CREATETABLEREQUEST = _descriptor.Descriptor(
name='CreateTableRequest',
full_name='google.bigtable.admin.table.v1.CreateTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table_id', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_split_keys', full_name='google.bigtable.admin.table.v1.CreateTableRequest.initial_split_keys', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=297,
)
_LISTTABLESREQUEST = _descriptor.Descriptor(
name='ListTablesRequest',
full_name='google.bigtable.admin.table.v1.ListTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.ListTablesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=299,
serialized_end=332,
)
_LISTTABLESRESPONSE = _descriptor.Descriptor(
name='ListTablesResponse',
full_name='google.bigtable.admin.table.v1.ListTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tables', full_name='google.bigtable.admin.table.v1.ListTablesResponse.tables', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=334,
serialized_end=409,
)
_GETTABLEREQUEST = _descriptor.Descriptor(
name='GetTableRequest',
full_name='google.bigtable.admin.table.v1.GetTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.GetTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=442,
)
_DELETETABLEREQUEST = _descriptor.Descriptor(
name='DeleteTableRequest',
full_name='google.bigtable.admin.table.v1.DeleteTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=444,
serialized_end=478,
)
_RENAMETABLEREQUEST = _descriptor.Descriptor(
name='RenameTableRequest',
full_name='google.bigtable.admin.table.v1.RenameTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.RenameTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_id', full_name='google.bigtable.admin.table.v1.RenameTableRequest.new_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=530,
)
_CREATECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='CreateColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family_id', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=533,
serialized_end=669,
)
_DELETECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='DeleteColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=712,
)
_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_CREATECOLUMNFAMILYREQUEST.fields_by_name['column_family'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._COLUMNFAMILY
DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST
DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST
DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE
DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST
DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST
DESCRIPTOR.message_types_by_name['RenameTableRequest'] = _RENAMETABLEREQUEST
DESCRIPTOR.message_types_by_name['CreateColumnFamilyRequest'] = _CREATECOLUMNFAMILYREQUEST
DESCRIPTOR.message_types_by_name['DeleteColumnFamilyRequest'] = _DELETECOLUMNFAMILYREQUEST
CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateTableRequest)
))
_sym_db.RegisterMessage(CreateTableRequest)
ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesRequest)
))
_sym_db.RegisterMessage(ListTablesRequest)
ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESRESPONSE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesResponse)
))
_sym_db.RegisterMessage(ListTablesResponse)
GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GetTableRequest)
))
_sym_db.RegisterMessage(GetTableRequest)
DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteTableRequest)
))
_sym_db.RegisterMessage(DeleteTableRequest)
RenameTableRequest = _reflection.GeneratedProtocolMessageType('RenameTableRequest', (_message.Message,), dict(
DESCRIPTOR = _RENAMETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.RenameTableRequest)
))
_sym_db.RegisterMessage(RenameTableRequest)
CreateColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('CreateColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateColumnFamilyRequest)
))
_sym_db.RegisterMessage(CreateColumnFamilyRequest)
DeleteColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('DeleteColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteColumnFamilyRequest)
))
_sym_db.RegisterMessage(DeleteColumnFamilyRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.early_adopter import implementations as early_adopter_implementations
from grpc.framework.alpha import utilities as alpha_utilities
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| 39.224747 | 1,208 | 0.771905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,962 | 0.319449 |
7dafc11fd8fb86ab44db99cb63fe8f3a5c118843 | 277 | py | Python | influencer-detection/src/api/influencers/api/v1.py | luisblazquezm/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
]
| 4 | 2021-05-22T16:33:41.000Z | 2021-11-22T23:44:40.000Z | influencer-detection/src/api/influencers/api/v1.py | Alburrito/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
]
| null | null | null | influencer-detection/src/api/influencers/api/v1.py | Alburrito/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
]
| 2 | 2021-05-21T16:34:14.000Z | 2021-09-29T12:59:49.000Z | #!flask/bin/python
# Copyright 2021 Luis Blazquez Miñambres (@luisblazquezm)
# See LICENSE for details.
from flask_restx import Api
api = Api(version='1.0',
title='Influencer Detection Project',
description="**PORBI Influencer Detection project's Flask RESTX API**") | 27.7 | 75 | 0.747292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.701439 |
7db08f8c76f09b843e5f713087e6a5fa445f6755 | 7,543 | py | Python | core/models/sparse_bp_cnn.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
]
| 7 | 2021-11-02T16:21:47.000Z | 2022-03-09T06:01:25.000Z | core/models/sparse_bp_cnn.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
]
| null | null | null | core/models/sparse_bp_cnn.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
]
| null | null | null | '''
Description:
Author: Jiaqi Gu ([email protected])
Date: 2021-10-24 16:23:50
LastEditors: Jiaqi Gu ([email protected])
LastEditTime: 2021-10-24 16:23:50
'''
from collections import OrderedDict
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from pyutils.general import logger
from torch import Tensor, nn
from torch.types import Device, _size
from .layers.activation import ReLUN
from .layers.custom_conv2d import MZIBlockConv2d
from .layers.custom_linear import MZIBlockLinear
from .sparse_bp_base import SparseBP_Base
__all__ = ["SparseBP_MZI_CNN"]
class ConvBlock(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: int = 3,
miniblock: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.conv = MZIBlockConv2d(
in_channel,
out_channel,
kernel_size,
miniblock,
bias,
stride,
padding,
mode=mode,
v_max=v_max,
v_pi=v_pi,
w_bit=w_bit,
in_bit=in_bit,
photodetect=photodetect,
device=device,
)
self.bn = nn.BatchNorm2d(out_channel)
self.activation = ReLUN(act_thres, inplace=True)
def forward(self, x: Tensor) -> Tensor:
return self.activation(self.bn(self.conv(x)))
class LinearBlock(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
miniblock: int = 8,
bias: bool = False,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
activation: bool = True,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.linear = MZIBlockLinear(
in_channel, out_channel, miniblock, bias, mode, v_max, v_pi, w_bit, in_bit, photodetect, device
)
self.activation = ReLUN(act_thres, inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation is not None:
x = self.activation(x)
return x
class SparseBP_MZI_CNN(SparseBP_Base):
"""MZI CNN (Shen+, Nature Photonics 2017). Support sparse backpropagation. Blocking matrix multiplication."""
def __init__(
self,
img_height: int,
img_width: int,
in_channel: int,
n_class: int,
kernel_list: List[int] = [32],
kernel_size_list: List[int] = [3],
pool_out_size: int = 5,
stride_list=[1],
padding_list=[1],
hidden_list: List[int] = [32],
block_list: List[int] = [8],
in_bit: int = 32,
w_bit: int = 32,
mode: str = "usv",
v_max: float = 10.8,
v_pi: float = 4.36,
act_thres: float = 6.0,
photodetect: bool = True,
bias: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.img_height = img_height
self.img_width = img_width
self.in_channel = in_channel
self.n_class = n_class
self.kernel_list = kernel_list
self.kernel_size_list = kernel_size_list
self.stride_list = stride_list
self.padding_list = padding_list
self.pool_out_size = pool_out_size
self.hidden_list = hidden_list
self.block_list = block_list
self.in_bit = in_bit
self.w_bit = w_bit
self.mode = mode
self.v_max = v_max
self.v_pi = v_pi
self.act_thres = act_thres
self.photodetect = photodetect
self.bias = bias
self.device = device
self.build_layers()
self.drop_masks = None
self.reset_parameters()
self.gamma_noise_std = 0
self.crosstalk_factor = 0
def build_layers(self):
self.features = OrderedDict()
for idx, out_channel in enumerate(self.kernel_list, 0):
layer_name = "conv" + str(idx + 1)
in_channel = self.in_channel if (idx == 0) else self.kernel_list[idx - 1]
self.features[layer_name] = ConvBlock(
in_channel,
out_channel,
self.kernel_size_list[idx],
self.block_list[idx],
self.bias,
self.stride_list[idx],
self.padding_list[idx],
self.mode,
self.v_max,
self.v_pi,
self.w_bit,
self.in_bit,
self.photodetect,
self.act_thres,
self.device,
)
self.features = nn.Sequential(self.features)
if self.pool_out_size > 0:
self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)
feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size
else:
self.pool2d = None
img_height, img_width = self.img_height, self.img_width
for layer in self.modules():
if isinstance(layer, MZIBlockConv2d):
img_height, img_width = layer.get_output_dim(img_height, img_width)
feature_size = img_height * img_width * self.kernel_list[-1]
self.classifier = OrderedDict()
for idx, hidden_dim in enumerate(self.hidden_list, 0):
layer_name = "fc" + str(idx + 1)
in_channel = feature_size if idx == 0 else self.hidden_list[idx - 1]
out_channel = hidden_dim
self.classifier[layer_name] = LinearBlock(
in_channel,
out_channel,
miniblock=self.block_list[idx + len(self.kernel_list)],
bias=self.bias,
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
activation=True,
act_thres=self.act_thres,
device=self.device,
)
layer_name = "fc" + str(len(self.hidden_list) + 1)
self.classifier[layer_name] = MZIBlockLinear(
self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,
self.n_class,
miniblock=self.block_list[-1],
bias=self.bias,
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=self.device,
)
self.classifier = nn.Sequential(self.classifier)
def forward(self, x: Tensor) -> Tensor:
x = self.features(x)
if self.pool2d is not None:
x = self.pool2d(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
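# Illustrative usage sketch (assumed values, not part of the original file):
#   model = SparseBP_MZI_CNN(img_height=28, img_width=28, in_channel=1, n_class=10,
#                            kernel_list=[16], kernel_size_list=[3], pool_out_size=5,
#                            stride_list=[1], padding_list=[1], hidden_list=[32],
#                            block_list=[8, 8, 8], device=torch.device("cpu"))
#   logits = model(torch.randn(4, 1, 28, 28))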
| 31.298755 | 113 | 0.56158 | 6,901 | 0.914888 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.058465 |
7db09573c7b7fdf192db6f472bcb81e9222dc388 | 9,017 | py | Python | lite/demo/python/mobilenetv1_full_api.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
]
| 3 | 2021-06-17T11:00:13.000Z | 2021-08-10T10:28:59.000Z | lite/demo/python/mobilenetv1_full_api.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
]
| null | null | null | lite/demo/python/mobilenetv1_full_api.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite full python api demo
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from paddlelite.lite import *
import numpy as np
import platform
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument("--model_file", default="", type=str, help="Model file")
parser.add_argument(
"--param_file", default="", type=str, help="Combined model param file")
parser.add_argument(
"--input_shape",
default=[1, 3, 224, 224],
nargs='+',
type=int,
required=False,
help="Model input shape, eg: 1 3 224 224. Defalut: 1 3 224 224")
parser.add_argument(
"--backend",
default="",
type=str,
help="To use a particular backend for execution. Should be one of: arm|opencl|x86|x86_opencl|metal|nnadapter"
)
parser.add_argument(
"--image_path", default="", type=str, help="The path of test image file")
parser.add_argument(
"--label_path", default="", type=str, help="The path of label file")
parser.add_argument(
"--print_results",
type=bool,
default=False,
help="Print results. Default: False")
parser.add_argument(
"--nnadapter_device_names",
default="",
type=str,
help="Set nnadapter device names")
parser.add_argument(
"--nnadapter_context_properties",
default="",
type=str,
help="Set nnadapter context properties")
parser.add_argument(
"--nnadapter_model_cache_dir",
default="",
type=str,
help="Set nnadapter model cache dir")
parser.add_argument(
"--nnadapter_subgraph_partition_config_path",
default="",
type=str,
help="Set nnadapter subgraph partition config path")
parser.add_argument(
"--nnadapter_mixed_precision_quantization_config_path",
default="",
type=str,
help="Set nnadapter mixed precision quantization config path")
def RunModel(args):
# 1. Set config information
config = CxxConfig()
if args.model_file != '' and args.param_file != '':
config.set_model_file(args.model_file)
config.set_param_file(args.param_file)
else:
config.set_model_dir(args.model_dir)
if platform.machine() in ["x86_64", "x64", "AMD64"]:
platform_place = Place(TargetType.X86, PrecisionType.FP32)
else:
platform_place = Place(TargetType.ARM, PrecisionType.FP32)
if args.backend.upper() in ["ARM"]:
places = [Place(TargetType.ARM, PrecisionType.FP32)]
elif args.backend.upper() in ["X86"]:
places = [Place(TargetType.X86, PrecisionType.FP32)]
elif args.backend.upper() in ["OPENCL", "X86_OPENCL"]:
places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
platform_place, Place(TargetType.Host, PrecisionType.FP32)
]
'''
Set opencl kernel binary.
        Without a cached binary, a large additional prepare time is spent on
        algorithm selection and on building kernels from source code.
        Prepare time can be reduced dramatically after the algorithm file and
        the OpenCL kernel binary have been built on the first run.
        The first run will take a bit longer due to compile time if you don't
        call `set_opencl_binary_path_name` explicitly, so calling
        `set_opencl_binary_path_name` explicitly is strongly recommended.
        Make sure you have write permission on the binary path.
        We strongly recommend that each model has a unique binary name.
'''
bin_path = "./"
bin_name = "lite_opencl_kernel.bin"
config.set_opencl_binary_path_name(bin_path, bin_name)
'''
opencl tune option:
CL_TUNE_NONE
CL_TUNE_RAPID
CL_TUNE_NORMAL
CL_TUNE_EXHAUSTIVE
'''
tuned_path = "./"
tuned_name = "lite_opencl_tuned.bin"
config.set_opencl_tune(CLTuneMode.CL_TUNE_NORMAL, tuned_path,
tuned_name, 4)
'''
opencl precision option:
CL_PRECISION_AUTO, first fp16 if valid, default
CL_PRECISION_FP32, force fp32
CL_PRECISION_FP16, force fp16
'''
config.set_opencl_precision(CLPrecisionType.CL_PRECISION_AUTO)
elif args.backend.upper() in ["METAL"]:
# set metallib path
import paddlelite, os
module_path = os.path.dirname(paddlelite.__file__)
config.set_metal_lib_path(module_path + "/libs/lite.metallib")
config.set_metal_use_mps(True)
# set places for Metal
places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
elif args.backend.upper() in ["NNADAPTER"]:
places = [
Place(TargetType.NNAdapter, PrecisionType.FP32), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
if args.nnadapter_device_names == "":
print(
"Please set nnadapter_device_names when backend = nnadapter!")
return
config.set_nnadapter_device_names(
args.nnadapter_device_names.split(","))
config.set_nnadapter_context_properties(
args.nnadapter_context_properties)
config.set_nnadapter_model_cache_dir(args.nnadapter_model_cache_dir)
config.set_nnadapter_subgraph_partition_config_path(
args.nnadapter_subgraph_partition_config_path)
config.set_nnadapter_mixed_precision_quantization_config_path(
args.nnadapter_mixed_precision_quantization_config_path)
else:
raise ValueError("Unsupported backend: %s." % args.backend)
config.set_valid_places(places)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
optimized_model_dir = "opt_" + args.backend
predictor.save_optimized_model(optimized_model_dir)
# 3. Set input data
input_tensor = predictor.get_input(0)
c, h, w = args.input_shape[1], args.input_shape[2], args.input_shape[3]
read_image = len(args.image_path) != 0 and len(args.label_path) != 0
if read_image == True:
import cv2
with open(args.label_path, "r") as f:
label_list = f.readlines()
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
image_data = cv2.imread(args.image_path)
image_data = cv2.resize(image_data, (h, w))
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = image_data.transpose((2, 0, 1)) / 255.0
image_data = (image_data - np.array(image_mean).reshape(
(3, 1, 1))) / np.array(image_std).reshape((3, 1, 1))
image_data = image_data.reshape([1, c, h, w]).astype('float32')
input_tensor.from_numpy(image_data)
else:
input_tensor.from_numpy(np.ones((1, c, h, w)).astype("float32"))
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
output_data = output_tensor.numpy()
if args.print_results == True:
print("result data:\n{}".format(output_data))
print("mean:{:.6e}, std:{:.6e}, min:{:.6e}, max:{:.6e}".format(
np.mean(output_data),
np.std(output_data), np.min(output_data), np.max(output_data)))
# 6. Post-process
if read_image == True:
output_data = output_data.flatten()
class_id = np.argmax(output_data)
class_name = label_list[class_id]
score = output_data[class_id]
print("class_name: {} score: {}".format(class_name, score))
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
| 38.046414 | 113 | 0.663303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,008 | 0.333592 |
7db0ea31fec60827935b4f0a8b82bcb2927b7b8f | 2,218 | py | Python | discord_ui/errors.py | brotherelric/discord-ui | 6f8cd994aff66fb51f385d23907b3a612546cbe9 | [
"MIT"
]
| 26 | 2021-08-21T15:34:13.000Z | 2022-03-26T13:50:48.000Z | discord_ui/errors.py | brotherelric/discord-ui | 6f8cd994aff66fb51f385d23907b3a612546cbe9 | [
"MIT"
]
| 12 | 2021-08-21T15:24:00.000Z | 2022-02-25T02:49:01.000Z | discord_ui/errors.py | brotherelric/discord-ui | 6f8cd994aff66fb51f385d23907b3a612546cbe9 | [
"MIT"
]
| 10 | 2021-09-03T12:31:13.000Z | 2022-03-29T06:24:06.000Z | from discord.ext.commands import BadArgument
class InvalidLength(BadArgument):
"""This exception is thrown whenever a invalid length was provided"""
def __init__(self, my_name, _min=None, _max=None, *args: object) -> None:
if _min is not None and _max is not None:
err = "Length of '" + my_name + "' must be between " + str(_min) + " and " + str(_max)
elif _min is None and _max is not None:
err = "Length of '" + my_name + "' must be less than " + str(_max)
elif _min is not None and _max is None:
err = "Lenght of '" + my_name + "' must be more than " + str(_min)
super().__init__(err)
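# Illustrative use of InvalidLength (assumed call site, not from this library):
#   if len(custom_id) > 100:
#       raise InvalidLength("custom_id", _max=100)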
class OutOfValidRange(BadArgument):
"""This exception is thrown whenever a value was ot of its valid range"""
def __init__(self, name, _min, _max, *args: object) -> None:
super().__init__("'" + name + "' must be in range " + str(_min) + " and " + str(_max))
class WrongType(BadArgument):
"""This exception is thrown whenever a value is of the wrong type"""
def __init__(self, name, me, valid_type, *args: object) -> None:
super().__init__("'" + name + "' must be of type " + (str(valid_type) if not isinstance(valid_type, list) else ' or '.join(valid_type)) + ", not " + str(type(me)))
class InvalidEvent(BadArgument):
"""This exception is thrown whenever a invalid eventname was passed"""
def __init__(self, name, events, *args: object) -> None:
super().__init__("Invalid event name, event must be " + " or ".join(events) + ", not " + str(name))
class MissingListenedComponentParameters(BadArgument):
"""This exception is thrown whenever a callback for a listening component is missing parameters"""
def __init__(self, *args: object) -> None:
super().__init__("Callback function for listening components needs to accept one parameter (the used component)", *args)
class CouldNotParse(BadArgument):
"""This exception is thrown whenever the libary was unable to parse the data with the given method"""
def __init__(self, data, type, method, *args: object) -> None:
super().__init__("Could not parse '" + str(data) + " [" + str(type) + "]' with method " + str(method), *args) | 69.3125 | 171 | 0.656898 | 2,167 | 0.977006 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.379621 |
7db12d202da616ad47115b53db18fc8f8d4df1a8 | 178 | py | Python | Python/bot_2.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
]
| 1 | 2021-02-16T17:36:53.000Z | 2021-02-16T17:36:53.000Z | Python/bot_2.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
]
| null | null | null | Python/bot_2.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
]
| null | null | null | from selenium import webdriver
navegador = webdriver.Chrome()
navegador.get("https://webstatic-sea.mihoyo.com/ys/event/signin-sea/index.html?act_id=e202102251931481&lang=pt-pt") | 44.5 | 115 | 0.808989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.561798 |
7db1db7e3a4320d312306a5a9f6265803704a70b | 18,177 | py | Python | vendor/istio.io/api/python/istio_api/envoy/config/filter/http/jwt_auth/v2alpha1/config_pb2.py | PinZhang/istio | dce455456d77ca5af34ba5848f9704577349c6bd | [
"Apache-2.0"
]
| 794 | 2018-07-12T06:08:10.000Z | 2019-05-12T20:06:00.000Z | vendor/istio.io/api/python/istio_api/envoy/config/filter/http/jwt_auth/v2alpha1/config_pb2.py | PinZhang/istio | dce455456d77ca5af34ba5848f9704577349c6bd | [
"Apache-2.0"
]
| 25 | 2018-07-20T08:53:42.000Z | 2019-05-05T06:23:11.000Z | vendor/istio.io/api/python/istio_api/envoy/config/filter/http/jwt_auth/v2alpha1/config_pb2.py | PinZhang/istio | dce455456d77ca5af34ba5848f9704577349c6bd | [
"Apache-2.0"
]
| 116 | 2018-07-12T15:24:17.000Z | 2019-05-10T10:11:32.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: envoy/config/filter/http/jwt_auth/v2alpha1/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='envoy/config/filter/http/jwt_auth/v2alpha1/config.proto',
package='istio.envoy.config.filter.http.jwt_auth.v2alpha1',
syntax='proto3',
serialized_pb=_b('\n7envoy/config/filter/http/jwt_auth/v2alpha1/config.proto\x12\x30istio.envoy.config.filter.http.jwt_auth.v2alpha1\x1a\x1egoogle/protobuf/duration.proto\"k\n\x07HttpUri\x12\x0b\n\x03uri\x18\x01 \x01(\t\x12\x11\n\x07\x63luster\x18\x02 \x01(\tH\x00\x12*\n\x07timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x14\n\x12http_upstream_type\"^\n\nDataSource\x12\x12\n\x08\x66ilename\x18\x01 \x01(\tH\x00\x12\x16\n\x0cinline_bytes\x18\x02 \x01(\x0cH\x00\x12\x17\n\rinline_string\x18\x03 \x01(\tH\x00\x42\x0b\n\tspecifier\"\x87\x03\n\x07JwtRule\x12\x0e\n\x06issuer\x18\x01 \x01(\t\x12\x11\n\taudiences\x18\x02 \x03(\t\x12S\n\x0bremote_jwks\x18\x03 \x01(\x0b\x32<.istio.envoy.config.filter.http.jwt_auth.v2alpha1.RemoteJwksH\x00\x12R\n\nlocal_jwks\x18\x04 \x01(\x0b\x32<.istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSourceH\x00\x12\x0f\n\x07\x66orward\x18\x05 \x01(\x08\x12Q\n\x0c\x66rom_headers\x18\x06 \x03(\x0b\x32;.istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtHeader\x12\x13\n\x0b\x66rom_params\x18\x07 \x03(\t\x12\x1e\n\x16\x66orward_payload_header\x18\x08 \x01(\tB\x17\n\x15jwks_source_specifier\"\x8c\x01\n\nRemoteJwks\x12K\n\x08http_uri\x18\x01 \x01(\x0b\x32\x39.istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri\x12\x31\n\x0e\x63\x61\x63he_duration\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"/\n\tJwtHeader\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cvalue_prefix\x18\x02 \x01(\t\"~\n\x11JwtAuthentication\x12H\n\x05rules\x18\x01 \x03(\x0b\x32\x39.istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule\x12\x1f\n\x17\x61llow_missing_or_failed\x18\x02 \x01(\x08\x42\x39Z7istio.io/api/envoy/config/filter/http/jwt_auth/v2alpha1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_HTTPURI = _descriptor.Descriptor(
name='HttpUri',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri.uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri.cluster', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeout', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri.timeout', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='http_upstream_type', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri.http_upstream_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=141,
serialized_end=248,
)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filename', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource.filename', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inline_bytes', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource.inline_bytes', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inline_string', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource.inline_string', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='specifier', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource.specifier',
index=0, containing_type=None, fields=[]),
],
serialized_start=250,
serialized_end=344,
)
_JWTRULE = _descriptor.Descriptor(
name='JwtRule',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='issuer', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.issuer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='audiences', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.audiences', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remote_jwks', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.remote_jwks', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_jwks', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.local_jwks', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='forward', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.forward', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='from_headers', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.from_headers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='from_params', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.from_params', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='forward_payload_header', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.forward_payload_header', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='jwks_source_specifier', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule.jwks_source_specifier',
index=0, containing_type=None, fields=[]),
],
serialized_start=347,
serialized_end=738,
)
_REMOTEJWKS = _descriptor.Descriptor(
name='RemoteJwks',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.RemoteJwks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='http_uri', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.RemoteJwks.http_uri', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cache_duration', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.RemoteJwks.cache_duration', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=741,
serialized_end=881,
)
_JWTHEADER = _descriptor.Descriptor(
name='JwtHeader',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtHeader.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value_prefix', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtHeader.value_prefix', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=883,
serialized_end=930,
)
_JWTAUTHENTICATION = _descriptor.Descriptor(
name='JwtAuthentication',
full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtAuthentication',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtAuthentication.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_missing_or_failed', full_name='istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtAuthentication.allow_missing_or_failed', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=932,
serialized_end=1058,
)
_HTTPURI.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_HTTPURI.oneofs_by_name['http_upstream_type'].fields.append(
_HTTPURI.fields_by_name['cluster'])
_HTTPURI.fields_by_name['cluster'].containing_oneof = _HTTPURI.oneofs_by_name['http_upstream_type']
_DATASOURCE.oneofs_by_name['specifier'].fields.append(
_DATASOURCE.fields_by_name['filename'])
_DATASOURCE.fields_by_name['filename'].containing_oneof = _DATASOURCE.oneofs_by_name['specifier']
_DATASOURCE.oneofs_by_name['specifier'].fields.append(
_DATASOURCE.fields_by_name['inline_bytes'])
_DATASOURCE.fields_by_name['inline_bytes'].containing_oneof = _DATASOURCE.oneofs_by_name['specifier']
_DATASOURCE.oneofs_by_name['specifier'].fields.append(
_DATASOURCE.fields_by_name['inline_string'])
_DATASOURCE.fields_by_name['inline_string'].containing_oneof = _DATASOURCE.oneofs_by_name['specifier']
_JWTRULE.fields_by_name['remote_jwks'].message_type = _REMOTEJWKS
_JWTRULE.fields_by_name['local_jwks'].message_type = _DATASOURCE
_JWTRULE.fields_by_name['from_headers'].message_type = _JWTHEADER
_JWTRULE.oneofs_by_name['jwks_source_specifier'].fields.append(
_JWTRULE.fields_by_name['remote_jwks'])
_JWTRULE.fields_by_name['remote_jwks'].containing_oneof = _JWTRULE.oneofs_by_name['jwks_source_specifier']
_JWTRULE.oneofs_by_name['jwks_source_specifier'].fields.append(
_JWTRULE.fields_by_name['local_jwks'])
_JWTRULE.fields_by_name['local_jwks'].containing_oneof = _JWTRULE.oneofs_by_name['jwks_source_specifier']
_REMOTEJWKS.fields_by_name['http_uri'].message_type = _HTTPURI
_REMOTEJWKS.fields_by_name['cache_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_JWTAUTHENTICATION.fields_by_name['rules'].message_type = _JWTRULE
DESCRIPTOR.message_types_by_name['HttpUri'] = _HTTPURI
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['JwtRule'] = _JWTRULE
DESCRIPTOR.message_types_by_name['RemoteJwks'] = _REMOTEJWKS
DESCRIPTOR.message_types_by_name['JwtHeader'] = _JWTHEADER
DESCRIPTOR.message_types_by_name['JwtAuthentication'] = _JWTAUTHENTICATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HttpUri = _reflection.GeneratedProtocolMessageType('HttpUri', (_message.Message,), dict(
DESCRIPTOR = _HTTPURI,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.HttpUri)
))
_sym_db.RegisterMessage(HttpUri)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.DataSource)
))
_sym_db.RegisterMessage(DataSource)
JwtRule = _reflection.GeneratedProtocolMessageType('JwtRule', (_message.Message,), dict(
DESCRIPTOR = _JWTRULE,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtRule)
))
_sym_db.RegisterMessage(JwtRule)
RemoteJwks = _reflection.GeneratedProtocolMessageType('RemoteJwks', (_message.Message,), dict(
DESCRIPTOR = _REMOTEJWKS,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.RemoteJwks)
))
_sym_db.RegisterMessage(RemoteJwks)
JwtHeader = _reflection.GeneratedProtocolMessageType('JwtHeader', (_message.Message,), dict(
DESCRIPTOR = _JWTHEADER,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtHeader)
))
_sym_db.RegisterMessage(JwtHeader)
JwtAuthentication = _reflection.GeneratedProtocolMessageType('JwtAuthentication', (_message.Message,), dict(
DESCRIPTOR = _JWTAUTHENTICATION,
__module__ = 'envoy.config.filter.http.jwt_auth.v2alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.envoy.config.filter.http.jwt_auth.v2alpha1.JwtAuthentication)
))
_sym_db.RegisterMessage(JwtAuthentication)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z7istio.io/api/envoy/config/filter/http/jwt_auth/v2alpha1'))
# @@protoc_insertion_point(module_scope)
| 45.329177 | 1,704 | 0.760301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,066 | 0.333718 |
7db2d15a3db81041f88feba1273d33752a9d0183 | 1,730 | py | Python | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
]
| null | null | null | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
]
| null | null | null | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: gb2312 -*-
import fileinput
import os
class FileStream:
def __init__(self, filename, cutsize=2048):
self.filename = filename
self.cutsize = cutsize # 2048 byte
self.size = os.path.getsize(self.filename)
self.file = fileinput.input(filename)
self.Buff = ''
self.fileStream = self._filestream()
def cuttimes(self):
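        # Number of chunks the file splits into; a non-empty remainder adds one extra chunk.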
if self.lastsize() == 0:
return self.size / self.cutsize
        else:
return self.size / self.cutsize + 1
def lastsize(self):
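        # Size in bytes of the trailing partial chunk (0 when the file size is a multiple of cutsize).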
return self.size % self.cutsize
def _bytestream(self):
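        # Yield the file contents one byte at a time.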
for line in self.file:
for byte in line:
yield byte
def _filestream(self):
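        # Yield fixed-size chunks of cutsize bytes; the for-else clause flushes any remaining bytes.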
bytestream = self._bytestream()
for k in range(self.size):
byte = bytestream.next()
self.Buff += byte
if len(self.Buff) == self.cutsize:
data = self.Buff
self.Buff = ''
yield data
else:
if len(self.Buff) != 0:
data = self.Buff
self.Buff = ''
yield data
def getstream(self):
        # When there is no more content, return None.
try:
content = self.fileStream.next()
except StopIteration:
self.file.close()
return
else:
return content
if __name__ == '__main__':
fs = FileStream('1.txt', 1024)
print fs.cuttimes()
print fs.lastsize()
while 1:
fby = fs.getstream()
if fby is not None:
print '--------'
print fby, len(fby)
else:
break
| 25.441176 | 54 | 0.514451 | 1,382 | 0.798844 | 553 | 0.319653 | 0 | 0 | 0 | 0 | 131 | 0.075723 |