id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value) |
---|---|---|
3338560
|
from .main import get_y_pred_true
from .logging import save_train_val_figures
|
StarcoderdataPython
|
6654374
|
<filename>tracking/combined/webcam.py
from __future__ import absolute_import, division, print_function
import os
import sys
import glob
import argparse
import numpy as np
import PIL.Image as pil
import matplotlib as mpl
import matplotlib.cm as cm
import cv2
from torchvision import transforms, datasets
import networks
from utils import download_model_if_doesnt_exist
import time
import math
import torch
import requests
from imutils.video import FPS, WebcamVideoStream
from data import BaseTransform
from ssd import build_ssd
# Config
webcam_index = 1 # 0 for built-in webcam, 1 for external webcam (generally)
max_face_size = 220 # based on the actual_face_size of people very close to the camera
min_x = 300
max_x = 750 # basically the webcam frame width
DEPTH_MODEL_NAME = 'mono_1024x320'
print_fps = True # print FPS to stdout
show_webcam = True
show_depth_map = True
show_map = True # show a map of the people in window while running
map_width = 400
map_height = 400
min_distance = 0.05
max_distance = 0.13
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
THRESHOLD = 0.25
def get_bounding_boxes(frame):
x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
x = torch.autograd.Variable(x.unsqueeze(0))
y = net(x) # forward pass
return y.data
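# Downstream code indexes this result as bboxes[0, class_idx, j, 0] for the
# confidence score and bboxes[0, class_idx, j, 1:] for the box corners, which
# matches the usual ssd.pytorch 'test'-phase output layout of shape
# (batch, num_classes, top_k, 5) with box coordinates in relative units.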
def get_depth_map(frame):
input_image = transforms.ToTensor()(frame).unsqueeze(0)
# PREDICTION
input_image = input_image.to(device)
features = encoder(input_image)
outputs = depth_decoder(features)
disp = outputs[("disp", 0)]
disp_resized = torch.nn.functional.interpolate(
disp, (frame.shape[0], frame.shape[1]), mode="bilinear", align_corners=False)
return disp_resized.squeeze().cpu().numpy()
# Initialize SSD
net = build_ssd('test', 300, 21)
net.load_state_dict(torch.load('data/weights/ssd_300_VOC0712.pth'))
transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))
# Initialize Monodepth2
if torch.cuda.is_available():
device = torch.device("cuda")
print("GPU BOiii")
else:
device = torch.device("cpu")
download_model_if_doesnt_exist(DEPTH_MODEL_NAME)
model_path = os.path.join("models", DEPTH_MODEL_NAME)
print("-> Loading model from ", model_path)
encoder_path = os.path.join(model_path, "encoder.pth")
depth_decoder_path = os.path.join(model_path, "depth.pth")
# LOADING PRETRAINED MODEL
print(" Loading pretrained encoder")
encoder = networks.ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_path, map_location=device)
# extract the height and width of image that this model was trained with
feed_height = loaded_dict_enc['height']
feed_width = loaded_dict_enc['width']
filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
encoder.load_state_dict(filtered_dict_enc)
encoder.to(device)
encoder.eval()
print(" Loading pretrained decoder")
depth_decoder = networks.DepthDecoder(
num_ch_enc=encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_decoder_path, map_location=device)
depth_decoder.load_state_dict(loaded_dict)
depth_decoder.to(device)
depth_decoder.eval()
video_capture = cv2.VideoCapture(webcam_index)
fps = 0.0
with torch.no_grad():
while True:
ret, frame = video_capture.read() # frame shape 640*480*3
if not ret or frame is None:
break
t1 = time.time()
input_image = cv2.resize(frame, (feed_width, feed_height))
bboxes = get_bounding_boxes(input_image)
depth_map = get_depth_map(input_image)
bg = np.zeros((map_height, map_width, 3))
# scale each detection back up to the image
height, width = input_image.shape[:2]
scale = torch.Tensor([width, height, width, height])
# 15 is the index of the person class in the VOC label map
person_class_idx = 15
data = {}
j = 0
while bboxes[0, person_class_idx, j, 0] >= THRESHOLD:
pt = (bboxes[0, person_class_idx, j, 1:] * scale).cpu().numpy()
# distance = depth_map[int(min(depth_map.shape[1], pt[1] + 0.5 * pt[3]))][int(min(depth_map.shape[0], pt[0] + 0.5 * pt[2]))] # closest faces should be lowest values
distance = depth_map[int(pt[1])][int(pt[0])] # closest faces should be lowest values
# y_distance = int(pt[1]) + int(pt[2])
# if distance < max_distance:
# if distance > min_distance:
scaled_distance = (max(min(distance, max_distance), min_distance) - min_distance) / (max_distance - min_distance)
cv2.rectangle(input_image, (int(pt[0]), int(pt[1])), (int(pt[2]), int(pt[3])), (255, 128, 0), 1)
cv2.putText(input_image, str(distance), (int(pt[0]), int(pt[1])), FONT, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
x = (max(min(pt[0], max_x), min_x) - min_x) / (max_x - min_x) # scale to 0-1
y = 1.0 - scaled_distance # scale to 0-1
data[j] = [x, y, int(pt[2]), int(pt[3])]
cv2.circle(bg, (int(map_width * x), int((map_height - map_height * y))), 4, (255, 255, 255), -1)
j += 1
try:
requests.put("http://localhost:3000/people", data=data)
except requests.exceptions.RequestException as e:
print('Failed to send people data to the webserver')
if show_map:
cv2.imshow('map', bg)
if show_depth_map:
depth_map_scaled = cv2.resize(depth_map, (640, 480))
cv2.imshow('depth', depth_map_scaled)
if show_webcam:
# ann_frame = annotate_image(frame, bboxes)
input_image = cv2.resize(input_image, (640, 480)) / 255.
cv2.imshow('annotated_webcam', input_image)
if print_fps:
fps = (fps + (1. / (time.time() - t1))) / 2
print("fps = %f"%(fps))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
349732
|
<gh_stars>0
import os
import pyttsx
from gtts import gTTS
import vlc
import time
import wave
import contextlib
class Mic:
def __init__(self):
self.engine = pyttsx.init()
def say(self, text_to_say):
self.engine.say(text_to_say)
self.engine.runAndWait()
def stop(self):
self.engine.stop()
def check_runtime(self, fname):
with contextlib.closing(wave.open(fname,'r')) as f:
frames = f.getnframes()
rate = f.getframerate()
duration = frames / float(rate)
return duration
def google_say(self, text_to_say, fname="1.mp3"):
tts = gTTS(text=text_to_say, lang="en")
tts.save(fname)
self.player = vlc.MediaPlayer(fname)
self.player.play()
# gTTS saves MP3 rather than WAV, so the wave-based check_runtime cannot
# time it; poll the VLC player until playback finishes instead
time.sleep(0.5)
while self.player.is_playing():
    time.sleep(0.1)
self.player.stop()
os.remove(fname)
|
StarcoderdataPython
|
5173397
|
n1 = float(input('Digite sua primeira nota: '))
n2 = float(input('Digite sua segunda nota: '))
m = (n1+n2)/2
if(m<3):
print('Você foi reprovado')
elif(m<7):
print('Você vai para final')
else:
print('Você passou. Parabéns!!!')
|
StarcoderdataPython
|
12835211
|
<reponame>atigerboy/PythonCookBook<gh_stars>0
'''
weakref
weakref.WeakValueDictionary
'''
import logging
a = logging.getLogger('foo')
b = logging.getLogger('bar')
print( a is b )
c = logging.getLogger('foo')
print( a is c ) #True.same name logger is same instance
# The class in question
class Spam:
def __init__(self, name):
self.name = name
# Caching support
import weakref
_spam_cache = weakref.WeakValueDictionary()
def get_spam(name):
if name not in _spam_cache:
s = Spam(name)
_spam_cache[name] = s
else:
s = _spam_cache[name]
return s
a = get_spam('foo')
b = get_spam('bar')
print( a is b )
c = get_spam('foo')
print( a is c )
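# Because _spam_cache holds only weak references, an entry vanishes as soon as
# the last strong reference to the cached object is dropped (CPython reclaims
# it immediately via reference counting):
del a, c
print( 'foo' in _spam_cache )   # False: the weakly referenced Spam was collected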
# use new
class Spam:
_spam_cache = weakref.WeakValueDictionary()
def __new__(cls, name):
if name in cls._spam_cache:
return cls._spam_cache[name]
else:
self = super().__new__(cls)
cls._spam_cache[name] = self
return self
def __init__(self, name):
print('Initializing Spam')
self.name = name
s = Spam('Dave')
t = Spam('Dave')
print( s is t )
|
StarcoderdataPython
|
6643230
|
<reponame>kraupn3r/intranet
from django.contrib import admin
from django.urls import path, include
from rest_framework_jwt.views import refresh_jwt_token, obtain_jwt_token
from .views import *
urlpatterns = [
path('', BoardCategoryListAPIView.as_view()),
path('postlist/', PostListAPIView.as_view()),
path('post/<int:pk>/', PostDetailAPIView.as_view()),
path('comment/', CommentAPIView.as_view()),
]
|
StarcoderdataPython
|
3516255
|
<gh_stars>1-10
from .dataset import XrayDataset
from .model import XrayModel
|
StarcoderdataPython
|
1956685
|
import sys
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm.auto import tqdm
from sacrebleu.metrics import BLEU
from anlp_project.datasets.europarl import EuroParlRaw
if len(sys.argv) != 2:
print(f"Usage: {sys.argv[0]} CHECKPOINT_PATH")
sys.exit(1)
checkpoint_name = sys.argv[1]
if "en-de" in checkpoint_name:
prompt = "translate from English to German: "
source_idx = 1
elif "de-en" in checkpoint_name:
prompt = "translate from German to English: "
source_idx = 0
else:
print("Checkpoint name doesn't have en-de or de-en. Can't figure out direction")
sys.exit(1)
# Initialize the tokenizer
tokenizer = AutoTokenizer.from_pretrained("/scratch/en-de/")
# Initialize the model
model = AutoModelForSeq2SeqLM.from_pretrained("/scratch/en-de/")
bleu = BLEU(lowercase=True)
ds = EuroParlRaw()
N = len(ds)
batch_size = 100
score = 0
refs = [[]]
hyps = []
for i in tqdm(range(N // batch_size)):
batch = []
for x in range(i * batch_size, (i + 1) * batch_size):
batch.append(f"{prompt}{ds[x][source_idx]}")  # prompt already ends with ": "
refs[0].append(ds[x][1 - source_idx])
tokenized_text = tokenizer(
batch, padding="max_length", truncation=True, return_tensors="pt"
)
# Perform translation and decode the output
translation = model.generate(**tokenized_text)
translated_text = tokenizer.batch_decode(translation, skip_special_tokens=True)
hyps += translated_text
for i in range(len(hyps)):
print(hyps[i], refs[0][i], sep="\n")
print("---------------------------")
print(bleu.corpus_score(hyps, refs))
|
StarcoderdataPython
|
3241106
|
#!/usr/bin/env python
"""Client actions dealing with memory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import io
import os
import re
import shutil
from future.builtins import str
import psutil
from typing import Iterable
from typing import List
import yara
from grr_response_client import actions
from grr_response_client import client_utils
from grr_response_client import streaming
from grr_response_client.client_actions import tempfiles
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import memory as rdf_memory
from grr_response_core.lib.rdfvalues import paths as rdf_paths
def ProcessIterator(pids, process_regex_string, ignore_grr_process, error_list):
"""Yields all (psutil-) processes that match certain criteria.
Args:
pids: A list of pids. If given, only the processes with those pids are
returned.
process_regex_string: If given, only processes whose name matches the regex
are returned.
ignore_grr_process: If True, the grr process itself will not be returned.
error_list: All errors while handling processes are appended to this list.
Type is repeated ProcessMemoryError.
Yields:
psutils.Process objects matching all criteria.
"""
pids = set(pids)
if ignore_grr_process:
grr_pid = psutil.Process().pid
else:
grr_pid = -1
if process_regex_string:
process_regex = re.compile(process_regex_string)
else:
process_regex = None
if pids:
process_iterator = []
for pid in pids:
try:
process_iterator.append(psutil.Process(pid=pid))
except Exception as e: # pylint: disable=broad-except
error_list.Append(
rdf_memory.ProcessMemoryError(
process=rdf_client.Process(pid=pid), error=str(e)))
else:
process_iterator = psutil.process_iter()
for p in process_iterator:
if process_regex and not process_regex.search(p.name()):
continue
if p.pid == grr_pid:
continue
yield p
class YaraProcessScan(actions.ActionPlugin):
"""Scans the memory of a number of processes using Yara."""
in_rdfvalue = rdf_memory.YaraProcessScanRequest
out_rdfvalues = [rdf_memory.YaraProcessScanResponse]
def _ScanRegion(self, rules, chunks, deadline):
for chunk in chunks:
if not chunk.data:
break
self.Progress()
time_left = (deadline - rdfvalue.RDFDatetime.Now()).ToInt(
rdfvalue.SECONDS)
for m in rules.match(data=chunk.data, timeout=time_left):
# Note that for regexps in general it might be possible to
# specify characters at the end of the string that are not
# part of the returned match. In that case, this algorithm
# might miss results in unlikely scenarios. We doubt that the
# Yara library even allows such constructs but it's good to be
# aware that this can happen.
for offset, _, s in m.strings:
if offset + len(s) > chunk.overlap:
# We haven't seen this match before.
rdf_match = rdf_memory.YaraMatch.FromLibYaraMatch(m)
for string_match in rdf_match.string_matches:
string_match.offset += chunk.offset
yield rdf_match
break
def _GetMatches(self, psutil_process, scan_request):
if scan_request.per_process_timeout:
deadline = rdfvalue.RDFDatetime.Now() + scan_request.per_process_timeout
else:
deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(
1, rdfvalue.WEEKS)
rules = scan_request.yara_signature.GetRules()
process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
with process:
streamer = streaming.Streamer(
chunk_size=scan_request.chunk_size,
overlap_size=scan_request.overlap_size)
matches = []
try:
for region in client_utils.MemoryRegions(process, scan_request):
chunks = streamer.StreamMemory(
process, offset=region.start, amount=region.size)
for m in self._ScanRegion(rules, chunks, deadline):
matches.append(m)
if 0 < scan_request.max_results_per_process <= len(matches):
return matches
except yara.Error as e:
# Yara internal error 30 is too many hits (obviously...). We
# need to report this as a hit, not an error.
if "internal error: 30" in str(e):
return matches
raise
return matches
# We don't want individual response messages to get too big so we send
# multiple responses for 100 processes each.
_RESULTS_PER_RESPONSE = 100
def _ScanProcess(self, process, scan_request, scan_response):
rdf_process = rdf_client.Process.FromPsutilProcess(process)
start_time = rdfvalue.RDFDatetime.Now()
try:
matches = self._GetMatches(process, scan_request)
scan_time = rdfvalue.RDFDatetime.Now() - start_time
scan_time_us = scan_time.ToInt(rdfvalue.MICROSECONDS)
except yara.TimeoutError:
scan_response.errors.Append(
rdf_memory.ProcessMemoryError(
process=rdf_process,
error="Scanning timed out (%s)." %
(rdfvalue.RDFDatetime.Now() - start_time)))
return
except Exception as e: # pylint: disable=broad-except
scan_response.errors.Append(
rdf_memory.ProcessMemoryError(process=rdf_process, error=str(e)))
return
if matches:
scan_response.matches.Append(
rdf_memory.YaraProcessScanMatch(
process=rdf_process, match=matches, scan_time_us=scan_time_us))
else:
scan_response.misses.Append(
rdf_memory.YaraProcessScanMiss(
process=rdf_process, scan_time_us=scan_time_us))
def _SaveSignatureShard(self, scan_request):
"""Writes a YaraSignatureShard received from the server to disk.
Args:
scan_request: The YaraProcessScanRequest sent by the server.
Returns:
The full Yara signature, if all shards have been received. Otherwise,
None is returned.
"""
def GetShardName(shard_index, num_shards):
return "shard_%02d_of_%02d" % (shard_index, num_shards)
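# e.g. GetShardName(0, 3) -> "shard_00_of_03"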
signature_dir = os.path.join(tempfiles.GetDefaultGRRTempDirectory(),
"Sig_%s" % self.session_id.Basename())
# Create the temporary directory and set permissions, if it does not exist.
tempfiles.EnsureTempDirIsSane(signature_dir)
shard_path = os.path.join(
signature_dir,
GetShardName(scan_request.signature_shard.index,
scan_request.num_signature_shards))
with io.open(shard_path, "wb") as f:
f.write(scan_request.signature_shard.payload)
dir_contents = set(os.listdir(signature_dir))
all_shards = [
GetShardName(i, scan_request.num_signature_shards)
for i in range(scan_request.num_signature_shards)
]
if dir_contents.issuperset(all_shards):
# All shards have been received; delete the temporary directory and
# return the full signature.
full_signature = io.BytesIO()
for shard in all_shards:
with io.open(os.path.join(signature_dir, shard), "rb") as f:
full_signature.write(f.read())
shutil.rmtree(signature_dir, ignore_errors=True)
return full_signature.getvalue().decode("utf-8")
else:
return None
def Run(self, args):
if args.yara_signature or not args.signature_shard.payload:
raise ValueError(
"A Yara signature shard is required, and not the full signature.")
if args.num_signature_shards == 1:
# Skip saving to disk if there is just one shard.
yara_signature = args.signature_shard.payload.decode("utf-8")
else:
yara_signature = self._SaveSignatureShard(args)
if yara_signature is None:
# We haven't received the whole signature yet.
return
scan_request = args.Copy()
scan_request.yara_signature = yara_signature
scan_response = rdf_memory.YaraProcessScanResponse()
processes = ProcessIterator(scan_request.pids, scan_request.process_regex,
scan_request.ignore_grr_process,
scan_response.errors)
for process in processes:
self.Progress()
num_results = (
len(scan_response.errors) + len(scan_response.matches) +
len(scan_response.misses))
if num_results >= self._RESULTS_PER_RESPONSE:
self.SendReply(scan_response)
scan_response = rdf_memory.YaraProcessScanResponse()
self._ScanProcess(process, scan_request, scan_response)
self.SendReply(scan_response)
def _PrioritizeRegions(
regions,
prioritize_offsets
):
"""Returns reordered `regions` to prioritize regions containing offsets.
Args:
regions: Iterable of ProcessMemoryRegions.
prioritize_offsets: List of integers containing prioritized offsets. Pass
pre-sorted regions and offsets to improve this functions performance from
O(n * log n) to O(n) respectively.
Returns:
An iterable of first all ProcessMemoryRegions that contain a prioritized
offset, followed by all regions that do not contain a prioritized offset.
All prioritized regions and all unprioritized regions are sorted by their
starting address.
"""
# Sort regions and offsets to be monotonically increasing and insert sentinels.
all_regions = collections.deque(sorted(regions, key=lambda r: r.start))
all_regions.append(None)
region = all_regions.popleft()
all_offsets = collections.deque(sorted(prioritize_offsets))
all_offsets.append(None)
offset = all_offsets.popleft()
prio_regions = []
nonprio_regions = []
# This loop runs in O(max(|regions|, |offsets|)) with use of invariants:
# - offset is increasing monotonically.
# - region[n+1] end >= region[n+1] start >= region[n] start
# Because memory regions could theoretically overlap, no relationship exists
# between the end of region[n+1] and region[n].
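# Worked example (hypothetical values): regions starting at 0, 10 and 20, each
# of size 10, with prioritize_offsets = [5, 25], classify the regions at 0 and
# 20 as prioritized and the region at 10 as non-prioritized, so the returned
# order is the regions starting at 0, 20, then 10.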
while region is not None and offset is not None:
if offset < region.start:
# Offset is before the first region, thus cannot be contained in any
# region. This could happen when some memory regions are unreadable.
offset = all_offsets.popleft()
elif offset >= region.start + region.size:
# Offset comes after the first region. The first region can not contain
# any following offsets, because offsets increase monotonically.
nonprio_regions.append(region)
region = all_regions.popleft()
else:
# The first region contains the offset. Mark it as prioritized and
# proceed with the next offset. All following offsets that are contained
# in the current region are skipped with the first if-branch.
prio_regions.append(region)
region = all_regions.popleft()
offset = all_offsets.popleft()
all_regions.appendleft(region) # Put back the current region or sentinel.
all_regions.pop() # Remove sentinel.
# When there are fewer offsets than regions, remaining regions can be present
# in `all_regions`.
return prio_regions + nonprio_regions + list(all_regions)
def _ApplySizeLimit(regions,
size_limit):
"""Truncates regions so that the total size stays in size_limit."""
total_size = 0
regions_in_limit = []
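# e.g. regions of sizes 4, 3 and 5 with size_limit 8 keep only the first two
# regions, since including the third would raise the running total to 12 > 8.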
for region in regions:
total_size += region.size
if total_size > size_limit:
break
regions_in_limit.append(region)
return regions_in_limit
class YaraProcessDump(actions.ActionPlugin):
"""Dumps a process to disk and returns pathspecs for GRR to pick up."""
in_rdfvalue = rdf_memory.YaraProcessDumpArgs
out_rdfvalues = [rdf_memory.YaraProcessDumpResponse]
def _SaveMemDumpToFile(self, fd, chunks):
bytes_written = 0
for chunk in chunks:
if not chunk.data:
return 0
fd.write(chunk.data)
bytes_written += len(chunk.data)
return bytes_written
def _SaveMemDumpToFilePath(self, filename, chunks):
with open(filename, "wb") as fd:
bytes_written = self._SaveMemDumpToFile(fd, chunks)
# When getting read errors, we just delete the file and move on.
if not bytes_written:
try:
os.remove(filename)
except OSError:
pass
return bytes_written
def _SaveRegionToDirectory(self, psutil_process, process, region, tmp_dir,
streamer):
end = region.start + region.size
# _ReplaceDumpPathspecsWithMultiGetFilePathspec in DumpProcessMemory
# flow asserts that MemoryRegions can be uniquely identified by their
# file's basename.
filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(), psutil_process.pid,
region.start, end)
filepath = os.path.join(tmp_dir.path, filename)
chunks = streamer.StreamMemory(
process, offset=region.start, amount=region.size)
bytes_written = self._SaveMemDumpToFilePath(filepath, chunks)
if not bytes_written:
return None
# TODO: Remove workaround after client_utils are fixed.
canonical_path = client_utils.LocalPathToCanonicalPath(filepath)
if not canonical_path.startswith("/"):
canonical_path = "/" + canonical_path
return rdf_paths.PathSpec(
path=canonical_path, pathtype=rdf_paths.PathSpec.PathType.TMPFILE)
def DumpProcess(self, psutil_process, args):
response = rdf_memory.YaraProcessDumpInformation()
response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
streamer = streaming.Streamer(chunk_size=args.chunk_size)
with client_utils.OpenProcessForMemoryAccess(psutil_process.pid) as process:
regions = list(client_utils.MemoryRegions(process, args))
if args.prioritize_offsets:
regions = _PrioritizeRegions(regions, args.prioritize_offsets)
if args.size_limit:
total_regions = len(regions)
regions = _ApplySizeLimit(regions, args.size_limit)
if len(regions) < total_regions:
response.error = ("Byte limit exceeded. Writing {} of {} "
"regions.").format(len(regions), total_regions)
regions = sorted(regions, key=lambda r: r.start)
with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
for region in regions:
self.Progress()
pathspec = self._SaveRegionToDirectory(psutil_process, process,
region, tmp_dir, streamer)
if pathspec is not None:
region.file = pathspec
response.memory_regions.Append(region)
return response
def Run(self, args):
if args.prioritize_offsets and len(args.pids) != 1:
raise ValueError(
"Supplied prioritize_offsets {} for PIDs {} in YaraProcessDump. "
"Required exactly one PID.".format(args.prioritize_offsets,
args.pids))
result = rdf_memory.YaraProcessDumpResponse()
for p in ProcessIterator(args.pids, args.process_regex,
args.ignore_grr_process, result.errors):
self.Progress()
start = rdfvalue.RDFDatetime.Now()
try:
response = self.DumpProcess(p, args)
now = rdfvalue.RDFDatetime.Now()
response.dump_time_us = (now - start).ToInt(rdfvalue.MICROSECONDS)
result.dumped_processes.Append(response)
if response.error:
# Limit exceeded, we bail out early.
break
except Exception as e: # pylint: disable=broad-except
result.errors.Append(
rdf_memory.ProcessMemoryError(
process=rdf_client.Process.FromPsutilProcess(p), error=str(e)))
continue
self.SendReply(result)
|
StarcoderdataPython
|
5161343
|
<gh_stars>1-10
# The following license applies to Flask
# Some of the code on this file have been based off of Flask's documentation.
# Copyright 2010 Pallets
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The following license applies to Flask-SocketIO
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Other code in file written by <NAME>
# Copyright (c) 2021 <NAME>
import os
import click
import time
import random
import json
from flask import Flask, flash, g, redirect, render_template, request, url_for
from flask_socketio import SocketIO, emit, join_room, leave_room, disconnect, close_room
from collections import deque
# ------------------------------------------------------------------------------------------
# Flask app, SocketIO
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY ='dev'
)
def init_app(app):
app.cli.add_command(init_db_command)
def init_db():
Base.metadata.create_all(engine)
@click.command('init-db')
def init_db_command():
init_db()
click.echo('Successfully initialized the database.')
init_app(app)
socketIO = SocketIO(app)
# ------------------------------------------------------------------------------------------
# random games SocketIO
game_rooms_dictionary = dict()
game_rooms = list()
for i in range(50):
game_rooms.append(list())
connectedPlayersList = deque()
# handler for the room request from player in random games page
@socketIO.on('request_room')
def request_game_room():
app.logger.info("Player " + str(request.sid) + " requested room")
if request.sid not in connectedPlayersList:
connectedPlayersList.append(request.sid)
for i in range(len(game_rooms)):
if 2 > len(game_rooms[i]):
app.logger.info("# of players currently in room " + str(i + 1) + " is (before adding): " + str(len(game_rooms[i])))
game_rooms[i].append(request.sid)
join_room(i)
app.logger.info("Player " + str(request.sid) + " joined room number: " + str(i + 1))
game_rooms_dictionary[request.sid] = i
app.logger.info("# of players currently in room " + str(i + 1) + " is (after adding): " + str(len(game_rooms[i])))
break
# notify the two players in the room of their colour (blue [1] or grey [0])
def notifyCurrentSessionPlayerColour():
isPlayerBlue = None
i = game_rooms_dictionary[request.sid]
for sid in game_rooms[i]:
try:
isPlayerBlue = game_rooms[i].index(sid)
app.logger.info("player " + str(sid) + " colour is: " + str(isPlayerBlue))
except:
pass
if isPlayerBlue == 0 or isPlayerBlue == 1:
socketIO.emit('player_colour_assignment', {'isPlayerBlue': isPlayerBlue}, room=sid)
# alert the users in random games page if they have been placed in a room yet or not
@socketIO.on('check_entered_room')
def check_entered_game_room():
app.logger.info("Player " + str(request.sid) + " is requesting check_entered_room")
try:
if len(game_rooms[game_rooms_dictionary[request.sid]]) == 2:
socketIO.emit('check_entered_room_response', {'response': True, 'game_room_number': (game_rooms_dictionary[request.sid] + 1)},
room=game_rooms_dictionary[request.sid])
i = game_rooms_dictionary[request.sid]
app.logger.info("player " + str(game_rooms[i][0]) + " and " + str(game_rooms[i][1]) + " in room")
notifyCurrentSessionPlayerColour()
else:
socketIO.emit('check_entered_room_response', {'response': False}, room=game_rooms_dictionary[request.sid])
i = game_rooms_dictionary[request.sid]
app.logger.info("player " + str(game_rooms[i][0]) + " and " + str(game_rooms[i][1]) + " not in room")
except:
app.logger.error("could not find current game room number")
socketIO.emit('check_entered_room_response', {'response': False}, room=game_rooms_dictionary[request.sid])
# handler for user disconnection, triggered when one of the players closes their page (may not work for some browsers)
@socketIO.on('disconnect_from_room')
def disconnect_from_game_room():
try:
if request.sid in connectedPlayersList:
socketIO.emit('game_session_valid_response', {'session_valid': False}, room=game_rooms_dictionary[request.sid])
i = game_rooms_dictionary[request.sid]
# sleep to allow socketIO emit to reach client before disconnecting client
time.sleep(2)
app.logger.info("clearing room " + str(i + 1))
for sid in game_rooms[i]:
del game_rooms_dictionary[sid]
disconnect(sid)
connectedPlayersList.remove(sid)
close_room(i)
game_rooms[i].clear()
app.logger.info("# of players currently in room " + str(i + 1) + " is (after removing): " + str(len(game_rooms[i])))
except:
app.logger.error("error disconnecting user from game room, perhaps user has already been disconnected")
# emits stone placements of the players to their room
@socketIO.on('stone_placement')
def stone_placement(json):
socketIO.emit('placement_response', json, room=game_rooms_dictionary[request.sid])
# ------------------------------------------------------------------------------------------
# private game rooms SocketIO
sid_private_game_rooms_dictionary = dict()
private_game_rooms_dictionary = dict()
private_room_connected_players = set()
unique_name_list = ['dolphin', 'donguri', 'turtle', 'omok', 'orange', 'monkey', 'cactus', 'game']
# generates a unique room code for private game rooms
def get_unique_room_code():
random_code = unique_name_list[random.randint(0, 7)]
random_code += str(random.randint(0, 100000))
if random_code not in sid_private_game_rooms_dictionary.keys():
return random_code
else:
return get_unique_room_code()
# handler for user disconnection, triggered when one of the players closes their page (may not work for some browsers)
@socketIO.on('disconnect_from_private_room')
def disconnect_from_private_game_room():
try:
if request.sid in private_room_connected_players:
socketIO.emit('game_session_valid_response_private_room', {'session_valid': False},
room=sid_private_game_rooms_dictionary[request.sid])
private_room_name = sid_private_game_rooms_dictionary[request.sid]
# sleep to allow socketIO emit to reach client before disconnecting client
time.sleep(2)
app.logger.info("clearing room " + private_room_name)
for sid in private_game_rooms_dictionary[private_room_name]:
del sid_private_game_rooms_dictionary[sid]
disconnect(sid)
private_room_connected_players.remove(sid)
close_room(private_room_name)
del private_game_rooms_dictionary[private_room_name]
app.logger.info("cleared room " + private_room_name)
except:
app.logger.error("error disconnecting user from game room, perhaps user has already been disconnected")
# handler for the unique room code request from player in create room page
@socketIO.on('request_private_room_code')
def private_room_code():
app.logger.info("Player " + str(request.sid) + " requested room code")
room_code = get_unique_room_code()
socketIO.emit('private_room_code', {'room_code': room_code}, room=request.sid)
sid_private_game_rooms_dictionary[request.sid] = room_code
private_game_rooms_dictionary[room_code] = [request.sid]
private_room_connected_players.add(request.sid)
join_room(room_code)
# notify the two players in the room of their colour (blue [1] or grey [0])
def notifyPrivateRoomPlayerColour():
isPlayerBlue = None
private_room_name = sid_private_game_rooms_dictionary[request.sid]
for sid in private_game_rooms_dictionary[private_room_name]:
try:
isPlayerBlue = private_game_rooms_dictionary[private_room_name].index(sid)
app.logger.info("player " + str(sid) + " colour is: " + str(isPlayerBlue))
except:
pass
if isPlayerBlue == 0 or isPlayerBlue == 1:
socketIO.emit('player_colour_assignment', {'isPlayerBlue': isPlayerBlue}, room=sid)
# handler for the join by room code request from player in join game page
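# join_status codes emitted below: 1 = joined successfully,
# 0 = no such room (or the room has no host), -1 = the room is already full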
@socketIO.on('join_by_private_room_code')
def private_room_code(json):
try:
app.logger.info("Player " + str(request.sid) + " requesting to join " + json['room_code'])
if json['room_code'] in private_game_rooms_dictionary.keys():
if len(private_game_rooms_dictionary[json['room_code']]) == 1:
socketIO.emit('join_by_private_room_code_response', {'join_status': 1}, room=request.sid)
sid_private_game_rooms_dictionary[request.sid] = json['room_code']
private_game_rooms_dictionary[json['room_code']].append(request.sid)
private_room_connected_players.add(request.sid)
join_room(json['room_code'])
socketIO.emit('start_game_private_room', room=json['room_code'])
notifyPrivateRoomPlayerColour()
elif (len(private_game_rooms_dictionary[json['room_code']])) < 1:
socketIO.emit('join_by_private_room_code_response', {'join_status': 0}, room=request.sid)
else:
socketIO.emit('join_by_private_room_code_response', {'join_status': -1}, room=request.sid)
else:
socketIO.emit('join_by_private_room_code_response', {'join_status': 0}, room=request.sid)
except:
app.logger.error("error joining user by private game code")
# emits stone placements of the players to their room
@socketIO.on('stone_placement_private_room')
def stone_placement(json):
socketIO.emit('placement_response_private_room', json, room=sid_private_game_rooms_dictionary[request.sid])
# ------------------------------------------------------------------------------------------
# shared SocketIO
# handler for user disconnection, triggered by SocketIO
@socketIO.on('disconnect')
def disconnect_handler():
try:
    # only notify a private room if this player actually joined one
    if request.sid in sid_private_game_rooms_dictionary:
        socketIO.emit('game_session_valid_response_private_room', {'session_valid': 0},
                      room=sid_private_game_rooms_dictionary[request.sid])
    disconnect_from_game_room()
    disconnect_from_private_game_room()
except:
app.logger.error("error disconnecting user from game room, perhaps user has already been disconnected")
# ------------------------------------------------------------------------------------------
# http requests
# main welcome page
@app.route('/', methods=('GET', 'POST'))
def main_home():
if request.method == 'POST':
if 'play_button' in request.form:
if request.form['play_button'] == 'start_game':
return redirect(url_for('modes'))
if 'license_info_button' in request.form:
if request.form['license_info_button'] == 'show_license_info':
return redirect(url_for('license_info'))
return render_template('index.html')
# game modes select page
@app.route('/game_modes', methods=('GET', 'POST'))
def modes():
if request.method == 'POST':
if 'game_mode_button' in request.form:
if request.form['game_mode_button'] == 'random_game':
return redirect(url_for('random_game'))
elif request.form['game_mode_button'] == 'create_room':
return redirect(url_for('create_game_room'))
elif request.form['game_mode_button'] == 'join_room':
return redirect(url_for('join_game_room'))
return render_template('modes.html')
# join game page
@app.route('/license_info', methods=['GET'])
def license_info():
return render_template('license_page.html')
# random game page
@app.route('/random_game', methods=('GET', 'POST'))
def random_game():
if request.method == 'POST':
if 'corner_button' in request.form:
if request.form['corner_button'] == 'exit_queue':
return redirect(url_for('main_home'))
return render_template('random_game.html')
# create game page
@app.route('/create_game', methods=['GET'])
def create_game_room():
return render_template('create_room.html')
# join game page
@app.route('/join_game', methods=['GET'])
def join_game_room():
return render_template('join_room.html')
# ------------------------------------------------------------------------------------------
# run app
if __name__ == '__main__':
socketIO.run(app)
|
StarcoderdataPython
|
11217346
|
<reponame>flucto-gmbh/msb_attitude<filename>src/issue_complementary.py
import numpy as np
from os import path
import sys
# add ahrs directory to PYTHONPATH
SCRIPT_DIR = path.dirname(path.abspath(__file__))
sys.path.append(path.dirname(SCRIPT_DIR))
try:
from ahrs.ahrs.filters import Complementary
except ImportError as e:
print(f'failed to import Complementary filter from ahrs: {e}')
sys.exit(-1)
try:
from ahrs.ahrs.common import Quaternion
except ImportError as e:
print(f'failed to import Quaternion from ahrs: {e}')
sys.exit(-1)
def main():
gyr = np.array([1, 1, 1])
acc = np.array([0, 0, -1])
mag = np.array([2, 22, 2])
q0 = Quaternion(
np.array([1,1,1,1])
)
complementary = Complementary(
gyr=np.array([[1, 1, 1]]),
acc=np.array([[0, 0, -1]]),
mag=np.array([[2,20,2]]),
q0=q0
)
q_attitude = complementary.update(
q = q0,
gyr=gyr,
acc=acc,
mag=mag,
)
print(f'attitude: {q_attitude}')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3269232
|
from .adt import ADT
from .adt import memo as ADTmemo
from .prelude import *
from . import atl_types as T
from .frontend import AST
from fractions import Fraction
from math import gcd as _gcd
def _lcm(x,y):
return (x*y)//_gcd(x,y)
# notes on symbols to use
# Note that BOOL is a type and Bool a formula constructor function
#from pysmt.shortcuts import (
# Symbol, BOOL, INT, REAL,
# Bool, Int, Real,
# TRUE, FALSE, And, Or, Not,
# GT, GE, LE, LT, Equals, NotEquals,
# Plus, Minus, Times, Div
#)
import pysmt
from pysmt import shortcuts as SMT
def _get_smt_solver():
factory = pysmt.factory.Factory(pysmt.shortcuts.get_env())
slvs = factory.all_solvers()
if len(slvs) == 0: raise OSError("Could not find any SMT solvers")
return pysmt.shortcuts.Solver(name=next(iter(slvs)))
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Description of Bounds-Checking Problem
BD = ADT("""
module BD {
sys = VarIntro ( sym name, sys cont, srcinfo srcinfo )
| RelIntro ( sym name, int n_args,
sys cont, srcinfo srcinfo )
| Guard ( pred pred, sys cont, srcinfo srcinfo )
| Both ( sys lhs, sys rhs )
| Check ( pred pred, sys cont, srcinfo srcinfo )
| NullSys ()
pred = GTZ ( affine expr )
| GEZ ( affine expr )
| EQZ ( affine expr )
| Rel ( sym name, sym* args )
| Conj ( pred lhs, pred rhs )
| Disj ( pred lhs, pred rhs )
affine = ( fraction offset, term* terms )
term = ( fraction coeff, sym var )
}
""", {
'sym': lambda x: type(x) is Sym,
'fraction': lambda x: type(x) is Fraction,
'srcinfo': lambda x: type(x) is SrcInfo,
})
ADTmemo(BD,['NullSys'])
BD.null = BD.NullSys()
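# For intuition: the extraction below turns a single access x[i] with
# 0 <= i < n into (roughly)
#   VarIntro(n, VarIntro(i, Check(Conj(GEZ(i), GTZ(n - i)), NullSys(), ...), ...), ...)
# i.e. introduce the symbols, then check the conjunction of both bounds.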
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Operator Overloading to help construct affine expressions
def _alift_(obj):
if type(obj) is BD.affine:
return obj
elif type(obj) is BD.term:
return BD.affine( Fraction(0), [obj] )
elif type(obj) is Sym:
return BD.affine( Fraction(0), [ BD.term(Fraction(1),obj) ] )
elif type(obj) is int:
return BD.affine( Fraction(obj), [] )
elif type(obj) is Fraction:
return BD.affine( obj, [] )
else: assert False, f"unsupported affine lifting for type {type(obj)}"
@extclass(BD.affine)
def __add__(lhs,rhs):
rhs = _alift_(rhs)
off = lhs.offset + rhs.offset
terms = lhs.terms.copy()
terms.extend(rhs.terms)
return BD.affine( off, terms )
@extclass(BD.affine)
def __radd__(rhs,lhs): return _alift_(lhs) + rhs
@extclass(BD.affine)
def __neg__(arg):
off = -arg.offset
terms = [ BD.term( -t.coeff, t.var ) for t in arg.terms ]
return BD.affine( off, terms )
@extclass(BD.affine)
def __sub__(lhs,rhs): return lhs + (-_alift_(rhs))
@extclass(BD.affine)
def __rsub__(rhs,lhs): return _alift_(lhs) + (-rhs)
@extclass(BD.affine)
def __mul__(lhs,rhs):
assert type(rhs) is Fraction, "expected fraction to scale by"
off = lhs.offset * rhs
terms = [ BD.term( t.coeff * rhs, t.var ) for t in lhs.terms ]
return BD.affine( off, terms )
@extclass(BD.affine)
def __rmul__(rhs,lhs): return rhs * lhs
@extclass(BD.affine)
def __gt__(lhs,rhs):
return BD.GTZ( lhs - rhs )
@extclass(BD.affine)
def __ge__(lhs,rhs):
return BD.GEZ( lhs - rhs )
@extclass(BD.affine)
def __lt__(lhs,rhs):
return BD.GTZ( rhs - lhs )
@extclass(BD.affine)
def __le__(lhs,rhs):
return BD.GEZ( rhs - lhs )
@extclass(BD.affine)
def eq(lhs,rhs):
return BD.EQZ( lhs - rhs )
del __add__, __radd__, __neg__, __sub__, __rsub__, __mul__, __rmul__
del __gt__, __lt__, __ge__, __le__, eq
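# With this overloading, bound predicates read naturally in the extraction
# code below: (_alift_(i) >= 0) builds BD.GEZ(i) and (_alift_(i) < n)
# builds BD.GTZ(n - i).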
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Extraction of Bounds-Checking Problem
class BoundsExtraction:
def __init__(self, f):
self._ctxt = Context()
# pack context with relation sizes
for rd in f.relations:
self._ctxt.set(rd.name, rd.sizes)
# construct the system corresponding to the expression body
sys = self.extract(f.body)
# and then wrap that in variable declarations for sizes/relations
for rd in reversed(f.relations):
sys = BD.RelIntro( rd.name, len(rd.sizes), sys, rd.srcinfo )
for sz in reversed(f.sizes):
sys = BD.VarIntro( sz.name, sys, sz.srcinfo )
self._sys = sys
def system(self): return self._sys
def _get_rel_sizes(self,rname):
szs = self._ctxt.get(rname)
assert szs is not None, "Expected all relation lookups to succeed"
return szs
def extract(self, e):
eclass = type(e)
# do not perform bounds-checks on code that was already checked.
if hasattr(e, 'func_call_sub'):
return BD.null
elif eclass is AST.Var or eclass is AST.Const:
return BD.null
elif eclass is AST.BinOp:
lhs = self.extract(e.lhs)
rhs = self.extract(e.rhs)
if lhs is BD.null: return rhs
elif rhs is BD.null: return lhs
else: return BD.Both(lhs,rhs)
elif eclass is AST.Tuple:
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Proj:
return self.extract(e.arg)
elif eclass is AST.TensorLit:
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Gen or eclass is AST.Sum:
# a sanity check; maybe not necessary, but if true
# this greatly simplifies naming issues, so let's assert for now
assert self._ctxt.get(e.name) is None, "Expected unique name symbols"
# build guard predicate (let x be e.name)
lo_bd = (_alift_(e.name) >= 0)
hi_bd = (_alift_(e.name) < e.range)
guard = BD.Conj( lo_bd, hi_bd )
# assemble system
body = self.extract(e.body)
if body is BD.null:
return body
else:
body = BD.Guard( guard, body, e.srcinfo )
return BD.VarIntro( e.name, body, e.srcinfo )
elif eclass is AST.Access:
# handle multiple accesses and w.r.t the tensor type...
sys = self.extract(e.base)
typ = e.base.type
for i_ast in e.idx:
i = self.index(i_ast)
rng = typ.range
typ = typ.type
lo_chk = (i >= 0)
hi_chk = (i < rng)
sys = BD.Check( BD.Conj(lo_chk, hi_chk), sys, e.srcinfo )
return sys
elif eclass is AST.BuiltIn:
# the built-in itself has no effect
sys = BD.null
for a in e.args:
s = self.extract(a)
if sys is BD.null:
sys = s
elif s is not BD.null:
sys = BD.Both( s, sys )
return sys
elif eclass is AST.Indicate:
# relational predicates may require introducing
# additional bounds checks on their arguments
guard, checks = self.pred(e.pred)
body = self.extract(e.body)
if body is not BD.null:
body = BD.Guard( guard, body, e.srcinfo )
# now wrap with any checks regardless of whether body is null
for name,eq,chk,srcinfo in checks:
body = BD.VarIntro( name,
BD.Guard( eq,
BD.Check(chk,body,srcinfo),
srcinfo ),
srcinfo )
return body
elif eclass is AST.Let:
sys = self.extract(e.ret)
for s in e.stmts:
rhs = self.extract(s.rhs)
if sys is BD.null:
sys = rhs
elif rhs is not BD.null:
sys = BD.Both( rhs, sys )
return sys
else: assert False, "unexpected case"
def index(self, e):
eclass = type(e)
if eclass is AST.IdxConst:
return _alift_(e.val)
elif eclass is AST.IdxVar or eclass is AST.IdxSize:
return _alift_(e.name)
elif eclass is AST.IdxAdd:
return self.index(e.lhs) + self.index(e.rhs)
elif eclass is AST.IdxSub:
return self.index(e.lhs) - self.index(e.rhs)
elif eclass is AST.IdxScale:
return e.coeff * self.index(e.idx)
else: assert False, "unexpected case"
# returns a second "checks" list of tuples
# [(var_name, var_eq, var_bd_chk, srcinfo)]
def pred(self, p):
pclass = type(p)
if pclass is AST.Cmp:
lhs = self.index(p.lhs)
rhs = self.index(p.rhs)
if p.op == "<": return lhs < rhs, []
elif p.op == ">": return lhs > rhs, []
elif p.op == "<=": return lhs <= rhs, []
elif p.op == ">=": return lhs >= rhs, []
elif p.op == "==": return lhs.eq(rhs), []
else: assert False, f"Unrecognized Op {p.op}"
elif pclass is AST.Relation:
sizes = self._get_rel_sizes(p.name)
args, checks = [], []
for k,(i_arg,N) in enumerate(zip(p.args,sizes)):
i = self.index(i_arg)
v = Sym(f"{p.name}{k}")
def_eq = i.eq(v)
bd_chk = BD.Conj( i >= 0, i < N )
args.append(v)
checks.append( (v,def_eq,bd_chk,i_arg.srcinfo) )
return BD.Rel(p.name, args), checks
elif pclass is AST.Conj or pclass is AST.Disj:
lhs, lchk = self.pred(p.lhs)
rhs, rchk = self.pred(p.rhs)
lchk.extend(rchk)
ctr = BD.Conj if pclass is AST.Conj else BD.Disj
return ctr(lhs,rhs), lchk
else: assert False, "Impossible Case"
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Execution of Bounds-Checking Problem
class BoundsCheck:
def __init__(self,sys):
assert isinstance(sys, BD.sys), "Expected a bounds-system..."
self._slv = _get_smt_solver()
self._slv.reset_assertions()
self._ctxt = Context()
self._errors = []
self._slv.push()
self.check(sys)
self._slv.pop()
self.report_errors()
def report_errors(self):
if len(self._errors) == 0: return
errs = []
errs.append("Failed Bounds Checks:")
for srcinfo,msg in self._errors:
errs.append(f"{srcinfo}: {msg}")
raise TypeError("\n".join(errs))
def _err(self, node, msg):
self._errors.append((node.srcinfo, msg))
def _get_solution(self, pred):
smt_syms = [ smt_sym for nm,smt_sym in self._ctxt.items()
if smt_sym.get_type() == SMT.INT ]
self._slv.push()
self._slv.add_assertion(pred)
val_map = self._slv.get_py_values(smt_syms)
self._slv.pop()
mapping = []
for nm,smt_sym in self._ctxt.items():
if smt_sym.get_type() == SMT.INT:
mapping.append(f" {nm} = {val_map[smt_sym]}")
return "\n".join(mapping)
def check(self, sys):
styp = type(sys)
if styp is BD.VarIntro:
smtsym = SMT.Symbol(repr(sys.name), SMT.INT)
self._ctxt.set(sys.name, smtsym)
self.check(sys.cont)
elif styp is BD.RelIntro:
Rtyp = SMT.FunctionType(SMT.BOOL,
[ SMT.INT for i in range(0,sys.n_args) ])
smtsym = SMT.Symbol(repr(sys.name), Rtyp)
self._ctxt.set(sys.name, smtsym)
self.check(sys.cont)
elif styp is BD.Guard:
pred = self.formula(sys.pred)
self._slv.add_assertion(pred)
self.check(sys.cont)
elif styp is BD.Both:
# make sure we can backtrack from the first branch
self._slv.push()
self._ctxt.push()
self.check(sys.lhs)
self._ctxt.pop()
self._slv.pop()
# now the second branch we can just proceed
self.check(sys.rhs)
elif styp is BD.Check:
pred = SMT.Not( self.formula(sys.pred) )
failure = self._slv.is_sat(pred)
if failure:
mapping = self._get_solution(pred)
self._err(sys, f"Out of Bounds Access:\n{mapping}")
# continue regardless
self.check(sys.cont)
elif styp is BD.NullSys:
pass
def formula(self, p):
ptyp = type(p)
if ptyp is BD.GTZ or ptyp is BD.GEZ or ptyp is BD.EQZ:
aform = self.affine(p.expr)
if ptyp is BD.GTZ: return SMT.GT( aform, SMT.Int(0) )
elif ptyp is BD.GEZ: return SMT.GE( aform, SMT.Int(0) )
elif ptyp is BD.EQZ: return SMT.Equals( aform, SMT.Int(0) )
else: assert False
elif ptyp is BD.Rel:
rsym = self._ctxt.get(p.name)
assert rsym is not None, f"expected relation name '{p.name}'"
args = []
for a in p.args:
sym = self._ctxt.get(a)
assert sym is not None, f"expected variable name '{a}'"
args.append(sym)
return SMT.Function(rsym,args)
elif ptyp is BD.Conj or ptyp is BD.Disj:
lhs = self.formula(p.lhs)
rhs = self.formula(p.rhs)
smtop = SMT.And if ptyp is BD.Conj else SMT.Or
return smtop(lhs, rhs)
def affine(self, a):
""" return some positive rescaling of the affine expression
s.t. the rescaled expression has integer coefficients.
This is safe, since positive rescaling preserves
all of a >= 0, a > 0, and a == 0 """
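# e.g. (1/2)*x - 1/3 is rescaled by lcm(2, 3) = 6 to 3*x - 2; the sign of the
# expression is unchanged for every x, so GTZ/GEZ/EQZ checks are unaffected.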
# find the lcm of the offset denominator
# and all coefficient denominators
mult = a.offset.denominator
for t in a.terms:
mult = _lcm(mult, t.coeff.denominator)
# now, we can produce an integral affine equation,
# by rescaling through with `mult`
a = a * Fraction(mult)
# Finally, convert this to an SMT formula
assert a.offset.denominator == 1
f = SMT.Int(a.offset.numerator)
for t in a.terms:
assert t.coeff.denominator == 1
sym = self._ctxt.get(t.var)
assert sym is not None, f"expected variable name '{t.var}'"
term = SMT.Times( SMT.Int(t.coeff.numerator), sym )
f = SMT.Plus( f, term )
return f
|
StarcoderdataPython
|
1647621
|
#!/usr/bin/env python
# Part of sniffMyPackets framework.
# Generic pcap tools and utilities that SmP uses
import os
import magic
import datetime
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import rdpcap, conf
def packet_count(pcap):
conf.verb = 0
try:
pkts = rdpcap(pcap)
return len(pkts)
except Exception as e:
return str(e)
def check_pcap(pcap):
try:
bad_magic = 'pcap-ng capture file'
m = magic.open(magic.MAGIC_NONE)
m.load()
f = m.file(pcap)
if bad_magic in f:
return 'BAD'
else:
return f
except Exception as e:
return str(e)
def count_sessions(pcap):
try:
pkts = rdpcap(pcap)
return len(pkts.sessions())
except Exception as e:
return str(e)
def check_size(pcap):
try:
x = os.path.getsize(pcap)
d = "%0.01f MB" % (x / (1024*1024.0))
return str(d)
except Exception as e:
return str(e)
def get_time(pcap):
try:
p = rdpcap(pcap)
c = len(p)
start = datetime.datetime.fromtimestamp(p[0].time).strftime('%Y-%m-%d %H:%M:%S.%f')
end = datetime.datetime.fromtimestamp(p[c -1].time).strftime('%Y-%m-%d %H:%M:%S.%f')
return [start, end]
except Exception as e:
return str(e)
|
StarcoderdataPython
|
4966299
|
<filename>Pyro/ext/remote.py
#############################################################################
#
# simple Pyro connection module, originally written by <NAME>
#
# This is part of "Pyro" - Python Remote Objects
# which is (c) <NAME> - <EMAIL>
#
#############################################################################
import UserDict
import exceptions
import os
import re
import signal
import socket
import sys
import time
import types
import Pyro.errors
import Pyro.naming
import Pyro.core
import Pyro.util
from Pyro.protocol import ProtocolError
true, false = 1, 0
copy_types = false
verbose = false
pyro_nameserver = None
pyro_daemon = None
client_initialized = false
server_initialized = false
daemon_host = ''
daemon_port = 0
daemon_objects = []
daemon_types = []
def tb_info(tb):
codename = tb.tb_frame.f_code.co_filename
lineno = tb.tb_lineno
if not (codename == '<string>' or codename.find(".py") > 0):
lineno = lineno - 2
return lineno, codename
def canonize(e_type, e_val, e_traceback):
"Turn the exception into a textual representation."
# find the last traceback:
tb = e_traceback
lineno, codename = tb_info(tb)
lines = [ "%s %s" % (codename, lineno) ]
found = None
if tb.tb_frame.f_code.co_filename[0] == '<':
found = tb
while tb.tb_next:
tb = tb.tb_next
if tb.tb_frame.f_code.co_filename[0] == '<':
found = tb
lineno, codename = tb_info(tb)
lines.append("%s %s" % (codename, lineno))
if found:
tb = found
lineno, codename = tb_info(tb)
if codename == '<string>':
lines.insert(0, "%s in command: %s" % (e_type, e_val))
elif codename.find(".py") > 0 and e_type == "SyntaxError":
lines.insert(0, "%s in: %s" % (e_type, e_val))
else:
lines.insert(0, "%s in line %s of %s: %s" %
(e_type, lineno, codename, e_val))
return lines
def exception_text():
return sys.exc_value
def format_exception():
return canonize(*sys.exc_info())
def register_type(t):
"""Whenever type T goes in or out, wrap/unwrap the type so that
the user is always interacting with the server object, or the
server interacts with the object directly."""
if t not in daemon_types:
daemon_types.append(t)
def unregister_objects():
if pyro_daemon:
global daemon_objects
for obj in daemon_objects:
try: pyro_daemon.disconnect(obj)
except: pass
daemon_objects = []
sys.exitfunc = unregister_objects
def host_ipaddr(interface = None):
if sys.platform == "win32":
return socket.gethostbyname(socket.gethostname())
cmd = "/sbin/ifconfig"
if interface:
cmd = '%s %s' % (cmd, interface)
fd = os.popen(cmd)
this_host = None
interfaces = {}
name = None
for line in fd.readlines():
match = re.match("(\S+)", line)
if match: name = match.group(1)
match = re.search("inet addr:(\S+)", line)
if match:
addr = match.group(1)
if name:
interfaces[name] = addr
if interfaces.has_key(interface):
this_host = interfaces[interface]
else:
for name, addr in interfaces.items():
if re.match("ppp", name):
this_host = addr
break
elif re.match("eth", name):
this_host = addr
fd.close()
return this_host or socket.gethostbyname(socket.gethostname())
def find_nameserver(hostname = None, portnum = None):
if hostname and hostname.find('://') > 0:
URI = Pyro.core.PyroURI(hostname)
ns = Pyro.naming.NameServerProxy(URI)
else:
try:
if verbose:
print 'Searching for Naming Service on %s:%d...' % \
(hostname or 'BROADCAST',
portnum or Pyro.config.PYRO_NS_BC_PORT)
locator = Pyro.naming.NameServerLocator()
ns = locator.getNS(host = hostname, port = portnum)
except (Pyro.core.PyroError, socket.error), x:
localhost = socket.gethostbyname('localhost')
if verbose:
print "Error:", x
print """
Naming Service not found with broadcast.
Trying local host""", localhost, '...',
ns = locator.getNS(host = localhost, port = portnum)
if verbose: print 'Naming Service found at', ns.URI
return ns
class Error(Exception): pass
class ObjBase(Pyro.core.ObjBase):
"""This extension of Pyro.core.ObjBase makes sure that any values
that get returned to the caller which are of a significant type,
get wrapped first in proxies.
Likewise, if a proxy class comes back to us, and it's in regard to
an object native to this server, unwrap it."""
def __nonzero__(self): return 1
def Pyro_dyncall(self, method, flags, args):
try:
base = Pyro.core.ObjBase.Pyro_dyncall
result = wrap(base(self, method, flags, unwrap(args)))
except:
result = Error('\n'.join(format_exception()))
return result
def _r_ga(self, attr):
return wrap(Pyro.core.ObjBase._r_ga(self, attr))
def _r_sa(self, attr, value):
Pyro.core.ObjBase._r_sa(self, attr, unwrap(value))
class Nameserver:
"""This helper class allows the server to use Pyro's naming
service for publishing certain objects by name. It integrates
better with remote.py, than Pyro.naming.NameServer does."""
def __init__(self, ns, ns_port):
self.ns = ns
self.ns_port = ns_port
def __cmp__(self, other):
return self.ns == other.ns and self.ns_port == other.ns_port
def __str__(self):
if self.ns_port:
return "%s:%s" % (self.ns, self.ns_port)
return self.ns
def resolve(self, name):
return get_remote_object(name, self.ns, self.ns_port)
def register(self, name, object):
return provide_local_object(object, name, self.ns, self.ns_port)
def unregister(self, object):
for obj in daemon_objects[:]:
if obj.delegate is object:
pyro_daemon.disconnect(obj)
daemon_objects.remove(obj)
class DynamicProxy(Pyro.core.DynamicProxyWithAttrs):
"""This version of the proxy just wraps args before making
external calls."""
def __nonzero__(self):
return true
def _invokePYRO(self, *vargs, **kargs):
result = unwrap(apply(Pyro.core.DynamicProxyWithAttrs._invokePYRO,
tuple([self] + wrap(list(vargs))), wrap(kargs)))
if type(result) is types.InstanceType and \
isinstance(result, Error) or \
isinstance(result, Pyro.errors.PyroError) or \
isinstance(result, ProtocolError):
msg = str(result)
type_name = msg[: msg.find(' ')]
if type_name == 'exceptions.IndexError':
try:
real_type = eval(type_name)
msg = msg.split('\n')[0]
result = real_type(msg[msg.find(':') + 2 :])
except:
pass
raise result
else:
return result
def unwrap(value):
t = type(value)
if t is types.InstanceType and isinstance(value, DynamicProxy):
if pyro_daemon:
try:
return pyro_daemon.getLocalObject(value.objectID)
except KeyError:
pass
return value
elif t is types.ListType:
for i in range(len(value)):
value[i] = unwrap(value[i])
elif t is types.TupleType:
value = list(value)
for i in range(len(value)):
value[i] = unwrap(value[i])
return tuple(value)
elif t is types.DictType:
for k, v in value.items():
value[k] = unwrap(v)
return value
def wrap(value):
"""Wrap the argument, returning a copy -- since otherwise we might
    alter a local data structure inadvertently."""
t = type(value)
if t is types.InstanceType:
matched = false
for dt in daemon_types:
if isinstance(value, dt):
matched = true
if not copy_types and not matched and \
not isinstance(value, DynamicProxy):
return provide_local_object(value)
elif t is types.ListType:
value = value[:]
for i in range(len(value)):
value[i] = wrap(value[i])
elif t is types.TupleType:
value = list(value)
for i in range(len(value)):
value[i] = wrap(value[i])
return tuple(value)
elif t is types.DictType:
copy = {}
for k, v in value.items():
copy[k] = wrap(v)
return copy
return value
def get_remote_object(name, hostname = None, portnum = None):
global client_initialized, pyro_nameserver
# initialize Pyro -- Python Remote Objects
if not client_initialized:
Pyro.core.initClient(verbose)
client_initialized = true
if pyro_nameserver is None or hostname:
pyro_nameserver = find_nameserver(hostname, portnum)
if verbose:
print 'Binding object %s' % name
try:
URI = pyro_nameserver.resolve(name)
if verbose:
print 'URI:', URI
return DynamicProxy(URI)
except Pyro.core.PyroError, x:
raise Error("Couldn't bind object, nameserver says:", x)
class Cache(UserDict.UserDict):
"""simple cache that uses least recently accessed time to trim size"""
def __init__(self,data=None,size=100):
UserDict.UserDict.__init__(self,data)
self.size = size
def resize(self):
"""trim cache to no more than 95% of desired size"""
trim = max(0, int(len(self.data)-0.95*self.size))
if trim:
# don't want self.items() because we must sort list by access time
values = map(None, self.data.values(), self.data.keys())
values.sort()
for val,k in values[0:trim]:
del self.data[k]
def __setitem__(self,key,val):
if (not self.data.has_key(key) and
len(self.data) >= self.size):
self.resize()
self.data[key] = (time.time(), val)
def __getitem__(self,key):
"""like normal __getitem__ but updates time of fetched entry"""
val = self.data[key][1]
self.data[key] = (time.time(),val)
return val
def get(self,key,default=None):
"""like normal __getitem__ but updates time of fetched entry"""
try:
return self[key]
except KeyError:
return default
def values(self):
"""return values, but eliminate access times first"""
vals = list(self.data.values())
for i in range(len(vals)):
vals[i] = vals[i][1]
return tuple(vals)
def items(self):
return map(None, self.keys(), self.values())
def copy(self):
return self.__class__(self.data, self.size)
def update(self, otherdict):
for k in otherdict.keys():
self[k] = otherdict[k]
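# Illustrative behaviour of Cache (hypothetical keys): it acts like a dict, but
# every read refreshes the entry's access time, and once the cache is full a
# new insertion calls resize(), which drops the least recently accessed entries
# until the cache is back under roughly 95% of `size`.
#   recent = Cache(size=100)
#   recent['connection'] = obj      # stored internally as (access_time, obj)
#   recent['connection']            # read returns obj and refreshes its time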
provided_objects = Cache(size = 100)
def provide_local_object(obj, name = None, hostname = None, portnum = None):
global server_initialized, pyro_daemon, pyro_nameserver
proxy_class = DynamicProxy
if not server_initialized:
Pyro.core.initServer(verbose)
server_initialized = true
if pyro_daemon is None:
pyro_daemon = Pyro.core.Daemon(host = daemon_host,
port = daemon_port)
# If no 'name' was specified, don't even bother with the
# nameserver.
if name:
if pyro_nameserver is None or hostname:
pyro_nameserver = find_nameserver(hostname, portnum)
pyro_daemon.useNameServer(pyro_nameserver)
if verbose:
print 'Remoting object', name
# tell nameserver to forget any earlier use of this name
try:
if pyro_nameserver.resolve(name):
pyro_nameserver.unregister(name)
except Pyro.errors.NamingError:
pass
if not isinstance(obj, Pyro.core.ObjBase):
if provided_objects.has_key(obj):
obj = provided_objects[obj]
else:
slave = ObjBase()
slave.delegateTo(obj)
provided_objects[obj] = slave
obj = slave
URI = pyro_daemon.connect(obj, name)
daemon_objects.append(obj)
proxy = proxy_class(URI)
return proxy
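# Minimal usage sketch (assumes a reachable Pyro name server; the object and
# name below are hypothetical):
#   server side:
#       provide_local_object(Calculator(), name='calculator')
#       handle_requests(wait_time=1.0)        # serve until interrupted
#   client side:
#       calc = get_remote_object('calculator')
#       calc.add(2, 3)                        # proxied call, result unwrapped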
abort = false
def interrupt(*args):
global abort
abort = true
if hasattr(signal,'SIGINT'): signal.signal(signal.SIGINT, interrupt)
#if hasattr(signal,'SIGHUP'): signal.signal(signal.SIGHUP, interrupt)
#if hasattr(signal,'SIGQUIT'): signal.signal(signal.SIGQUIT, interrupt)
def handle_requests(wait_time = None, callback = None):
global abort
abort = false
if pyro_daemon is None:
raise Error("There is no daemon with which to handle requests")
if wait_time:
start = time.time()
while not abort:
try:
pyro_daemon.handleRequests(wait_time)
if wait_time:
now = time.time()
if callback and now - start > wait_time:
callback()
start = now
elif callback:
callback()
# ignore socket and select errors, they are often transient
except socket.error: pass
except Exception, msg:
if verbose:
print "Error:", sys.exc_type, msg
abort = true
except:
abort = true
return abort
def handle_requests_unsafe(wait_time = None, callback = None):
global abort
abort = false
if pyro_daemon is None:
raise Error("There is no daemon with which to handle requests")
if wait_time:
start = time.time()
while 1:
pyro_daemon.handleRequests(wait_time)
if wait_time:
now = time.time()
if callback and now - start > wait_time:
callback()
start = now
elif callback:
callback()
return true
def unregister_object(obj):
if pyro_daemon:
try: pyro_daemon.disconnect(obj)
except: pass
global daemon_objects
if obj in daemon_objects:
daemon_objects.remove(obj)
#!/usr/bin/env python
"""
Copyright (c) 2015-2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'i2c_init'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
m_axis_cmd_ready = Signal(bool(0))
m_axis_data_tready = Signal(bool(0))
start = Signal(bool(0))
# Outputs
m_axis_cmd_address = Signal(intbv(0)[7:])
m_axis_cmd_start = Signal(bool(0))
m_axis_cmd_read = Signal(bool(0))
m_axis_cmd_write = Signal(bool(0))
m_axis_cmd_write_multiple = Signal(bool(0))
m_axis_cmd_stop = Signal(bool(0))
m_axis_cmd_valid = Signal(bool(0))
m_axis_data_tdata = Signal(intbv(0)[8:])
m_axis_data_tvalid = Signal(bool(0))
m_axis_data_tlast = Signal(bool(1))
busy = Signal(bool(0))
# sources and sinks
cmd_sink_pause = Signal(bool(0))
data_sink_pause = Signal(bool(0))
cmd_sink = axis_ep.AXIStreamSink()
cmd_sink_logic = cmd_sink.create_logic(
clk,
rst,
tdata=(m_axis_cmd_address, m_axis_cmd_start, m_axis_cmd_read, m_axis_cmd_write, m_axis_cmd_write_multiple, m_axis_cmd_stop),
tvalid=m_axis_cmd_valid,
tready=m_axis_cmd_ready,
pause=cmd_sink_pause,
name='cmd_sink'
)
data_sink = axis_ep.AXIStreamSink()
data_sink_logic = data_sink.create_logic(
clk,
rst,
tdata=m_axis_data_tdata,
tvalid=m_axis_data_tvalid,
tready=m_axis_data_tready,
tlast=m_axis_data_tlast,
pause=data_sink_pause,
name='data_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
m_axis_cmd_address=m_axis_cmd_address,
m_axis_cmd_start=m_axis_cmd_start,
m_axis_cmd_read=m_axis_cmd_read,
m_axis_cmd_write=m_axis_cmd_write,
m_axis_cmd_write_multiple=m_axis_cmd_write_multiple,
m_axis_cmd_stop=m_axis_cmd_stop,
m_axis_cmd_valid=m_axis_cmd_valid,
m_axis_cmd_ready=m_axis_cmd_ready,
m_axis_data_tdata=m_axis_data_tdata,
m_axis_data_tvalid=m_axis_data_tvalid,
m_axis_data_tready=m_axis_data_tready,
m_axis_data_tlast=m_axis_data_tlast,
busy=busy,
start=start
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: run, no delays")
current_test.next = 1
start.next = 1
yield clk.posedge
start.next = 0
yield clk.posedge
yield clk.posedge
while busy:
yield clk.posedge
# addresses and data for checking
addr = [0x50, 0x50, 0x51, 0x52, 0x53]
data = [0x00, 0x04, 0x11, 0x22, 0x33, 0x44]
# check all issued commands
for a in addr:
first = True
for d in data:
f1 = cmd_sink.recv()
f2 = data_sink.recv()
assert f1.data[0][0] == a # address
assert f1.data[0][1] == first # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 1 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 0 # stop
assert f2.data[0] == d
first = False
# check for stop command
f1 = cmd_sink.recv()
assert f1.data[0][1] == 0 # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 0 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 1 # stop
# make sure we got everything
assert cmd_sink.empty()
assert data_sink.empty()
yield delay(100)
# testbench stimulus
yield clk.posedge
print("test 2: run with delays")
current_test.next = 2
start.next = 1
yield clk.posedge
start.next = 0
yield clk.posedge
yield clk.posedge
cmd_sink_pause.next = 0
data_sink_pause.next = 1
while busy:
yield delay(100)
yield clk.posedge
cmd_sink_pause.next = 0
data_sink_pause.next = 1
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 1
yield delay(100)
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 0
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 1
cmd_sink_pause.next = 0
data_sink_pause.next = 0
# addresses and data for checking
addr = [0x50, 0x50, 0x51, 0x52, 0x53]
data = [0x00, 0x04, 0x11, 0x22, 0x33, 0x44]
# check all issued commands
for a in addr:
first = True
for d in data:
f1 = cmd_sink.recv()
f2 = data_sink.recv()
assert f1.data[0][0] == a # address
assert f1.data[0][1] == first # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 1 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 0 # stop
assert f2.data[0] == d
first = False
# check for stop command
f1 = cmd_sink.recv()
assert f1.data[0][1] == 0 # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 0 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 1 # stop
# make sure we got everything
assert cmd_sink.empty()
assert data_sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
# repo: SamuelMarks/tensorboard-plugin-example -- file: greeter_plugin/greeter_plugin.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import six
from werkzeug import wrappers
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
class GreeterPlugin(base_plugin.TBPlugin):
"""A plugin that serves greetings recorded during model runs."""
# This static property will also be included within routes (URL paths)
# offered by this plugin. This property must uniquely identify this plugin
# from all other plugins.
plugin_name = 'greeter'
def __init__(self, context):
"""Instantiates a GreeterPlugin.
Args:
context: A base_plugin.TBContext instance. A magic container that
TensorBoard uses to make objects available to the plugin.
"""
# We retrieve the multiplexer from the context and store a reference
# to it.
self._multiplexer = context.multiplexer
@wrappers.Request.application
def tags_route(self, request):
"""A route (HTTP handler) that returns a response with tags.
Returns:
A response that contains a JSON object. The keys of the object
are all the runs. Each run is mapped to a (potentially empty)
list of all tags that are relevant to this plugin.
"""
# This is a dictionary mapping from run to (tag to string content).
# To be clear, the values of the dictionary are dictionaries.
all_runs = self._multiplexer.PluginRunToTagToContent(
GreeterPlugin.plugin_name)
# tagToContent is itself a dictionary mapping tag name to string
# content. We retrieve the keys of that dictionary to obtain a
# list of tags associated with each run.
response = {
run: list(tagToContent.keys())
for (run, tagToContent) in all_runs.items()
}
return http_util.Respond(request, response, 'application/json')
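    # Illustrative /tags response for two hypothetical runs, only one of which
    # logged a greeter summary:
    #   {"run_a": ["greetings/demo"], "run_b": []}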
def get_plugin_apps(self):
"""Gets all routes offered by the plugin.
This method is called by TensorBoard when retrieving all the
routes offered by the plugin.
Returns:
A dictionary mapping URL path to route that handles it.
"""
# Note that the methods handling routes are decorated with
# @wrappers.Request.application.
return {
'/tags': self.tags_route,
'/greetings': self.greetings_route,
}
def is_active(self):
"""Determines whether this plugin is active.
This plugin is only active if TensorBoard sampled any summaries
relevant to the greeter plugin.
Returns:
Whether this plugin is active.
"""
all_runs = self._multiplexer.PluginRunToTagToContent(
GreeterPlugin.plugin_name)
# The plugin is active if any of the runs has a tag relevant
# to the plugin.
return bool(self._multiplexer and any(six.itervalues(all_runs)))
def _process_string_tensor_event(self, event):
"""Convert a TensorEvent into a JSON-compatible response."""
string_arr = tf.make_ndarray(event.tensor_proto)
text = string_arr.astype(np.dtype(str)).tostring()
return {
'wall_time': event.wall_time,
'step': event.step,
'text': text,
}
@wrappers.Request.application
def greetings_route(self, request):
"""A route that returns the greetings associated with a tag.
Returns:
A JSON list of greetings associated with the run and tag
combination.
"""
run = request.args.get('run')
tag = request.args.get('tag')
# We fetch all the tensor events that contain greetings.
tensor_events = self._multiplexer.Tensors(run, tag)
# We convert the tensor data to text.
response = [self._process_string_tensor_event(ev) for
ev in tensor_events]
return http_util.Respond(request, response, 'application/json')
# coding=utf-8
"""
Created on Monday 28 March 00:53:34 2020
@author: nkalyan🤠
Implementing Python scripts to print student and instructor tables.
"""
from typing import Dict, Iterator, Tuple, KeysView
from prettytable import PrettyTable
from collections import defaultdict
from HW08_nikhil_kalyan import file_reader
class Student(object):
"""A student class that holds the details of students"""
def __init__(self, cwid: int, name: str, dept: str) -> None:
"""Initialize/ construct the student class"""
self._name: str = name
self._cwid: int = cwid
self._department: str = dept
        self.student_courses: Dict[str, str] = dict()
def add_course(self, course: str, grade: str) -> None:
self.student_courses[course] = grade
def get_course(self) -> KeysView[str]:
return self.student_courses.keys()
class Instructor(object):
"""Instructor class that holds the details of instructor, The names of course taught
registered courses"""
def __init__(self, name: str, cwid: int, department: str) -> None:
"""Initialize a constructor of instructor data"""
self._name: str = name
self._cwid: int = cwid
self._department: str = department
        self.instructor_courses: Dict[str, int] = defaultdict(int)
def add_student(self, course: str) -> None:
self.instructor_courses[course] += 1
def get_course(self) -> KeysView[str]:
return self.instructor_courses.keys()
def get_student_count(self, course: str) -> int:
return self.instructor_courses[course]
class University(object):
"""This is the main class the reads the whole data and perform the operations that need to create a table"""
def __init__(self, directory_name) -> None:
"""Initialize a constructor to store the values"""
self._directory: str = directory_name
self._student: Dict[str, Student] = dict()
self._instructor: Dict[str, Instructor] = dict()
def get_students_details(self) -> None:
"""Method that gets the student details"""
try:
student_file: Iterator[Tuple[str]] = file_reader('students.txt', 3, sep='|', header=True)
for cwid, name, dept in student_file:
self._student[cwid] = Student(cwid, name, dept)
except(FileNotFoundError, ValueError) as e:
print(e)
def get_instructors_details(self) -> None:
"""Method that gets the instructor details"""
try:
instructor_file: Iterator[Tuple[str]] = file_reader('instructors.txt', 3, sep='\t', header=True)
for cwid, name, dept in instructor_file:
self._instructor[cwid] = Instructor(cwid, name, dept)
except(FileNotFoundError, ValueError) as e:
print(e)
def get_grades(self) -> None:
"""Method the get the grades """
try:
grade_file: Iterator[Tuple[str]] = file_reader('grades.txt', 4, sep='\t', header=True)
for student_cwid, course, grade, instructor_cwid in grade_file:
if student_cwid in self._student.keys():
self._student[student_cwid].student_courses[course] = grade
if instructor_cwid in self._instructor.keys():
self._instructor[instructor_cwid].instructor_courses[course] += 1
except(FileNotFoundError, ValueError) as e:
print(e)
def student_table(self) -> PrettyTable:
"""returns the pretty table of students with the defined fields"""
print_student_table: PrettyTable = PrettyTable()
print_student_table.field_names = ["CWID", "Name", "Completed Courses"]
for cwid, student in self._student.items():
            print_student_table.add_row([cwid, student._name, sorted(list(student.student_courses.keys()))])
return print_student_table
def instructors_table(self) -> PrettyTable:
"""Returns pretty table of instructor with defined fields"""
print_instructor_table: PrettyTable = PrettyTable()
print_instructor_table.field_names = ["CWID", "Name", "Dept", "Course", "Students"]
for cwid, instructor in self._instructor.items():
for course in instructor.instructor_courses:
                print_instructor_table.add_row([cwid, instructor._name, instructor._department, course,
instructor.instructor_courses[course]])
return print_instructor_table
def main() -> None:
"""main function"""
directory_name: str = '/Users/nikhilkalyan/PycharmProjects/SSW 810'
result = University(directory_name)
result.get_students_details()
result.get_instructors_details()
result.get_grades()
print("Student Summary")
print(result.student_table())
print("Instructor Summary")
print(result.instructors_table())
if __name__ == "__main__":
main()
# repo: johnnykoo84/DS-Unit-3-Sprint-2-SQL-and-Databases
import os
import psycopg2
from dotenv import load_dotenv
load_dotenv()
# Connect to ElephantSQL-hosted PostgreSQL
conn = psycopg2.connect(
dbname=os.getenv("dbname"),
user=os.getenv("user"),
password=os.getenv("password"),
host=os.getenv("host")
)
# A "cursor", a structure to iterate over db records to perform queries
cur = conn.cursor()
# An example query
cur.execute('SELECT * from test_table;')
# Note - nothing happened yet! We need to actually *fetch* from the cursor
print(cur.fetchone())
import enum
from collections import OrderedDict
from copy import deepcopy
import gym
import numpy as np
from gym.spaces import Box, Dict
def update_obs_space(env, delta):
spaces = env.observation_space.spaces.copy()
for key, shape in delta.items():
spaces[key] = Box(-np.inf, np.inf, (np.prod(shape),), np.float32)
return Dict(spaces)
class BinSpacing(enum.Enum):
"""
    An Enum class to generate action bin spacing arrays.
"""
LINEAR = "linear"
EXPONENTIAL = "exponential" # Exponential binning. Expects a symmetric action space centered around zero
def get_bin_array(self, lower_bound, upper_bound, n_bins) -> np.ndarray:
if self is BinSpacing.LINEAR:
return np.linspace(lower_bound, upper_bound, n_bins)
else:
assert (
lower_bound == -upper_bound and n_bins % 2 == 1
), "Exponential binning is only supported on symmetric action space with an odd number of bins"
half_range = np.array([2 ** (-n) for n in range(n_bins // 2)]) * lower_bound
return np.concatenate([half_range, [0], -half_range[::-1]])
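# Illustrative outputs of the two spacings (values follow from the code above):
#   BinSpacing.LINEAR.get_bin_array(-1.0, 1.0, 7)
#       -> [-1.0, -0.667, -0.333, 0.0, 0.333, 0.667, 1.0] (approximately)
#   BinSpacing.EXPONENTIAL.get_bin_array(-1.0, 1.0, 7)
#       -> [-1.0, -0.5, -0.25, 0.0, 0.25, 0.5, 1.0]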
class DiscretizeActionWrapper(gym.ActionWrapper):
"""
A wrapper that maps a continuous gym action space into a discrete action space.
"""
# default action bins for DiscretizeActionWrapper
DEFAULT_BINS = 11
def __init__(
self, env=None, n_action_bins=DEFAULT_BINS, bin_spacing=BinSpacing.LINEAR
):
"""
n_action_bins: can be int or None
if None is passed, then DEFAULT_BINS will be used.
"""
super().__init__(env)
assert isinstance(env.action_space, Box)
self._disc_to_cont = []
if n_action_bins is None:
n_action_bins = self.DEFAULT_BINS
for low, high in zip(env.action_space.low, env.action_space.high):
self._disc_to_cont.append(
bin_spacing.get_bin_array(low, high, n_action_bins)
)
temp = [n_action_bins for _ in self._disc_to_cont]
self.action_space = gym.spaces.MultiDiscrete(temp)
self.action_space.seed(env.action_space.np_random.randint(0, 2 ** 32 - 1))
def action(self, action):
assert len(action) == len(self._disc_to_cont)
return np.array(
[m[a] for a, m in zip(action, self._disc_to_cont)], dtype=np.float32
)
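# Example of the discrete-to-continuous mapping (illustrative): for a Box(-1, 1)
# action space with 11 linear bins per dimension, index 0 maps to -1.0, index 5
# to 0.0 and index 10 to 1.0, so the MultiDiscrete action [0, 5, 10] becomes
# the continuous action [-1.0, 0.0, 1.0].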
class RewardNameWrapper(gym.Wrapper):
""" Sets the default reward name on the environment """
def __init__(self, env):
super().__init__(env)
unwrapped = self.env.unwrapped
if not hasattr(unwrapped, "reward_names"):
self.env.unwrapped.reward_names = ["env"]
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipObservationWrapper(gym.ObservationWrapper):
"""
Clips observations into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def observation(self, observation):
clipped_observation = OrderedDict()
for key in observation:
clipped_observation[key] = np.clip(
observation[key], -self._clip, self._clip
)
return clipped_observation
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipRewardWrapper(gym.RewardWrapper):
"""
Clips reward values into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def reward(self, reward):
clipped_reward = np.clip(reward, -self._clip, self._clip)
return clipped_reward
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipActionWrapper(gym.ActionWrapper):
""" Clips action values into a normalized space between -1 and 1"""
def action(self, action):
return np.clip(a=action, a_min=-1.0, a_max=1.0)
class IncrementalExpAvg(object):
""" A generic exponential moving average filter. """
    def __init__(self, alpha, initial_value=None):
        self._value = 0
        self._t = 0
        self._alpha = alpha
        if initial_value is not None:
            self.update(initial_value)
def update(self, observation):
self._value = self._value * self._alpha + (1 - self._alpha) * observation
self._t += 1
def get(self):
if self._value is None:
return None
else:
return self._value / (1 - self._alpha ** self._t)
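# Quick illustration of the bias correction in IncrementalExpAvg (hypothetical
# numbers): with alpha=0.5 and two updates of 1.0, the raw EMA is 0.75, but
# get() divides by (1 - 0.5 ** 2) = 0.75 and returns 1.0, the exact mean.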
class PreviousActionObservationWrapper(gym.Wrapper):
"""
Wrapper that annotates observations with a cached previous action.
"""
def __init__(self, env=None):
super().__init__(env)
env.observation_space.spaces["previous_action"] = deepcopy(env.action_space)
def reset(self, *args, **kwargs):
self.previous_action = np.zeros(self.env.action_space.shape)
return self.observation(self.env.reset(*args, **kwargs))
def observation(self, observation):
observation["previous_action"] = self.previous_action.copy()
return observation
def step(self, action):
self.previous_action = action.copy()
ob, rew, done, info = self.env.step(action)
return self.observation(ob), rew, done, info
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class SmoothActionWrapper(gym.Wrapper):
"""
Applies smoothing to the current action using an Exponential Moving Average filter.
"""
def __init__(self, env, alpha=0.0):
super().__init__(env)
self._alpha = alpha
delta = OrderedDict([("action_ema", self.env.action_space.shape)])
self.observation_space = update_obs_space(self.env, delta)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
sim = self.unwrapped.sim
adjusted_alpha = np.power(
self._alpha, (sim.model.opt.timestep * sim.nsubsteps) / 0.08
)
self._ema = IncrementalExpAvg(alpha=adjusted_alpha)
obs["action_ema"] = np.zeros(self.env.action_space.shape)
return obs
def step(self, action):
self._ema.update(action)
action = self._ema.get()
obs, rew, done, info = self.env.step(action)
obs["action_ema"] = action
return obs, rew, done, info
class RelativeGoalWrapper(gym.ObservationWrapper):
"""
Wrapper that computes the 'relative goal' and 'achieved goal' observations for
environments.
"""
def __init__(self, env, obs_prefix=""):
# Prefix to map goal observation to state observation. This is a hack to
# handle inconsistent naming convention for cube environment observations
# e.g. goal_pos goal observation maps to cube_pos state observation.
self.obs_prefix = obs_prefix
super().__init__(env)
self.goal_obs_names = []
delta = OrderedDict()
for name, space in self.env.observation_space.spaces.items():
if name.startswith("goal_"):
delta[f"achieved_{name}"] = space.shape
delta[f"relative_{name}"] = space.shape
delta[f"noisy_achieved_{name}"] = space.shape
delta[f"noisy_relative_{name}"] = space.shape
obs_name = name[len("goal_"):]
assert (
f"{self.obs_prefix}{obs_name}" in self.env.observation_space.spaces
), (
f"Found {name} but not {self.obs_prefix}{obs_name} in observation space. "
f"RelativeGoalWrapper won't work. Available observation space: "
f"{sorted(self.env.observation_space.spaces.keys())}"
)
self.goal_obs_names.append(obs_name)
self.observation_space = update_obs_space(self.env, delta)
def observation(self, observation):
""" Calculate 'relative goal' and 'achieved goal' """
current_state = {
f"{self.obs_prefix}{n}": observation[f"{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
noisy_goal_state = {
f"{self.obs_prefix}{n}": observation[f"noisy_{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, current_state
)
noisy_relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, noisy_goal_state
)
for name in self.goal_obs_names:
obs_name = f"{self.obs_prefix}{name}"
observation[f"achieved_goal_{name}"] = observation[obs_name].copy()
observation[f"relative_goal_{name}"] = relative_goal[obs_name]
observation[f"noisy_achieved_goal_{name}"] = observation[
f"noisy_{obs_name}"
].copy()
observation[f"noisy_relative_goal_{name}"] = noisy_relative_goal[obs_name]
return observation
class UnifiedGoalObservationWrapper(gym.ObservationWrapper):
"""Concatenates the pieces of every goal type"""
def __init__(
self, env, goal_keys=["relative_goal", "achieved_goal", "goal"], goal_parts=[],
):
super().__init__(env)
self.delta = OrderedDict()
for goal_key in goal_keys:
goal_len = sum(
[
self.observation_space.spaces[key].shape[0]
for key in self.observation_space.spaces.keys()
if key.startswith(goal_key)
]
)
self.delta[goal_key] = (goal_len,)
if any(
[
key.startswith("noisy_" + goal_key + "_")
for key in self.observation_space.spaces.keys()
]
):
self.delta["noisy_" + goal_key] = (goal_len,)
self.goal_parts = goal_parts
self.observation_space = update_obs_space(self.env, self.delta)
def observation(self, observation):
new_obs = OrderedDict()
for key, value in observation.items():
new_obs[key] = value
# It's a bit hacky to hard code observation key here but we have to do it now
# because we need to keep old policy backward compatible by keep observation order
# the same.
for goal_key in self.delta.keys():
goal_parts = [goal_key + "_" + part for part in self.goal_parts]
goal = np.concatenate(
[observation[key] for key in goal_parts if key in observation]
)
new_obs[goal_key] = goal
return new_obs
class SummedRewardsWrapper(gym.RewardWrapper):
"""
Ensures that reward is a scalar.
"""
def reward(self, reward):
return np.sum([reward])
# repo: maxisoft/BravePatcher -- file: tests/test_cli_integration.py -- gh_stars: 1-10
import os
import sys
import shlex
import subprocess
from pathlib import Path
from typing import Tuple
import pefile
import pytest
import psutil
def _get_command() -> str:
return os.environ.get("CLI_COMMAND", "{} -m bravepatcher".format(sys.executable.replace('\\', '/')))
def _call(cmd: str, **kwargs) -> Tuple[str, str, int]:
cmd_split = shlex.split(cmd)
kwargs.setdefault("text", True)
kwargs.setdefault("timeout", 60)
p = subprocess.run(cmd_split, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
return p.stdout, p.stderr, p.returncode
def _call_patcher(cmd: str, **kwargs) -> Tuple[str, str, int]:
return _call(f"{_get_command()} {cmd}", **kwargs)
class TestCliIntegration:
def test_launch_without_args(self):
out, err, code = _call(_get_command())
assert code == 0
assert err == ""
assert out.startswith("Usage: ")
assert "help" in out
def test_help(self):
out, err, code = _call_patcher("--help")
assert code == 0
assert err == ""
assert out.startswith("Usage: ")
assert "help" in out
@pytest.mark.slow_integration_test
def test_download_in_cwd(self, tmp_path: Path):
out, err, code = _call_patcher("download-brave", cwd=str(tmp_path.resolve()))
assert out.startswith("downloading")
assert err == ""
assert code == 0
path = Path(tmp_path, "BraveBrowserStandaloneSilentSetup.exe")
assert path.exists()
pe = pefile.PE(str(path))
assert pe.is_exe()
@pytest.mark.slow_integration_test
def test_download_install_patch_run_restore(self, tmp_path: Path):
if os.environ.get("CI") != "true":
pytest.skip("skipping CI only tests")
_, _, code = _call_patcher("download-brave", cwd=str(tmp_path.resolve()))
assert code == 0
path = Path(tmp_path, "BraveBrowserStandaloneSilentSetup.exe")
subprocess.check_call(str(path))
out, err, code = _call_patcher("patch --show-debug-result")
print(out)
print(err, file=sys.stderr)
assert "Done patching brave" in out
assert r"""'errors': []""" in out
assert code == 0
from bravepatcher.utils.brave import get_brave_path
with psutil.Popen([str(get_brave_path())] + shlex.split("--headless --bwsi")) as p:
# start and wait 30 sec assuming there's no crash
try:
p.wait(30)
except subprocess.TimeoutExpired:
pass
else:
assert p.returncode == 0
finally:
if p.is_running():
p.kill()
out, err, code = _call_patcher("restore")
print(out)
print(err, file=sys.stderr)
assert code == 0
# file: matrix/determinant_of_nxn_matrix.py
"""To find determinant of NxN Matrix i have used Row Reduction method
you can see wikipedia of this Method
https://en.wikipedia.org/wiki/Row_echelon_form"""
from __future__ import annotations
from copy import deepcopy
def determinant_of_nxn_matrix(matrix: list[list[float]]) -> float:
""">>> determinant_of_nxn_matrix([[10, 5], [3, 2.5]])
10.0
>>> determinant_of_nxn_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
-3.0
>>> determinant_of_nxn_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 4]])
15.0
>>> determinant_of_nxn_matrix([[1, 2, 3, 7], [4, 5, 6, 6],
[7, 8, 1, 5], [1, 2, 3, 4]])
-72.0
>>> determinant_of_nxn_matrix([[1, 2, 3, 7, 13], [4, 5, 6, 6, 90],
[7, 8, 1, 5, 76],
[1, 2, 3, 4, 12], [9, 6, 3, 7, 4]])
19848.0
>>> determinant_of_nxn_matrix([[1, 2, 3, 7, 13, 23],
[4, 44, 6, 6, 90, 12],
[7, 8, 1, 5, 6, 98], [1, 2, 3, 4, 12, 4],
[9, 6, 3, 7, 4, 9], [2, 47, 8, 91, 36, 9]])
-20981553.999999993"""
size = int(len(matrix))
matrix_copy = deepcopy(matrix)
n = 0
m = 1
c1 = 1
c2 = 0
for k in range(1, (2 * (size - 1)) + 1):
for i in range(c1, size):
for j in range(0, size):
if (matrix[c1 - 1][c2]) != 0:
matrix_copy[i][j] = matrix[i][j] - (
(matrix[m][n]) * ((matrix[c2][j]) / (matrix[c1 - 1][c2]))
)
m = m + 1
matrix = deepcopy(matrix_copy)
n = n + 1
m = n + 1
c1 = c1 + 1
c2 = c2 + 1
determinant = float(1)
for i in range(0, size):
for j in range(0, size):
if i == j:
determinant = determinant * matrix[i][j]
return determinant
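# Minimal sanity check when the module is run directly (illustrative values;
# the diagonal 2x2 case can be verified by hand: 2 * 3 - 0 * 0 = 6).
if __name__ == "__main__":
    print(determinant_of_nxn_matrix([[2.0, 0.0], [0.0, 3.0]]))  # expected 6.0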
from . import site
content = 'Another Bad Module'
site._registry.update({
'foo': 'bar',
})
raise Exception('Some random exception.')
from __future__ import annotations
import multiprocessing
import os
import random
import re
from typing import Any
from typing import overload
from typing import Sequence
from typing import TYPE_CHECKING
import before_commit.constants as C
from before_commit import parse_shebang
from before_commit.hook import Hook
from before_commit.prefix import Prefix
from before_commit.util import cmd_output_b
from before_commit.xargs import xargs
if TYPE_CHECKING:
from typing import NoReturn
FIXED_RANDOM_SEED = 1542676187
SHIMS_RE = re.compile(r'[/\\]shims[/\\]')
def exe_exists(exe: str) -> bool:
found = parse_shebang.find_executable(exe)
    if found is None:  # exe does not exist anywhere on PATH
return False
homedir = os.path.expanduser('~')
try:
common: str | None = os.path.commonpath((found, homedir))
except ValueError: # on windows, different drives raises ValueError
common = None
return (
# it is not in a /shims/ directory
not SHIMS_RE.search(found) and
(
# the homedir is / (docker, service user, etc.)
os.path.dirname(homedir) == homedir or
# the exe is not contained in the home directory
common != homedir
)
)
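# Example (illustrative paths): exe_exists('python3') is True for a system
# interpreter such as /usr/bin/python3, but False for a pyenv shim like
# ~/.pyenv/shims/python3 or for an interpreter installed under the home
# directory, both of which are rejected by the checks above.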
def run_setup_cmd(prefix: Prefix, cmd: tuple[str, ...], **kwargs: Any) -> None:
cmd_output_b(*cmd, cwd=prefix.prefix_dir, **kwargs)
@overload
def environment_dir(d: None, language_version: str) -> None: ...
@overload
def environment_dir(d: str, language_version: str) -> str: ...
def environment_dir(d: str | None, language_version: str) -> str | None:
if d is None:
return None
else:
return f'{d}-{language_version}'
def assert_version_default(binary: str, version: str) -> None:
if version != C.DEFAULT:
raise AssertionError(
f'for now, pre-commit requires system-installed {binary} -- '
f'you selected `language_version: {version}`',
)
def assert_no_additional_deps(
lang: str,
additional_deps: Sequence[str],
) -> None:
if additional_deps:
raise AssertionError(
f'for now, pre-commit does not support '
f'additional_dependencies for {lang} -- '
f'you selected `additional_dependencies: {additional_deps}`',
)
def basic_get_default_version() -> str:
return C.DEFAULT
def basic_health_check(prefix: Prefix, language_version: str) -> str | None:
return None
def no_install(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> NoReturn:
raise AssertionError('This type is not installable')
def target_concurrency(hook: Hook) -> int:
if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:
return 1
else:
# Travis appears to have a bunch of CPUs, but we can't use them all.
if 'TRAVIS' in os.environ:
return 2
else:
try:
return multiprocessing.cpu_count()
except NotImplementedError:
return 1
def _shuffled(seq: Sequence[str]) -> list[str]:
"""Deterministically shuffle"""
fixed_random = random.Random()
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
fixed_random.shuffle(seq)
return seq
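# Because the seed is fixed, _shuffled() is fully reproducible: the same file
# list is always permuted the same way, so hooks that depend on input order
# still see a stable ordering across runs and machines.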
def run_xargs(
hook: Hook,
cmd: tuple[str, ...],
file_args: Sequence[str],
**kwargs: Any,
) -> tuple[int, bytes]:
# Shuffle the files so that they more evenly fill out the xargs partitions,
# but do it deterministically in case a hook cares about ordering.
file_args = _shuffled(file_args)
kwargs['target_concurrency'] = target_concurrency(hook)
return xargs(cmd, file_args, **kwargs)
# file: channels/management/commands/update_managed_channel_memberships.py
"""Update managed channel memberships"""
from django.core.management.base import BaseCommand
from channels import tasks
from open_discussions.utils import now_in_utc
class Command(BaseCommand):
"""Update managed channel memberships"""
help = __doc__
def add_arguments(self, parser):
parser.add_argument("channel_names", metavar="CHANNEL_NAME", nargs="+")
def handle(self, *args, **options):
task = tasks.update_memberships_for_managed_channels.delay(
channel_names=options["channel_names"]
)
self.stdout.write("Waiting on task...")
start = now_in_utc()
task.get()
total_seconds = (now_in_utc() - start).total_seconds()
self.stdout.write(
"Updated user channel memberships, took {} seconds".format(total_seconds)
)
# repo: chuckbutler/redmine-layer
from charms.docker import Docker
from charms.docker.compose import Compose
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core.hookenv import config
from charmhelpers.core.hookenv import open_port
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.templating import render
@when('docker.available')
def start_redmine():
''' Starts a Redmine application in standalone configuration'''
# Render the formation
cfg = config()
render('docker-compose.yml', 'files/redmine/docker-compose.yml', cfg)
# Initialize the docker compose object, looking @ our work directory
compose = Compose('files/redmine')
# Launch the service(s)
status_set('maintenance', "Fetching / Starting the redmine containers")
compose.up()
open_port(cfg['port'])
status_set("active", "Redmine is running on port {}".format(cfg['port']))
# repo: npwebste/UPS_Controller
# Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
import VFD_Modbus_Wrapper
import VFD_Modbus_Registers
def ProtectionCheck():
Float_Switch() # From float switch
DC_Link_Voltage() # From inverter control card via SPI
Inverter_Voltage() # From inverter control card via SPI
Grid_Voltage() # Not needed, can measure from VFD input
VFD_Freq_In()
VFD_Voltage_In()
VFD_Freq_Out()
VFD_Voltage_Out()
return 0
import os
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import MNIST
from torchvision import transforms
from test_tube import HyperOptArgumentParser
from pytorch_lightning.root_module.root_module import LightningModule
from pytorch_lightning import data_loader
class LightningValidationStepMixin:
"""
Add val_dataloader and validation_step methods for the case
when val_dataloader returns a single dataloader
"""
@data_loader
def val_dataloader(self):
return self._dataloader(train=False)
def validation_step(self, data_batch, batch_i):
"""
Lightning calls this inside the validation loop
:param data_batch:
:return:
"""
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_i % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_i % 2 == 0:
return val_acc
if batch_i % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
class LightningValidationMixin(LightningValidationStepMixin):
"""
Add val_dataloader, validation_step, and validation_end methods for the case
when val_dataloader returns a single dataloader
"""
def validation_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
val_loss_mean = 0
val_acc_mean = 0
for output in outputs:
val_loss = output['val_loss']
# reduce manually when using dp
if self.trainer.use_dp:
val_loss = torch.mean(val_loss)
val_loss_mean += val_loss
# reduce manually when using dp
val_acc = output['val_acc']
if self.trainer.use_dp:
val_acc = torch.mean(val_acc)
val_acc_mean += val_acc
val_loss_mean /= len(outputs)
val_acc_mean /= len(outputs)
tqdm_dic = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
return tqdm_dic
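# Illustrative composition (class names hypothetical): the mixins are combined
# with a LightningModule-based test model, e.g.
#   class SingleValLoaderModel(LightningValidationMixin, TestModelBase):
#       pass
# which then provides val_dataloader, validation_step and validation_end.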
class LightningValidationStepMultipleDataloadersMixin:
"""
Add val_dataloader and validation_step methods for the case
when val_dataloader returns multiple dataloaders
"""
@data_loader
def val_dataloader(self):
return [self._dataloader(train=False), self._dataloader(train=False)]
def validation_step(self, data_batch, batch_i, dataloader_i):
"""
Lightning calls this inside the validation loop
:param data_batch:
:return:
"""
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_i % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_i % 2 == 0:
return val_acc
if batch_i % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
if batch_i % 5 == 0:
output = OrderedDict({
f'val_loss_{dataloader_i}': loss_val,
f'val_acc_{dataloader_i}': val_acc,
})
return output
class LightningValidationMultipleDataloadersMixin(LightningValidationStepMultipleDataloadersMixin):
"""
Add val_dataloader, validation_step, and validation_end methods for the case
when val_dataloader returns multiple dataloaders
"""
def validation_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
val_loss_mean = 0
val_acc_mean = 0
for output in outputs:
val_loss = output['val_loss']
# reduce manually when using dp
if self.trainer.use_dp:
val_loss = torch.mean(val_loss)
val_loss_mean += val_loss
# reduce manually when using dp
val_acc = output['val_acc']
if self.trainer.use_dp:
val_acc = torch.mean(val_acc)
val_acc_mean += val_acc
val_loss_mean /= len(outputs)
val_acc_mean /= len(outputs)
tqdm_dic = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
return tqdm_dic
class LightningTestStepMixin:
@data_loader
def test_dataloader(self):
return self._dataloader(train=False)
def test_step(self, data_batch, batch_i):
"""
Lightning calls this inside the validation loop
:param data_batch:
:return:
"""
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_i % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_i % 2 == 0:
return test_acc
if batch_i % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
class LightningTestMixin(LightningTestStepMixin):
def test_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from test_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
test_loss_mean = 0
test_acc_mean = 0
for output in outputs:
test_loss = output['test_loss']
# reduce manually when using dp
if self.trainer.use_dp:
test_loss = torch.mean(test_loss)
test_loss_mean += test_loss
# reduce manually when using dp
test_acc = output['test_acc']
if self.trainer.use_dp:
test_acc = torch.mean(test_acc)
test_acc_mean += test_acc
test_loss_mean /= len(outputs)
test_acc_mean /= len(outputs)
tqdm_dic = {'test_loss': test_loss_mean.item(), 'test_acc': test_acc_mean.item()}
return tqdm_dic
class LightningTestStepMultipleDataloadersMixin:
@data_loader
def test_dataloader(self):
return [self._dataloader(train=False), self._dataloader(train=False)]
def test_step(self, data_batch, batch_i, dataloader_i):
"""
Lightning calls this inside the validation loop
:param data_batch:
:return:
"""
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_i % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_i % 2 == 0:
return test_acc
if batch_i % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
if batch_i % 5 == 0:
output = OrderedDict({
f'test_loss_{dataloader_i}': loss_test,
f'test_acc_{dataloader_i}': test_acc,
})
return output
class LightningTestMultipleDataloadersMixin(LightningTestStepMultipleDataloadersMixin):
def test_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from test_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
test_loss_mean = 0
test_acc_mean = 0
for output in outputs:
test_loss = output['test_loss']
# reduce manually when using dp
if self.trainer.use_dp:
test_loss = torch.mean(test_loss)
test_loss_mean += test_loss
# reduce manually when using dp
test_acc = output['test_acc']
if self.trainer.use_dp:
test_acc = torch.mean(test_acc)
test_acc_mean += test_acc
test_loss_mean /= len(outputs)
test_acc_mean /= len(outputs)
tqdm_dic = {'test_loss': test_loss_mean.item(), 'test_acc': test_acc_mean.item()}
return tqdm_dic
# file: Lib/lib-tk/test/test_tkinter/test_misc.py -- gh_stars: 1000+
import unittest
import Tkinter as tkinter
from test.test_support import requires, run_unittest
from test_ttk.support import AbstractTkTest
requires('gui')
class MiscTest(AbstractTkTest, unittest.TestCase):
def test_after(self):
root = self.root
cbcount = {'count': 0}
def callback(start=0, step=1):
cbcount['count'] = start + step
# Without function, sleeps for ms.
self.assertIsNone(root.after(1))
# Set up with callback with no args.
cbcount['count'] = 0
timer1 = root.after(0, callback)
self.assertIn(timer1, root.tk.call('after', 'info'))
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
root.update() # Process all pending events.
self.assertEqual(cbcount['count'], 1)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
# Set up with callback with args.
cbcount['count'] = 0
timer1 = root.after(0, callback, 42, 11)
root.update() # Process all pending events.
self.assertEqual(cbcount['count'], 53)
# Cancel before called.
timer1 = root.after(1000, callback)
self.assertIn(timer1, root.tk.call('after', 'info'))
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
root.after_cancel(timer1) # Cancel this event.
self.assertEqual(cbcount['count'], 53)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
def test_after_idle(self):
root = self.root
cbcount = {'count': 0}
def callback(start=0, step=1):
cbcount['count'] = start + step
# Set up with callback with no args.
cbcount['count'] = 0
idle1 = root.after_idle(callback)
self.assertIn(idle1, root.tk.call('after', 'info'))
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
root.update_idletasks() # Process all pending events.
self.assertEqual(cbcount['count'], 1)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
# Set up with callback with args.
cbcount['count'] = 0
idle1 = root.after_idle(callback, 42, 11)
root.update_idletasks() # Process all pending events.
self.assertEqual(cbcount['count'], 53)
# Cancel before called.
idle1 = root.after_idle(callback)
self.assertIn(idle1, root.tk.call('after', 'info'))
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
root.after_cancel(idle1) # Cancel this event.
self.assertEqual(cbcount['count'], 53)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
def test_after_cancel(self):
root = self.root
cbcount = {'count': 0}
def callback():
cbcount['count'] += 1
timer1 = root.after(5000, callback)
idle1 = root.after_idle(callback)
# No value for id raises a ValueError.
with self.assertRaises(ValueError):
root.after_cancel(None)
# Cancel timer event.
cbcount['count'] = 0
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
root.tk.call(script)
self.assertEqual(cbcount['count'], 1)
root.after_cancel(timer1)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
self.assertEqual(cbcount['count'], 1)
with self.assertRaises(tkinter.TclError):
root.tk.call('after', 'info', timer1)
# Cancel same event - nothing happens.
root.after_cancel(timer1)
# Cancel idle event.
cbcount['count'] = 0
(script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
root.tk.call(script)
self.assertEqual(cbcount['count'], 1)
root.after_cancel(idle1)
with self.assertRaises(tkinter.TclError):
root.tk.call(script)
self.assertEqual(cbcount['count'], 1)
with self.assertRaises(tkinter.TclError):
root.tk.call('after', 'info', idle1)
tests_gui = (MiscTest, )
if __name__ == "__main__":
run_unittest(*tests_gui)
from feature_extraction import features
from classifier_list import classifier
import argparse
import os
parser = argparse.ArgumentParser(description = 'Training model with Traditional Features')
# Paths
parser.add_argument('-tr','--tr_path',type=str,
default = 'data/train/images/',
help = 'Path to the train data')
parser.add_argument('-la','--la_path',type=str,
default = 'data/train/labels/label.csv',
help = 'Path to the label data')
parser.add_argument('-featr','--featr_type',type=str,
default = '#',
help = 'Type of feature selection')
parser.add_argument('-model_type','--model_type',type=str,
default = '#',
help = 'Type of training model selection')
args = parser.parse_args()
train_path = args.tr_path
label_path = args.la_path
featr = args.featr_type
model = args.model_type
features_raw = features.features_extracton(train_path,featr)
classifier.classifier_model(features_raw,label_path,model)
if os.path.exists("Gabor.csv"):
os.remove("Gabor.csv")
if os.path.exists("GLCM.csv"):
os.remove("GLCM.csv")
if os.path.exists("Haralick.csv"):
os.remove("Haralick.csv")
from typing import Union, Optional, Any, Dict
import numpy as np
from l5kit.geometry import transform_points
import torch
__all__ = [
'traj_stat', 'classify_traj', 'comp_val', 'filter_traj'
]
def trajectory_stat(
history_positions: np.array,
target_positions: np.array,
centroid: np.array,
world_to_image: np.array,
) -> Any:
history_pixels = transform_points(history_positions + centroid, world_to_image)
history_pixels -= history_pixels[0]
history_y_change = history_pixels[np.argmax(np.abs(history_pixels[:, 1])), 1]
history_x_change = history_pixels[np.argmax(np.abs(history_pixels[:, 0])), 0]
target_pixels = transform_points(target_positions + centroid, world_to_image)
target_pixels -= target_pixels[0]
target_y_change = target_pixels[np.argmax(np.abs(target_pixels[:, 1])), 1]
target_x_change = target_pixels[np.argmax(np.abs(target_pixels[:, 0])), 0]
hist_diff = np.linalg.norm(np.diff(history_positions, axis=0), axis=1)
history_speed = hist_diff.sum() / history_positions.shape[0]
history_acceleration = (hist_diff[-1] - hist_diff[0]) / hist_diff.shape[0]
target_diff = np.linalg.norm(np.diff(target_positions, axis=0), axis=1)
target_speed = target_diff.sum() / target_positions.shape[0]
target_acceleration = (target_diff[-1] - target_diff[0]) / target_diff.shape[0]
total_acceleration = (target_diff[-1] - hist_diff[0]) / (target_diff.shape[0] + hist_diff.shape[0])
return ('history_y_change', history_y_change), ('history_x_change', history_x_change), \
('target_y_change', target_y_change), ('target_x_change', target_x_change), \
('history_speed', history_speed), ('history_acceleration', history_acceleration), \
('target_speed', target_speed), ('target_acceleration', target_acceleration), \
('total_acceleration', total_acceleration)
def traj_stat(traj: dict, predicted_targets=None) -> Any:
targets = predicted_targets if predicted_targets is not None else traj['target_positions']
return trajectory_stat(traj['history_positions'], targets,
traj['centroid'], traj['world_to_image'])
def classify_traj(
hist_y_change: np.array,
tar_y_change: np.array,
speed_change: np.array,
turn_thresh: Optional[float] = 3.,
speed_thresh: Optional[float] = 0.5,
prefix: Optional[Any] = '',
matrix: Optional[bool] = False
) -> Union[tuple, str]:
if np.abs(tar_y_change) > turn_thresh:
target = 'D' if tar_y_change < 0. else 'U'
else:
target = 'N'
if np.abs(hist_y_change) > turn_thresh:
history = 'U' if hist_y_change < 0. else 'D'
else:
history = 'N'
if np.abs(speed_change) > speed_thresh:
speed = 'D' if speed_change < 0. else 'U'
else:
speed = 'N'
if matrix:
conv = lambda x: 1 if x == 'N' else 0 if x == 'U' else 2
return conv(history), conv(target), conv(speed)
return f'{prefix}{history}{target}{speed}'
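# Example (illustrative): a straight history, an upward target turn and roughly
# constant speed classify as 'NUN'; with matrix=True the same case is encoded
# as (1, 0, 1), since 'N' -> 1, 'U' -> 0 and 'D' -> 2.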
def comp_val(hist_change, tar_change, speed_change, traj_cls: str):
if traj_cls[1] == 'N':
return abs(hist_change), abs(speed_change)
elif traj_cls[0] == 'N':
return abs(tar_change), abs(speed_change)
return abs(tar_change) + abs(hist_change), abs(speed_change)
def filter_traj(traj: dict, static_hist_thresh: Optional[float] = 1.):
value = traj['target_availabilities'].sum()
if value != traj['target_availabilities'].shape[0]:
return 'target', value
value = traj['history_availabilities'].sum()
if value != traj['history_availabilities'].shape[0]:
return 'history', value
value = np.linalg.norm(np.diff(traj['history_positions'], axis=0), axis=1).sum()
if static_hist_thresh and value < static_hist_thresh:
return 'static', value # filter scenes with static history
return False
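# A minimal usage sketch of the helpers above. The sample layout mirrors the keys
# accessed in this module (an l5kit dataset element provides them); the choice of
# total_acceleration as the speed-change input to classify_traj is an assumption.
if __name__ == '__main__':
    sample = {
        'history_positions': np.stack([np.arange(10.), np.zeros(10)], axis=1),
        'target_positions': np.stack([np.arange(50.), np.zeros(50)], axis=1),
        'history_availabilities': np.ones(10),
        'target_availabilities': np.ones(50),
        'centroid': np.zeros(2),
        'world_to_image': np.eye(3),
    }
    if filter_traj(sample) is False:  # fully observed and not static
        stats = dict(traj_stat(sample))
        print(classify_traj(stats['history_y_change'],
                            stats['target_y_change'],
                            stats['total_acceleration']))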
|
StarcoderdataPython
|
95615
|
<reponame>Adrien4193/drone_control<filename>src/drone_control/common/__init__.py
from common import Pose, Attitude, Callback, Timer
|
StarcoderdataPython
|
4922113
|
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host='127.0.0.1'
port=12345
s.bind((host,port))
s.listen()
while True:
    ts, addr = s.accept()
    ts.send(b'Hello')
    print(ts.recv(1024))
    ts.close()
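# A minimal client sketch for exercising the server above (run it from a separate
# process; host and port mirror the hard-coded values). Kept as a comment because
# the accept loop above never returns.
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 12345))
#   print(c.recv(1024))   # b'Hello' sent by the server
#   c.send(b'Hi back')    # printed on the server's stdout
#   c.close()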
|
StarcoderdataPython
|
3536004
|
<gh_stars>0
## A module to read data from a DSS, this specifically implements the Remote ID standard as released on Oct-2020
## For more information review: https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/uastech/standards/astm_rid_1.0/remoteid/canonical.yaml
## and this diagram https://github.com/interuss/dss/blob/master/assets/generated/rid_display.png
from rid_operations.rid_utils import SubscriptionResponse
import logging
from datetime import datetime, timedelta
import uuid
from auth_helper import dss_auth_helper
import json
import redis
from auth_helper.common import get_redis
import requests
import hashlib
import tldextract
from os import environ as env
from dotenv import load_dotenv, find_dotenv
from dataclasses import asdict
from datetime import timedelta
from .rid_utils import SubscriberToNotify, SubscriptionState, Volume4D, ISACreationRequest, ISACreationResponse, IdentificationServiceArea
from typing import Dict, Any, Iterable, List
logger = logging.getLogger('django')
load_dotenv(find_dotenv())
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
class RemoteIDOperations():
def __init__(self):
self.dss_base_url = env.get('DSS_BASE_URL')
self.r = get_redis()
def create_dss_isa(self, flight_extents:Volume4D,flights_url :str , expiration_time_seconds: int = 30) -> ISACreationResponse:
''' This method PUTS /dss/subscriptions '''
# subscription_response = {"created": 0, "subscription_id": 0, "notification_index": 0}
isa_creation_response = ISACreationResponse(created=0,service_area= None, subscribers=[])
new_isa_id = str(uuid.uuid4())
my_authorization_helper = dss_auth_helper.AuthorityCredentialsGetter()
audience = env.get("DSS_SELF_AUDIENCE", 0)
error = None
try:
assert audience
except AssertionError as ae:
logger.error("Error in getting Authority Access Token DSS_SELF_AUDIENCE is not set in the environment")
return isa_creation_response
try:
auth_token = my_authorization_helper.get_cached_credentials(audience = audience, token_type='rid')
except Exception as e:
logger.error("Error in getting Authority Access Token %s " % e)
return isa_creation_response
else:
error = auth_token.get("error")
try:
assert error is None
except AssertionError as ae:
return isa_creation_response
else:
# A token from authority was received,
dss_isa_create_url = self.dss_base_url + 'v1/dss/identification_service_areas/' + new_isa_id
# check if a subscription already exists for this view_port
headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + auth_token['access_token']}
p = ISACreationRequest(extents= flight_extents, flights_url= flights_url)
p_dict = asdict(p)
try:
dss_r = requests.put(dss_isa_create_url, json= json.loads(json.dumps(p_dict)), headers=headers)
except Exception as re:
logger.error("Error in posting to subscription URL %s " % re)
return isa_creation_response
try:
assert dss_r.status_code == 200
isa_creation_response.created = 1
except AssertionError as ae:
logger.error("Error in creating ISA in the DSS %s" % dss_r.text)
return isa_creation_response
else:
dss_response = dss_r.json()
dss_response_service_area = dss_response['service_area']
service_area = IdentificationServiceArea(flights_url= dss_response_service_area['flights_url'], owner=dss_response_service_area['owner'], time_start=dss_response_service_area['time_start'], time_end=dss_response_service_area['time_end'], version= dss_response_service_area['version'], id = dss_response_service_area['id'])
# TODO : Notify subscribers
dss_response_subscribers = dss_response['subscribers']
dss_r_subs:List[SubscriberToNotify] = []
for subscriber in dss_response_subscribers:
subs = subscriber['subscriptions']
all_s = []
for sub in subs:
s = SubscriptionState(subscription_id=sub['subscription_id'],notification_index=sub['notification_index'])
all_s.append(s)
subscriber_to_notify = SubscriberToNotify(url = subscriber['url'],subscriptions=all_s)
dss_r_subs.append(subscriber_to_notify)
for subscriber in dss_r_subs:
url = '{}/{}'.format(subscriber.url, new_isa_id)
try:
ext = tldextract.extract(subscriber.url)
except Exception as e:
uss_audience = 'localhost'
else:
if ext.domain in ['localhost', 'internal']:# for host.docker.internal type calls
uss_audience = 'localhost'
else:
uss_audience = '.'.join(ext[:3]) # get the subdomain, domain and suffix and create a audience and get credentials
uss_auth_token = self.get_auth_token(audience = uss_audience)
# Notify subscribers
payload = {
'service_area': asdict(service_area),
'subscriptions': [asdict(s) for s in subscriber.subscriptions],
'extents': json.loads(json.dumps(asdict(flight_extents)))
}
auth_credentials = my_authorization_helper.get_cached_credentials(audience = uss_audience, token_type='rid')
headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + auth_credentials['access_token']}
try:
notification_request = requests.post(url, headers=headers, json =json.loads(json.dumps(payload)))
except Exception as re:
logger.error("Error in sending subscriber notification to %s : %s " % (url, re))
logger.info("Succesfully created a DSS ISA %s" % new_isa_id)
# iterate over the service areas to get flights URL to poll
isa_key = 'isa-' + service_area.id
isa_seconds_timedelta = timedelta(seconds=expiration_time_seconds)
self.r.set(isa_key, 1)
self.r.expire(name = isa_key, time = isa_seconds_timedelta)
isa_creation_response.created =1
isa_creation_response.service_area = service_area
isa_creation_response.subscribers = dss_r_subs
return asdict(isa_creation_response)
def create_dss_subscription(self, vertex_list:list, view:str, request_uuid, subscription_time_delta: int=30):
''' This method PUTS /dss/subscriptions '''
# subscription_response = {"created": 0, "subscription_id": 0, "notification_index": 0}
subscription_response = SubscriptionResponse(created=0,dss_subscription_id = None, notification_index=0)
my_authorization_helper = dss_auth_helper.AuthorityCredentialsGetter()
audience = env.get("DSS_SELF_AUDIENCE", 0)
error = None
try:
assert audience
except AssertionError as ae:
logger.error("Error in getting Authority Access Token DSS_SELF_AUDIENCE is not set in the environment")
return subscription_response
try:
auth_token = my_authorization_helper.get_cached_credentials(audience = audience, token_type='rid')
except Exception as e:
logger.error("Error in getting Authority Access Token %s " % e)
return subscription_response
else:
error = auth_token.get("error")
try:
assert error is None
except AssertionError as ae:
return subscription_response
else:
# A token from authority was received,
new_subscription_id = str(uuid.uuid4())
dss_subscription_url = self.dss_base_url + 'v1/dss/subscriptions/' + new_subscription_id
# check if a subscription already exists for this view_port
callback_url = env.get("BLENDER_FQDN","https://www.flightblender.com") + "/dss/identification_service_areas"
now = datetime.now()
callback_url += '/'+ new_subscription_id
subscription_seconds_timedelta = timedelta(seconds=subscription_time_delta)
current_time = now.isoformat() + 'Z'
fifteen_seconds_from_now = now + subscription_seconds_timedelta
fifteen_seconds_from_now_isoformat = fifteen_seconds_from_now.isoformat() +'Z'
headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + auth_token['access_token']}
volume_object = {"spatial_volume":{"footprint":{"vertices":vertex_list},"altitude_lo":0.5,"altitude_hi":800},"time_start":current_time,"time_end":fifteen_seconds_from_now_isoformat }
payload = {"extents": volume_object, "callbacks":{"identification_service_area_url":callback_url}}
try:
dss_r = requests.put(dss_subscription_url, json= payload, headers=headers)
except Exception as re:
logger.error("Error in posting to subscription URL %s " % re)
return subscription_response
try:
assert dss_r.status_code == 200
subscription_response.created = 1
except AssertionError as ae:
logger.error("Error in creating subscription in the DSS %s" % dss_r.text)
return subscription_response
else:
dss_response = dss_r.json()
service_areas = dss_response['service_areas']
dss_subscription_details = dss_response['subscription']
subscription_id = dss_subscription_details['id']
notification_index = dss_subscription_details['notification_index']
new_subscription_version = dss_subscription_details['version']
subscription_response.notification_index = notification_index
subscription_response.dss_subscription_id = subscription_id
# logger.info("Succesfully created a DSS subscription ID %s" % subscription_id)
# iterate over the service areas to get flights URL to poll
flights_url_list = ''
for service_area in service_areas:
flights_url = service_area['flights_url']
flights_url_list += flights_url +'?view='+ view + ' '
flights_dict = {'request_id': request_uuid, 'subscription_id': subscription_id,'all_flights_url': flights_url_list, 'notification_index': notification_index, 'view': view, 'expire_at': fifteen_seconds_from_now_isoformat, 'version': new_subscription_version}
subscription_id_flights = "all_uss_flights:" + new_subscription_id
self.r.hmset(subscription_id_flights, flights_dict)
# expire keys in fifteen seconds
self.r.expire(name = subscription_id_flights, time = subscription_seconds_timedelta)
sub_id = 'sub-' + request_uuid
self.r.set(sub_id, view)
self.r.expire(name = sub_id, time = subscription_seconds_timedelta)
view_hash = int(hashlib.sha256(view.encode('utf-8')).hexdigest(), 16) % 10**8
view_sub = 'view_sub-' + str(view_hash)
self.r.set(view_sub, 1)
self.r.expire(name = view_sub, time =subscription_seconds_timedelta)
return subscription_response
def delete_dss_subscription(self,subscription_id):
''' This module calls the DSS to delete a subscription'''
pass
def query_uss_for_rid(self, flights_dict, all_observations, subscription_id:str):
authority_credentials = dss_auth_helper.AuthorityCredentialsGetter()
all_flights_urls_string = flights_dict['all_flights_url']
logging.debug("Flight url list : %s" % all_flights_urls_string)
all_flights_url = all_flights_urls_string.split()
for cur_flight_url in all_flights_url:
try:
ext = tldextract.extract(cur_flight_url)
except Exception as e:
audience = 'localhost'
else:
if ext.domain in ['localhost', 'internal']:# for allowing host.docker.internal setup as well
audience = 'localhost'
else:
audience = '.'.join(ext[:3]) # get the subdomain, domain and suffix and create a audience and get credentials
auth_credentials = authority_credentials.get_cached_credentials(audience = audience, token_type='rid')
headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + auth_credentials['access_token']}
flights_request = requests.get(cur_flight_url, headers=headers)
if flights_request.status_code == 200:
# https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/uastech/standards/astm_rid_1.0/remoteid/canonical.yaml#tag/p2p_rid/paths/~1v1~1uss~1flights/get
flights_response = flights_request.json()
all_flights = flights_response['flights']
for flight in all_flights:
flight_id = flight['id']
try:
assert flight.get('current_state') is not None
except AssertionError as ae:
logger.error('There is no current_state provided by SP on the flights url %s' % cur_flight_url)
logger.debug(json.dumps(flight))
else:
flight_current_state = flight['current_state']
position = flight_current_state['position']
recent_positions = flight['recent_positions'] if 'recent_positions' in flight.keys() else []
flight_metadata = {'id':flight_id,"simulated": flight["simulated"],"aircraft_type":flight["aircraft_type"],'subscription_id':subscription_id, "current_state":flight_current_state,"recent_positions":recent_positions }
# logger.info("Writing flight remote-id data..")
if {"lat", "lng", "alt"} <= position.keys():
# check if lat / lng / alt existis
single_observation = {"icao_address" : flight_id,"traffic_source" :1, "source_type" : 1, "lat_dd" : position['lat'], "lon_dd" : position['lng'], "altitude_mm" : position['alt'],'metadata':json.dumps(flight_metadata)}
# write incoming data directly
all_observations.add(single_observation)
all_observations.trim(1000)
else:
logger.error("Error in received flights data: %{url}s ".format(**flight))
else:
logs_dict = {'url':cur_flight_url, 'status_code':flights_request.status_code}
logger.info("Received a non 200 error from {url} : {status_code} ".format(**logs_dict))
logger.info("Detailed Response %s" % flights_request.text)
|
StarcoderdataPython
|
1926717
|
<gh_stars>0
import os
import random
import string
import tempfile
import unittest
from grail import Grail
def gen_random_string(N=10):
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))


class TestGrail(unittest.TestCase):
    def test_grail_2(self):
        # Get a random temporary name for the file.
        login = next(tempfile._get_candidate_names())
        key = gen_random_string(16)
        text1 = """I am the very model of a modern Major-General,
My animation's comical, unusual, and whimsical,
I know the kings of England, and I quote the fights historical,
From Marathon to Waterloo, in order categorical."""
        text2 = """I am the very model of a cartoon individual,
My animation's comical, unusual, and whimsical,
I'm quite adept at funny gags, comedic theory I have read,
From wicked puns and stupid jokes to anvils that drop on your head."""
        g = Grail(tempfile.gettempdir())
        g.open(login, key)
        grail_file = g.get_grail_file()
        self.assertTrue(os.path.exists(grail_file))
        g.update(text1)
        g.update(text2)
        self.assertTrue(g.valid())
        self.assertEqual(g.get(), text2)
        g.save()
        g.open(login, key)
        self.assertEqual(g.get(), text2)
        g.destroy()
        # self.assertFalse(os.path.exists(grail_file))
|
StarcoderdataPython
|
8020475
|
<reponame>bendichter/brainrender
"""
This tutorial shows how to automatically take screenshots of your rendered region
"""
import time
import brainrender
brainrender.SHADER_STYLE = "cartoon"
from brainrender.scene import Scene
screenshot_params = dict(folder="./screenshots", name="tutorial",)
# Create a scene
scene = Scene(
screenshot_kwargs=screenshot_params
) # use screenshot_params to specify where the screenshots will be saved
scene.add_brain_regions(["TH"])
# render
scene.render(
camera="top", interactive=False
) # if interactive is false the program won't stop when the scene is rendered
# which means that the next line will be executed
scene.take_screenshot()
time.sleep(1)
# Take another screenshot from a different angle
scene.render(camera="coronal", interactive=False, zoom=0.5)
scene.take_screenshot() # screenshots are saved with a timestamp in the name so you won't be overwriting the previous one.
# Render interactively. You can then press 's' to take a screenshot
scene.render()
|
StarcoderdataPython
|
3359767
|
import os
import logging
from functools import wraps
MOD = "modification"
PWD = os.getcwd()
modification_logger = logging.getLogger(MOD)
modification_logger.addHandler(
logging.NullHandler()
) # TODO: think whether it should be nullhandler by default
modification_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
def logged():
    def wrap(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            response = function(*args, **kwargs)
            return response
        return wrapper
    return wrap
modlog = modification_logger
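# A small usage sketch (the handler choice and messages are illustrative; the
# module itself deliberately installs only a NullHandler):
if __name__ == "__main__":
    _handler = logging.StreamHandler()
    _handler.setFormatter(formatter)
    modlog.addHandler(_handler)

    @logged()
    def add(a, b):
        modlog.debug("adding %s and %s", a, b)
        return a + b

    modlog.info("result: %s", add(1, 2))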
|
StarcoderdataPython
|
5194610
|
from iocbuilder import Device, AutoSubstitution, Architecture, SetSimulation
from iocbuilder.arginfo import *
from iocbuilder.modules.asyn import Asyn, AsynPort, AsynIP
from iocbuilder.modules.areaDetector import AreaDetector, _NDPluginBase, _ADBase, _ADBaseTemplate, simDetector
class _SpecsAnalyser(AutoSubstitution):
TemplateFile= "specsAnalyser.template"
SubstitutionOverwrites = [_ADBaseTemplate]
class SpecsAnalyserDLD(AutoSubstitution):
TemplateFile= "specsAnalyserDLD.template"
class SpecsAnalyser(_ADBase):
"""Create a SPECS Analyser detector"""
_BaseTemplate = _ADBaseTemplate
_SpecificTemplate = _SpecsAnalyser
AutoInstantiate = True
def __init__(self, DRIVER="DRV.1", BUFFERS=50, MEMORY=-1, **args):
self.__super.__init__(**args)
self.__dict__.update(locals())
# __init__ arguments
ArgInfo = _ADBase.ArgInfo + _BaseTemplate.ArgInfo + makeArgInfo(__init__,
DRIVER = Simple('Asyn driver port to connect to for communication to the device', str),
BUFFERS = Simple('Maximum number of NDArray buffers to be created for plugin callbacks', int),
MEMORY = Simple('Max memory to allocate, should be maxw*maxh*nbuffer for driver and all attached plugins', int))
LibFileList = ['specs']
DbdFileList = ['specsAnalyserSupport']
SysLibFileList = []
MakefileStringList = []
def Initialise(self):
print '# specsAnalyserConfig(portName, driverName, maxBuffers, maxMemory )'
print 'specsAnalyserConfig( %(PORT)10s, %(DRIVER)12s, %(BUFFERS)10d, %(MEMORY)9d )' % self.__dict__
|
StarcoderdataPython
|
5164016
|
<filename>acestream/ACEStream/Core/BitTornado/BT1/Rerequester.py
#Embedded file name: ACEStream\Core\BitTornado\BT1\Rerequester.pyo
import sys
import socket
import random
import struct
import binascii
import urlparse
from ACEStream.Core.BitTornado.zurllib import urlopen
from urllib import quote
from btformats import check_peers
from ACEStream.Core.BitTornado.bencode import bdecode
from threading import Thread, Lock, currentThread
from cStringIO import StringIO
from traceback import print_exc, print_stack
from ACEStream.Core.Utilities.TSCrypto import sha
from ACEStream.Core.Utilities.utilities import test_network_connection
from time import time
from ACEStream.Core.simpledefs import *
from ACEStream.Core.Utilities.logger import log, log_exc
import ACEStream.Core.DecentralizedTracking.mainlineDHT as mainlineDHT
if mainlineDHT.dht_imported:
from ACEStream.Core.DecentralizedTracking.pymdht.core.identifier import Id, IdError
try:
from os import getpid
except ImportError:
def getpid():
return 1
try:
True
except:
True = 1
False = 0
DEBUG = False
DEBUG_DHT = False
DEBUG_LOCK = False
DEBUG_CHECK_NETWORK_CONNECTION = False
DEBUG_ANNOUNCE = False
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def merge_announce(tracker, params):
if '?' in tracker:
return tracker + '&' + params[1:]
else:
return tracker + params
def add_key(tracker):
key = ''
for i in sha(basekeydata + tracker).digest()[-6:]:
key += mapbase64[ord(i) & 63]
keys[tracker] = key
def get_key(tracker):
try:
return '&key=' + keys[tracker]
except:
add_key(tracker)
return '&key=' + keys[tracker]
class fakeflag():
def __init__(self, state = False):
self.state = state
def wait(self):
pass
def isSet(self):
return self.state
class Rerequester():
def __init__(self, trackerlist, interval, sched, howmany, minpeers, connect, externalsched, amount_left, up, down, port, ip, myid, infohash, timeout, errorfunc, excfunc, maxpeers, doneflag, upratefunc, downratefunc, unpauseflag = fakeflag(True), config = None, am_video_source = False, is_private = False):
self.excfunc = excfunc
newtrackerlist = []
for tier in trackerlist:
if len(tier) > 1:
random.shuffle(tier)
newtrackerlist += [tier]
self.trackerlist = newtrackerlist
self.lastsuccessful = ''
self.rejectedmessage = 'rejected by tracker - '
self.port = port
self.am_video_source = am_video_source
self.network_check_url_list = []
self.network_check_url_list.append(['http://google.com', None])
for t in xrange(len(self.trackerlist)):
for tr in xrange(len(self.trackerlist[t])):
tracker_url = self.trackerlist[t][tr]
if tracker_url != 'http://retracker.local/announce':
self.network_check_url_list.append([tracker_url, None])
self.url = '?info_hash=%s&peer_id=%s&port=%s' % (quote(infohash), quote(myid), str(port))
self.ip = ip
self.myid = myid
self.interval = interval
self.last = None
self.trackerid = None
self.announce_interval = 1800
self.sched = sched
self.howmany = howmany
self.minpeers = minpeers
self.connect = connect
self.externalsched = externalsched
self.amount_left = amount_left
self.up = up
self.down = down
self.timeout = timeout
self.errorfunc = errorfunc
self.maxpeers = maxpeers
self.doneflag = doneflag
self.upratefunc = upratefunc
self.downratefunc = downratefunc
self.unpauseflag = unpauseflag
self.last_failed = True
self.never_succeeded = True
self.errorcodes = {}
self.lock = SuccessLock(infohash)
self.network_lock = Lock()
self.special = None
self.started = False
self.stopped = False
self.schedid = 'rerequest-' + binascii.hexlify(infohash) + '-'
self.infohash = infohash
self.log_prefix = 'rerequester::' + binascii.hexlify(self.infohash) + ':'
if DEBUG:
log(self.log_prefix + '__init__: ip', ip, 'port', self.port, 'myid', myid, 'quoted_id', quote(myid))
if is_private:
if DEBUG:
log(self.log_prefix + '__init__: private torrent, disable DHT')
self.dht = None
else:
self.dht = mainlineDHT.dht
self.config = config
self.notifiers = []
def start(self):
if not self.started:
self.started = True
self.sched(self.c, self.interval / 2, self.schedid + 'c')
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'start: shed(c), self.interval', self.interval)
if self.amount_left():
event = 0
else:
event = 3
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'start: call d(%d)' % event)
self.d(event)
self.init_check_network_connection()
def c(self):
if self.stopped:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'c: stopped, return')
return
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'c: self.unpauseflag.isSet()', self.unpauseflag.isSet(), 'self.howmany()', self.howmany(), 'self.minpeers', self.minpeers, 'thread', currentThread().name)
if not self.unpauseflag.isSet() and self.howmany() < self.minpeers:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'c: call announce(3, _c)')
self.announce(3, self._c)
else:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'c: call _c()')
self._c()
def _c(self):
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + '_c: sched c(), interval', self.interval, 'thread', currentThread().name)
self.sched(self.c, self.interval)
def d(self, event = 3):
if self.stopped:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'd: stopped, return')
return
if not self.unpauseflag.isSet():
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'd: unpauseflag is set, call _d() and return')
self._d()
return
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'd: call announce(%d, _d)' % event, 'thread', currentThread().name)
self.announce(event, self._d)
def _d(self):
if self.never_succeeded:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + '_d: never succeeded, shed d() in 60 seconds')
self.sched(self.d, 60)
else:
self.sched(self.d, self.announce_interval)
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + '_d: shed d(): announce_interval', self.announce_interval, 'thread', currentThread().name)
def run_dht(self):
if DEBUG:
print >> sys.stderr, 'Rerequester::run_dht: call rerequest_dht()'
self.rerequest_dht()
self.sched(self.run_dht, 60, self.schedid + 'run_dht')
def encoder_wants_new_peers(self):
if self.am_video_source:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'encoder_wants_new_peers: do nothing for live source')
return
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'encoder_wants_new_peers: ---')
task = lambda : self.announce()
self.sched(task)
def init_check_network_connection(self):
t = Thread(target=self.check_network_connection, args=[False, 5, True])
t.name = 'RerequestCheckNetwork' + t.name
t.daemon = True
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + 'init_check_network_connection: start check_network_connection(): thread', t.name)
t.start()
def check_network_connection(self, announce = True, interval = 5, populate_url_list = False):
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: announce', announce, 'populate_url_list', populate_url_list, 'interval', interval)
if not self.network_lock.acquire(False):
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: locked, return')
return False
for i in xrange(len(self.network_check_url_list)):
url = self.network_check_url_list[i][0]
ip = self.network_check_url_list[i][1]
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: test', url, 'ip', ip)
success = False
if ip is None:
ip = test_network_connection(url, getip=True)
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: query ip', ip)
if ip is not None:
self.network_check_url_list[i][1] = ip
success = True
else:
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: test by ip', ip)
success = test_network_connection(host=ip)
if populate_url_list:
continue
if success:
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: success', url)
if announce:
announce_lambda = lambda : self.announce()
self.sched(announce_lambda)
self.network_lock.release()
return True
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: failed', url)
self.network_lock.release()
if populate_url_list:
return True
if DEBUG_CHECK_NETWORK_CONNECTION:
log(self.log_prefix + 'check_network_connection: all failed, possible there is no network, retry in ', interval, 'seconds')
if not populate_url_list:
task = lambda : self.check_network_connection(announce=True, interval=interval)
self.sched(task, interval)
return False
def announce(self, event = 3, callback = lambda : None, specialurl = None):
if ':' in self.ip:
compact = 0
else:
compact = 1
params = {}
if specialurl is not None:
s = self.url + '&uploaded=0&downloaded=0&left=1'
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
params['numwant'] = 0
else:
params['numwant'] = 200
s += '&numwant=200'
s += '&no_peer_id=1'
if compact:
s += '&compact=1'
self.last_failed = True
self.special = specialurl
params['uploaded'] = 0
params['downloaded'] = 0
params['left'] = 1
self.rerequest(s, callback, params)
return
s = '%s&uploaded=%s&downloaded=%s&left=%s' % (self.url,
str(self.up()),
str(self.down()),
str(self.amount_left()))
params['uploaded'] = int(self.up())
params['downloaded'] = int(self.down())
params['left'] = int(self.amount_left())
if self.last is not None:
s += '&last=' + quote(str(self.last))
if self.trackerid is not None:
s += '&trackerid=' + quote(str(self.trackerid))
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
params['numwant'] = 0
else:
params['numwant'] = 200
s += '&numwant=200'
s += '&no_peer_id=1'
if compact:
s += '&compact=1'
if event != 3:
event_name = ['started', 'completed', 'stopped'][event]
s += '&event=' + event_name
params['event'] = event
if event == 2:
self.stopped = True
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'announce: event', event, 'callback', callback, 'params', params, 'thread', currentThread().name)
self.rerequest(s, callback, params)
def snoop(self, peers, callback = lambda : None):
params = {'event': 'stopped',
'port': 0,
'uploaded': 0,
'downloaded': 0,
'left': 1,
'numwant': int(peers)}
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + 'snoop: peers', peers, 'callback', callback, 'params', params, 'thread', currentThread().name)
self.rerequest(self.url + '&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant=' + str(peers), callback, params)
def rerequest(self, s, callback, params):
proxy_mode = self.config.get('proxy_mode', 0)
if proxy_mode == PROXY_MODE_PRIVATE:
if DEBUG:
log(self.log_prefix + 'PROXY_MODE_PRIVATE, rerequest exited')
return
if DEBUG_ANNOUNCE:
print_stack()
if not self.lock.isready():
def retry(self = self, s = s, callback = callback, params = params):
self.rerequest(s, callback, params)
self.sched(retry, 5)
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + 'rerequest: locked, retry in 5 seconds: s', s, 'callback', callback, 'params', params, 'thread', currentThread().name)
return
rq = Thread(target=self._rerequest, args=[s, callback, params])
rq.name = 'TrackerRerequestA' + rq.name
rq.daemon = True
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + 'rerequest: start new request: s', s, 'thread', rq.name)
rq.start()
def rerequest_dht(self):
rq = Thread(target=self._dht_rerequest)
rq.setName('RerequestDHT' + rq.getName())
rq.setDaemon(True)
rq.start()
def _rerequest(self, s, callback, params):
try:
self.lock.start()
def fail(self = self, callback = callback):
self._fail(callback)
if self.ip:
try:
if ':' in self.ip:
urlip = '[' + self.ip + ']'
field = 'ipv6'
else:
urlip = self.ip
field = 'ip'
s += '&' + field + '=' + urlip
except:
self.errorcodes['troublecode'] = 'unable to resolve: ' + self.ip
self.externalsched(fail)
self.errorcodes = {}
if self.special is None:
if not self.dht:
if DEBUG_DHT:
log(self.log_prefix + '_rerequest: no DHT support loaded')
elif self.am_video_source:
if DEBUG_DHT:
log(self.log_prefix + '_rerequest: disable dht for live source')
else:
self._dht_rerequest()
if DEBUG:
log(self.log_prefix + '_rerequest: current tracker list:', self.trackerlist)
new_tracker_list = []
for t in range(len(self.trackerlist)):
for tr in range(len(self.trackerlist[t])):
tracker = self.trackerlist[t][tr]
if DEBUG:
log(self.log_prefix + '_rerequest: trying tracker', tracker)
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest: call rerequest_single(): tracker', tracker, 'thread', currentThread().name)
ret = self.rerequest_single(tracker, s, params)
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest: rerequest_single() finished: ret', ret, 'tracker', tracker, 'thread', currentThread().name)
if ret and not self.last_failed:
new_tracker_list.insert(0, [tracker])
else:
new_tracker_list.append([tracker])
if DEBUG:
log(self.log_prefix + '_rerequest: new tracker list:', new_tracker_list)
self.trackerlist = new_tracker_list[:]
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + '_rerequest: return: thread', currentThread().name)
callback()
return
tracker = self.special
self.special = None
if self.rerequest_single(tracker, s, callback):
callback()
return
self.externalsched(fail)
except:
self.exception(callback)
finally:
self.lock.finish()
def _fail(self, callback):
if self.upratefunc() < 100 and self.downratefunc() < 100 or not self.amount_left():
for f in ['rejected', 'bad_data', 'troublecode']:
if self.errorcodes.has_key(f):
r = self.errorcodes[f]
break
else:
r = 'Problem connecting to tracker - unspecified error:' + `(self.errorcodes)`
self.errorfunc(r)
self.last_failed = True
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + '_fail: give up: thread', currentThread().name)
self.lock.give_up()
self.externalsched(callback)
def rerequest_single(self, t, s, params):
l = self.lock.set()
if t.startswith('udp'):
target = self._rerequest_single_udp
args = [t, params, l]
else:
target = self._rerequest_single
args = [t, s + get_key(t), l]
rq = Thread(target=target, args=args)
rq.name = 'TrackerRerequestB' + rq.name
rq.daemon = True
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + 'rerequest_single: start new thread: t', t, 'set_lock', l, 'thread', rq.name)
rq.start()
if DEBUG_LOCK:
log(self.log_prefix + 'rerequest_single: wait for lock: thread', currentThread().name)
self.lock.wait()
if DEBUG_LOCK or DEBUG_ANNOUNCE:
log(self.log_prefix + 'rerequest_single: wait for lock done: success', self.lock.success, 'thread', currentThread().name)
if self.lock.success:
self.lastsuccessful = t
self.last_failed = False
self.never_succeeded = False
return True
if not self.last_failed and self.lastsuccessful == t:
self.last_failed = True
self.lock.give_up()
return True
return False
def _rerequest_single_udp(self, t, params, l):
try:
if self.ip:
ip = self.ip
else:
ip = 0
e = params.get('event', '')
if e == 'completed':
event = 1
elif e == 'started':
event = 2
elif e == 'stopped':
event = 3
else:
event = 0
url = urlparse.urlparse(t)
host = url.hostname
port = url.port
if port is None:
port = 80
interval, peers = self.udp_announce(host, port, infohash=self.infohash, peerid=self.myid, timeout=self.timeout, downloaded=params.get('downloaded', 0), left=params.get('left', 0), uploaded=params.get('uploaded', 0), event=0, ip=ip, key=0, num_want=params.get('numwant', 0), clport=self.port)
peer_list = []
for nip, port in peers:
aip = socket.inet_ntoa(struct.pack('!I', nip))
peer_list.append({'ip': aip,
'port': port})
resp = {'interval': interval,
'peers': peer_list}
if self.lock.trip(l, True):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single_udp: trip success, unwait: l', l, 't', t, 'thread', currentThread().name)
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single_udp: trip success, no trip: l', l, 't', t, 'thread', currentThread().name)
if DEBUG:
log(self.log_prefix + '_rerequest_single_udp: resp', resp)
def add(self = self, r = resp):
self.postrequest(r, 'tracker=' + t, self.notifiers)
self.externalsched(add)
except:
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single_udp: exception in udp announce, unwait: l', l, 't', t, 'thread', currentThread().name)
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single_udp: exception in udp announce, no trip: l', l, 't', t, 'thread', currentThread().name)
if DEBUG:
print_exc()
def _rerequest_single(self, t, s, l):
try:
closer = [None]
def timedout(self = self, l = l, closer = closer):
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single:timedout: unwait: l', l, 't', t, 'thread', currentThread().name)
self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single:timedout: no trip: l', l, 't', t, 'thread', currentThread().name)
try:
closer[0]()
except:
pass
self.externalsched(timedout, self.timeout)
err = None
try:
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + '_rerequest_single: request tracker', merge_announce(t, s), 'thread', currentThread().name)
h = urlopen(merge_announce(t, s), silent=True)
closer[0] = h.close
data = h.read()
except (IOError, socket.error) as e:
err = 'Problem connecting to tracker - ' + str(e)
if DEBUG:
log(self.log_prefix + '_rerequest_single: failed to connect to tracker')
except:
err = 'Problem connecting to tracker'
if DEBUG:
log(self.log_prefix + '_rerequest_single: failed to connect to tracker')
try:
h.close()
except:
pass
if err:
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: got error, unwait: l', l, 't', t, 'thread', currentThread().name, 'err', err)
self.errorcodes['troublecode'] = err
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: got error, no trip: l', l, 't', t, 'thread', currentThread().name, 'err', err)
return
if not data:
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: no data, unwait: l', l, 't', t, 'thread', currentThread().name)
self.errorcodes['troublecode'] = 'no data from tracker'
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: no data, no trip: l', l, 't', t, 'thread', currentThread().name)
return
try:
r = bdecode(data, sloppy=1)
if DEBUG or DEBUG_ANNOUNCE:
log(self.log_prefix + '_rerequest_single: response from tracker: t', t, 'r', r, 'thread', currentThread().name)
check_peers(r)
except ValueError as e:
if DEBUG:
log_exc()
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: exception while decoding data, unwait: l', l, 't', t, 'thread', currentThread().name)
self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: exception while decoding data, no trip: l', l, 't', t, 'thread', currentThread().name)
return
if r.has_key('failure reason'):
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: got failure reason, unwait: l', l, 't', t, 'thread', currentThread().name)
self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: got failure reason, no trip: l', l, 't', t, 'thread', currentThread().name)
return
if self.lock.trip(l, True):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: trip success, unwait: l', l, 't', t, 'thread', currentThread().name)
self.lock.unwait(l)
elif DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: trip success, no trip: l', l, 't', t, 'thread', currentThread().name)
def add(self = self, r = r):
self.postrequest(r, 'tracker=' + t, self.notifiers)
self.externalsched(add)
except:
print_exc()
if self.lock.trip(l):
if DEBUG_LOCK:
log(self.log_prefix + '_rerequest_single: got exception, unwait: l', l, 't', t, 'thread', currentThread().name)
self.lock.unwait(l)
def udp_announce(self, host, port, infohash, peerid, timeout = 15, downloaded = 0, left = 0, uploaded = 0, event = 0, ip = 0, key = 0, num_want = -1, clport = 1111):
if DEBUG:
log(self.log_prefix + 'udp_announce: host', host, 'port', port, 'infohash', infohash, 'peerid', peerid, 'event', event, 'ip', ip, 'clport', clport, 'num_want', num_want, 'key', key, 'timeout', timeout, 'downloaded', downloaded, 'uploaded', uploaded, 'left', left)
action = {'connect': 0,
'announce': 1,
'scrape': 2,
'error': 3}
conn_head_size = 16
announce_head_size = 20
error_head_size = 8
peer_size = 6
default_connection = 4497486125440L
transaction = random.randint(0, 999999)
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.settimeout(timeout)
connect_request_pack = struct.pack('>QLL', default_connection, action['connect'], transaction)
udp_socket.sendto(connect_request_pack, (host, port))
response = udp_socket.recv(512)
resp_action = struct.unpack('>L', response[:4])[0]
if resp_action != action['connect']:
if resp_action == action['error']:
errmsg_len = len(response) - error_head_size
errmsg = struct.unpack('>' + str(errmsg_len) + 's', response[8:])[0]
raise Exception(errmsg)
raise Exception('got unexpected action')
default_connection = struct.unpack('>Q', response[8:])[0]
transaction = random.randint(0, 999999)
announce_request_pack = struct.pack('>QLL20s20sQQQLLLlH', default_connection, action['announce'], transaction, infohash, peerid, downloaded, left, uploaded, event, ip, key, num_want, clport)
udp_socket.sendto(announce_request_pack, (host, port))
response = udp_socket.recv(1220)
resp_action = struct.unpack('>L', response[:4])[0]
if resp_action != action['announce']:
if resp_action == action['error']:
errmsg_len = len(response) - error_head_size
errmsg = struct.unpack('>' + str(errmsg_len) + 's', response[8:])[0]
raise Exception(errmsg)
raise Exception('got unexpected action')
resp_transaction = struct.unpack('>L', response[4:8])[0]
if resp_transaction != transaction:
raise Exception('got incorrect transaction')
interval = struct.unpack('>L', response[8:12])[0]
peers_in_response = (len(response) - announce_head_size) / peer_size
i = 0
peers = []
while i < peers_in_response:
peer_unpack = struct.unpack('>LH', response[i * peer_size + announce_head_size:(i + 1) * peer_size + announce_head_size])
peers.append(peer_unpack)
i = i + 1
return (interval, peers)
def _dht_rerequest(self):
if DEBUG_DHT:
log(self.log_prefix + '_dht_rerequest: infohash', self.infohash)
try:
info_hash_id = Id(self.infohash)
except IdError:
log(self.log_prefix + '_dht_rerequest: self.info_hash is not a valid identifier')
return
if 'dialback' in self.config and self.config['dialback']:
if DEBUG_DHT:
log(self.log_prefix + '_dht_rerequest: get_peers AND announce')
self.dht.get_peers(self.infohash, info_hash_id, self._dht_got_peers, self.port)
return
if DEBUG_DHT:
log(self.log_prefix + '_dht_rerequest: JUST get_peers, DO NOT announce')
self.dht.get_peers(self.infohash, info_hash_id, self._dht_got_peers)
def _dht_got_peers(self, infohash, peers):
if DEBUG_DHT:
if peers:
log(self.log_prefix + 'DHT: Received', len(peers), 'peers', currentThread().getName())
else:
log(self.log_prefix + 'DHT: Received no peers', currentThread().getName())
if not peers:
return
p = [ {'ip': peer[0],
'port': peer[1]} for peer in peers ]
if p:
r = {'peers': p}
def add(self = self, r = r):
self.postrequest(r, 'dht')
self.externalsched(add)
def add_notifier(self, cb):
self.notifiers.append(cb)
def postrequest(self, r, source, notifiers = []):
try:
if source is None:
source = ''
if r.has_key('warning message'):
if DEBUG:
log(self.log_prefix + 'postrequest: tracker warning:', r['warning message'])
self.errorfunc('warning from tracker - ' + r['warning message'])
self.announce_interval = r.get('interval', self.announce_interval)
self.interval = r.get('min interval', self.interval)
if DEBUG:
log(self.log_prefix + 'postrequest: %s: announce min is' % source, self.announce_interval, self.interval)
self.trackerid = r.get('tracker id', self.trackerid)
self.last = r.get('last', self.last)
peers = []
p = r.get('peers')
if p is not None:
if type(p) == type(''):
for x in xrange(0, len(p), 6):
ip = '.'.join([ str(ord(i)) for i in p[x:x + 4] ])
port = ord(p[x + 4]) << 8 | ord(p[x + 5])
peers.append(((ip, port), 0))
else:
for x in p:
peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0)))
else:
p = r.get('peers6')
if type(p) == type(''):
for x in xrange(0, len(p), 18):
hexip = binascii.b2a_hex(p[x:x + 16])
ip = ''
for i in xrange(0, len(hexip), 4):
ip += hexip[i:i + 4]
if i + 4 != len(hexip):
ip += ':'
port = ord(p[x + 16]) << 8 | ord(p[x + 17])
peers.append(((ip, port), 0))
else:
for x in p:
peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0)))
log(self.log_prefix + 'Got IPv6 peer addresses, not yet supported, ignoring.')
peers = []
if DEBUG:
log(self.log_prefix + 'postrequest: %s: Got peers' % source, peers)
ps = len(peers) + self.howmany()
if ps < self.maxpeers:
if self.doneflag.isSet():
if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
self.last = None
elif r.get('num peers', 1000) > ps * 1.2:
self.last = None
if peers:
random.shuffle(peers)
if self.am_video_source:
if DEBUG:
log(self.log_prefix + 'postrequest: do not start connections for live source')
else:
self.connect(peers)
for notifier in notifiers:
notifier(peers)
except:
log(self.log_prefix + 'postrequest: error in postrequest')
log_exc()
def exception(self, callback):
data = StringIO()
print_exc(file=data)
def r(s = data.getvalue(), callback = callback):
if self.excfunc:
self.excfunc(s)
else:
print s
callback()
self.externalsched(r)
class SuccessLock():
def __init__(self, infohash = None):
self.lock = Lock()
self.pause = Lock()
self.ready = Lock()
self.code = 0L
self.success = False
self.finished = True
self.log_prefix = 'rerequester:successlock::'
if infohash is not None:
self.log_prefix += binascii.hexlify(infohash) + ':'
def start(self):
if DEBUG_LOCK:
log(self.log_prefix + 'start: acquire ready lock: thread', currentThread().name)
self.ready.acquire()
if DEBUG_LOCK:
log(self.log_prefix + 'start: acquire ready lock done: thread', currentThread().name)
self.success = False
self.finished = False
def finish(self):
if DEBUG_LOCK:
log(self.log_prefix + 'finish: release ready lock: thread', currentThread().name)
self.ready.release()
def isready(self):
locked = self.ready.locked()
if DEBUG_LOCK:
log(self.log_prefix + 'isready: ready lock status: locked', locked, 'thread', currentThread().name)
return not locked
def set(self):
if DEBUG_LOCK:
log(self.log_prefix + 'set: acquire lock: thread', currentThread().name)
self.lock.acquire()
if DEBUG_LOCK:
log(self.log_prefix + 'set: acquire lock done: thread', currentThread().name)
if not self.pause.locked():
if DEBUG_LOCK:
log(self.log_prefix + 'set: pause is not locked, acquire: thread', currentThread().name)
self.pause.acquire()
if DEBUG_LOCK:
log(self.log_prefix + 'set: pause acquire done: thread', currentThread().name)
elif DEBUG_LOCK:
log(self.log_prefix + 'set: pause is locked: thread', currentThread().name)
self.first = True
self.finished = False
self.success = False
self.code += 1L
self.lock.release()
if DEBUG_LOCK:
log(self.log_prefix + 'set: release lock and return: first', self.first, 'code', self.code, 'thread', currentThread().name)
return self.code
def trip(self, code, s = False):
if DEBUG_LOCK:
log(self.log_prefix + 'trip: acquire lock: code', code, 's', s, 'self.code', self.code, 'self.finished', self.finished, 'thread', currentThread().name)
self.lock.acquire()
if DEBUG_LOCK:
log(self.log_prefix + 'trip: acquire lock done: code', code, 's', s, 'self.code', self.code, 'self.finished', self.finished, 'thread', currentThread().name)
try:
if code == self.code and not self.finished:
r = self.first
self.first = False
if s:
self.finished = True
self.success = True
if DEBUG_LOCK:
log(self.log_prefix + 'trip: got match: code', code, 's', s, 'self.code', self.code, 'self.finished', self.finished, 'self.success', self.success, 'r', r, 'thread', currentThread().name)
return r
if DEBUG_LOCK:
log(self.log_prefix + 'trip: no match: code', code, 'self.code', self.code, 'self.finished', self.finished, 'thread', currentThread().name)
finally:
self.lock.release()
def give_up(self):
self.lock.acquire()
self.success = False
self.finished = True
if DEBUG_LOCK:
log(self.log_prefix + 'give_up: self.success', self.success, 'self.finished', self.finished, 'thread', currentThread().name)
self.lock.release()
def wait(self):
if DEBUG_LOCK:
log(self.log_prefix + 'wait: acquire pause: thread', currentThread().name)
self.pause.acquire()
if DEBUG_LOCK:
log(self.log_prefix + 'wait: acquire pause done: thread', currentThread().name)
def unwait(self, code):
if code == self.code and self.pause.locked():
if DEBUG_LOCK:
log(self.log_prefix + 'unwait: release pause: code', code, 'self.code', self.code, 'thread', currentThread().name)
self.pause.release()
elif DEBUG_LOCK:
log(self.log_prefix + 'unwait: do not release pause: code', code, 'self.code', self.code, 'thread', currentThread().name)
def isfinished(self):
self.lock.acquire()
x = self.finished
self.lock.release()
if DEBUG_LOCK:
log(self.log_prefix + 'isfinished: x', x, 'thread', currentThread().name)
return x
|
StarcoderdataPython
|
1832165
|
from django import template
register = template.Library()
from re import compile, UNICODE
#RUBY_TEXT_MARKUP_TEMPLATE = u'<ruby><rb>{expression}</rb><rp>(</rp><rt>{reading}</rt><rp>)</rp></ruby>'
RUBY_TEXT_MARKUP_TEMPLATE = '<span class="ezRuby" title="{reading}">{expression}</span>'
#_LEFT_CARET = u'\<'
#_RIGHT_CARET = u'\>'
#ruby_prog = compile(u'\<(.*)\|(.*)\>')
#unescaped_ruby_prog = compile(u'<(.*)\|(.*)>')
#ruby_prog = compile(u'\s(.*)\[(.*)\]', UNICODE) #for some reason, only [\s^] works, and [^\s] doesn't - so [] must prefer the first option it's given over the second
ruby_prog = compile('\s?(\S*?)\[(.*?)\]', UNICODE) #for some reason, only [\s^] works, and [^\s] doesn't - so [] must prefer the first option it's given over the second
#ruby_prog2 = compile(u'^(.*)\[(.*)\]', UNICODE)
#unescaped_ruby_prog = compile(u'[^\s](.*)\[(.*)\]')
def furiganaize(text):
new_text = ''
last_match_end = 0
for match in ruby_prog.finditer(text):
expression, reading = match.group(1, 2)
start, end = match.start(), match.end()
new_substring = RUBY_TEXT_MARKUP_TEMPLATE.format(expression=expression, reading=reading)
new_text += text[last_match_end:start] + new_substring
last_match_end = end
new_text += text[last_match_end:]
return new_text
#TODO-OLD move below into some other module
def strip_ruby_text(text):
new_text = ''
last_match_end = 0
for match in ruby_prog.finditer(text):
expression = match.group(1)
start, end = match.start(), match.end()
new_substring = expression
new_text += text[last_match_end:start] + new_substring
last_match_end = end
new_text += text[last_match_end:]
return new_text
def strip_ruby_bottom(text):
'''
Returns this field's content with each expression replaced by its ruby
(reading) text, leaving the surrounding text unchanged.
TA[ta]beru becomes taberu
'''
new_text = ''
last_match_end = 0
for match in ruby_prog.finditer(text):
reading = match.group(2)
start, end = match.start(), match.end()
new_substring = reading
new_text += text[last_match_end:start] + new_substring
last_match_end = end
new_text += text[last_match_end:]
return new_text
@register.tag(name="furiganaize")
def do_furiganaize(parser, token):
nodelist = parser.parse(('endfuriganaize',))
parser.delete_first_token()
return FuriganaizeNode(nodelist)
class FuriganaizeNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
output = self.nodelist.render(context)
# convert everything of the form kanji[kana] to kanji wrapped in CSS ruby markup
return furiganaize(output)
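# A short illustration of the bracket markup handled above (the example string is
# illustrative; the outputs follow from ruby_prog as defined in this module):
#   furiganaize('漢字[かんじ]を読む')
#       -> '<span class="ezRuby" title="かんじ">漢字</span>を読む'
#   strip_ruby_text('漢字[かんじ]を読む')    -> '漢字を読む'
#   strip_ruby_bottom('漢字[かんじ]を読む')  -> 'かんじを読む'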
|
StarcoderdataPython
|
5124918
|
<gh_stars>0
# -*- coding: utf-8 -*-
import datetime, logging, os, pprint, random
import requests
from illiad_app import settings_app
log = logging.getLogger(__name__)
class CloudCreateUserHandler( object ):
""" Creates new user via official illiad cloud api. """
def __init__( self ):
self.request_id = random.randint( 1111, 9999 ) # to follow logic if simultaneous hits
self.required_elements = [ 'auth_id', 'department', 'email', 'first_name', 'last_name', 'phone', 'status' ]
def data_check( self, request ):
""" Checks data.
Called by views.cloud_create_user() """
## auth-key check
summary_check = 'invalid'
if self.auth_key_good( request ) is True:
## data check
if self.data_good( request ) is True:
summary_check = 'valid'
log.debug( '%s - summary_check, `%s`' % (self.request_id, summary_check) )
return summary_check
def auth_key_good( self, request ):
""" Checks the auth_key and ip.
Called by data_check() """
auth_key_check = False
if 'auth_key' in request.POST.keys():
if request.POST['auth_key'] == settings_app.API_KEY:
log.debug( '%s - auth_key ok' % self.request_id )
source_ip = request.META.get('REMOTE_ADDR', 'unavailable')
log.debug( '%s - source_ip, ```%s```' % (self.request_id, source_ip) )
if source_ip in settings_app.LEGIT_IPS:
log.debug( '%s - source_ip ok' % self.request_id )
auth_key_check = True
log.debug( '%s - auth_key_check, `%s`' % (self.request_id, auth_key_check) )
return auth_key_check
def data_good( self, request ):
""" Checks for required params.
Called by data_check() """
( data_good_check, user_keys, check_flag ) = ( False, list(request.POST.keys()), 'init' )
for element in self.required_elements:
if element not in user_keys:
log.debug( '%s - missing element, `%s`; will return False' % (self.request_id, element) )
check_flag = 'failed'
break
if check_flag == 'init':
data_good_check = True
log.debug( '%s - data_good_check, `%s`' % (self.request_id, data_good_check) )
return data_good_check
def create_user( self, request ):
""" Creates new user via official illiad cloud api.
Called by views.cloud_create_user() """
usr_dct = dict( request.POST.items() )
params = {
## non-user
'DeliveryMethod': 'Hold for Pickup',
'LoanDeliveryMethod': 'Hold for Pickup',
'NotificationMethod': 'Electronic',
'Site': 'Rockefeller Circ. Desk',
'Web': True,
# 'AuthType': 'Default', # don't send, will be set to `Default`
## user
'Username': usr_dct['auth_id'],
'FirstName': usr_dct['first_name'],
'LastName': usr_dct['last_name'],
'EmailAddress': usr_dct['email'],
'Phone': usr_dct['phone'],
'Status': usr_dct['status'], # "type, eg `Undergraduate Student`"
'Department': usr_dct['department'],
'Address': '',
'Address2': '',
'City': '',
'State': '',
'Zip': '',
# 'ExternalUserId': '', # don't send
# 'PlainTextPassword': '', # don't send
}
log.debug( '%s - params, ```%s```' % (self.request_id, pprint.pformat(params)) )
url = '%s%s' % ( settings_app.ILLIAD_API_URL, 'Users' ) # root url contains ending-slash
log.debug( '%s - url, ```%s```' % (self.request_id, url) )
headers = {
'Accept-Type': 'application/json; charset=utf-8',
'ApiKey': os.environ['ILLIAD_WS__OFFICIAL_ILLIAD_API_KEY']
}
try:
r = requests.post( url, data=params, headers=headers, timeout=60, verify=True )
response_dct = r.json()
response_dct['added_status_code'] = r.status_code
log.debug( '%s - response, ```%s```' % (self.request_id, pprint.pformat(response_dct)) )
return response_dct
except Exception as e:
message = 'exception creating new user, ```%s```' % repr(e)
log.error( '%s - ```%s```' % (self.request_id, message) )
return { 'error': message }
def prep_output_dct( self, start_time, request, data_dct ):
""" Preps output-dct.
Called by views.cloud_create_user() """
params = dict( request.POST.items() )
params.pop( 'auth_key', None )
output_dct = {
'request': {
'url': '%s://%s%s' % (
request.scheme, request.META.get('HTTP_HOST', '127.0.0.1'), request.META['PATH_INFO'] ), # HTTP_HOST doesn't exist for client-tests
'params': params,
'timestamp': str( start_time ) },
'response': self.prep_response_segment( start_time, data_dct, params['auth_id'] ) }
log.debug( '%s - output_dct, ```%s```' % (self.request_id, pprint.pformat(output_dct)) )
return output_dct
def prep_response_segment( self, start_time, data_dct, submitted_username ):
""" Returns response part of context dct.
Called by prep_output_dct() """
returned_status_code = data_dct['added_status_code']
if 'UserName' in data_dct and data_dct['UserName'] == submitted_username and returned_status_code == 200:
summary_dct = {'status': 'Registered', 'status_code': 200}
else:
summary_dct = { 'status': 'Failure', 'status_code': returned_status_code }
response_dct = {
'elapsed_time': str( datetime.datetime.now() - start_time ),
'raw_data': data_dct,
'status_data': summary_dct
}
return response_dct
## end class class CloudCreateUserHandler()
# import os, requests
# params = {
# "Username" : "bdoe",
# "ExternalUserId": "123456789",
# "FirstName":"Bailey",
# "LastName":"Doe",
# "EmailAddress" : "<EMAIL>" ,
# "DeliveryMethod" : "Hold for Pickup",
# "LoanDeliveryMethod" : "Mail to Address",
# "NotificationMethod" : "Electronic",
# "Phone" : "123-456-7890",
# "Status" : "Graduate",
# "PlainTextPassword": "<PASSWORD>",
# "AuthType" : "ILLiad",
# "Department" : "Music",
# "Web" : True,
# "Address" : "the address",
# "Address2" : "extra address info",
# "City" : "the city",
# "State" : "RI",
# "Zip" : "12345"
# }
# url = '%s%s' % ( os.environ['ILLIAD_WS__OFFICIAL_ILLIAD_API_URL'], 'Users' ) # root url contains ending-slash
# print( 'url, ```%s```' % url )
# headers = {
# 'Accept-Type': 'application/json; charset=utf-8',
# 'ApiKey': os.environ['ILLIAD_WS__OFFICIAL_ILLIAD_API_KEY']
# }
# r = requests.post( url, data=params, headers=headers, timeout=60, verify=True )
# print( r.status_code )
# print( r.content )
|
StarcoderdataPython
|
1966913
|
"""
Serializers for users app
"""
from django.contrib.auth.models import User
from rest_framework import serializers
from small_small_hr.models import StaffProfile
# pylint: disable=too-many-ancestors
class UserSerializer(serializers.ModelSerializer):
"""
UserSerializer class
"""
class Meta: # pylint: disable=too-few-public-methods
"""
meta options
"""
model = User
fields = ('username', 'first_name', 'last_name', 'email')
class StaffProfileSerializer(serializers.ModelSerializer):
"""
Serializer class for StaffProfile model
"""
first_name = serializers.CharField(source='user.first_name')
last_name = serializers.CharField(source='user.last_name')
id_number = serializers.SerializerMethodField()
phone = serializers.SerializerMethodField()
sex = serializers.SerializerMethodField()
role = serializers.SerializerMethodField()
nhif = serializers.SerializerMethodField()
nssf = serializers.SerializerMethodField()
pin_number = serializers.SerializerMethodField()
address = serializers.SerializerMethodField()
birthday = serializers.SerializerMethodField()
leave_days = serializers.SerializerMethodField()
sick_days = serializers.SerializerMethodField()
overtime_allowed = serializers.SerializerMethodField()
start_date = serializers.SerializerMethodField()
end_date = serializers.SerializerMethodField()
emergency_contact_name = serializers.SerializerMethodField()
emergency_contact_number = serializers.SerializerMethodField()
class Meta: # pylint: disable=too-few-public-methods
"""
class meta options
"""
model = StaffProfile
fields = [
'id',
'first_name',
'last_name',
'created',
'id_number',
'phone',
'sex',
'modified',
'role',
'nhif',
'emergency_contact_name',
'nssf',
'address',
'birthday',
'overtime_allowed',
'leave_days',
'start_date',
'sick_days',
'pin_number',
'end_date',
'emergency_contact_number',
]
def get_id_number(self, obj): # pylint: disable=no-self-use
"""
Get id_number
"""
return obj.data.get('id_number')
def get_phone(self, obj): # pylint: disable=no-self-use
"""
Get phone
"""
return obj.data.get('phone')
def get_sex(self, obj): # pylint: disable=no-self-use
"""
Get sex
"""
return obj.data.get('sex')
def get_role(self, obj): # pylint: disable=no-self-use
"""
Get role
"""
return obj.data.get('role')
def get_nhif(self, obj): # pylint: disable=no-self-use
"""
        Get nhif
"""
return obj.data.get('nhif')
def get_nssf(self, obj): # pylint: disable=no-self-use
"""
Get nssf
"""
return obj.data.get('nssf')
def get_pin_number(self, obj): # pylint: disable=no-self-use
"""
Get pin_number
"""
return obj.data.get('pin_number')
def get_address(self, obj): # pylint: disable=no-self-use
"""
Get address
"""
return obj.data.get('address')
def get_birthday(self, obj): # pylint: disable=no-self-use
"""
Get birthday
"""
return obj.data.get('birthday')
def get_leave_days(self, obj): # pylint: disable=no-self-use
"""
Get leave_days
"""
return obj.data.get('leave_days')
def get_sick_days(self, obj): # pylint: disable=no-self-use
"""
Get sick_days
"""
return obj.data.get('sick_days')
def get_overtime_allowed(self, obj): # pylint: disable=no-self-use
"""
Get overtime_allowed
"""
return obj.data.get('overtime_allowed')
def get_start_date(self, obj): # pylint: disable=no-self-use
"""
Get start_date
"""
return obj.data.get('start_date')
def get_end_date(self, obj): # pylint: disable=no-self-use
"""
Get end_date
"""
return obj.data.get('end_date')
def get_emergency_contact_name(self, obj): # pylint: disable=no-self-use
"""
Get emergency_contact_name
"""
return obj.data.get('emergency_contact_name')
def get_emergency_contact_number(self, obj): # pylint: disable=no-self-use
"""
Get emergency_contact_number
"""
return obj.data.get('emergency_contact_number')
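
# Illustrative usage sketch (assumed, not part of the original module): StaffProfile is
# expected to keep its extra attributes in a dict-like ``data`` field, so serializing an
# existing profile could look roughly like:
#
#   profile = StaffProfile.objects.first()
#   StaffProfileSerializer(profile).data
#   # -> {'id': ..., 'first_name': ..., 'phone': ..., 'leave_days': ..., ...}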
|
StarcoderdataPython
|
5140217
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contents.dice.DiceCommandProcessor import DiceCommandProcessor, InvalidFormulaException, CalculatorBase
class CallOfCthulhuDice(DiceCommandProcessor):
def __init__(self):
super().__init__()
def percent(self, command):
self.comment_separator.separate(command)
value = self.comment_separator.expression_formula
new_command = f"1d100<={value}"
self.comment_separator.expression_formula = new_command
self.execute_evaluation_expression()
self.execute_replacers()
self.execute_calculate()
return self.generate_dice_result_string()
def versus(self, command):
        # :vs <active side (the one acting)> <passive side (the one acted upon)>
        # Compute (active - passive) * 5 as the difference, then 50 + difference is the
        # success value; roll 1d100 against it to judge success or failure.
        # Example) DEX opposed roll:
        #   active side value: 9, passive side value: 10
        #   (9 - 10) * 5 = -5, 50 + (-5) = 45% => 1d100<=45  # opposed-roll success check
self.comment_separator.separate(command)
command_string = self.comment_separator.expression_formula
command_line = command_string.split(" ")
removal_blank = [word.strip() for word in command_line if word.strip() != ""]
#print("removal_blank: " + str(removal_blank))
if not len(removal_blank) >= 2:
raise InvalidFormulaException()
calculator = CalculatorBase()
active_side = int(calculator.execute_eval(removal_blank[0]))
passive_side = int(calculator.execute_eval(removal_blank[1]))
name = f"{active_side}vs{passive_side}"
value = 50 + ((active_side - passive_side) * 5)
new_command = f"1d100<={value}"
self.comment_separator.expression_formula = new_command
if self.comment_separator.is_comment():
            # Comment present
if "対抗" in self.comment_separator.comment:
new_comment = f'{name} {self.comment_separator.comment}'
else:
new_comment = f'{name} 対抗{self.comment_separator.comment}'
else:
            # No comment
new_comment = f"{name} 対抗ロール"
self.comment_separator.set_comment(new_comment)
self.execute_evaluation_expression()
self.execute_replacers()
self.execute_calculate()
return self.generate_dice_result_string()
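
# Illustrative usage sketch (assumed API, not part of the original module): an opposed
# roll of active value 9 against passive value 10 resolves internally to 1d100<=45.
#
#   dice = CallOfCthulhuDice()
#   dice.versus("9 10")    # -> rolls 1d100 against 45 and reports success/failure
#   dice.percent("50")     # -> rolls 1d100 against 50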
|
StarcoderdataPython
|
9784726
|
from data.data_pipe import de_preprocess, get_train_loader, get_val_data
from model import Backbone, Arcface, MobileFaceNet, Am_softmax, l2_norm
from verifacation import evaluate
import torch
from torch import optim
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
from matplotlib import pyplot as plt
plt.switch_backend('agg')
from utils import get_time, gen_plot, hflip_batch, separate_bn_paras
from PIL import Image
from torchvision import transforms as trans
import math
import bcolz
class face_learner(object):
def __init__(self, conf, inference=False):
print(conf)
# conf.use_mobilfacenet = True
if conf.use_mobilfacenet:
self.model = MobileFaceNet(conf.embedding_size).to(conf.device)
print('MobileFaceNet model generated')
else:
self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
if not inference:
self.milestones = conf.milestones
self.loader, self.class_num = get_train_loader(conf)
self.writer = SummaryWriter(conf.log_path)
self.step = 0
self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device)
print('two model heads generated')
paras_only_bn, paras_wo_bn = separate_bn_paras(self.model)
if conf.use_mobilfacenet:
self.optimizer = optim.SGD([
{'params': paras_wo_bn[:-1], 'weight_decay': 4e-5},
{'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4},
{'params': paras_only_bn}
], lr = conf.lr, momentum = conf.momentum)
else:
self.optimizer = optim.SGD([
{'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
{'params': paras_only_bn}
], lr = conf.lr, momentum = conf.momentum)
print(self.optimizer)
# self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)
print('optimizers generated')
self.board_loss_every = len(self.loader)//100
self.evaluate_every = len(self.loader)//10
self.save_every = len(self.loader)//5
self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent)
else:
self.threshold = conf.threshold
def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False):
if to_save_folder:
save_path = conf.save_path
else:
save_path = conf.model_path
torch.save(
self.model.state_dict(), save_path /
('model_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
if not model_only:
torch.save(
self.head.state_dict(), save_path /
('head_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
torch.save(
self.optimizer.state_dict(), save_path /
('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra)))
def load_state(self, conf, fixed_str, from_save_folder=False, model_only=False):
if from_save_folder:
save_path = conf.save_path
else:
save_path = conf.model_path
self.model.load_state_dict(torch.load(save_path/'model_{}'.format(fixed_str), map_location='cpu'))
if not model_only:
self.head.load_state_dict(torch.load(save_path/'head_{}'.format(fixed_str)))
self.optimizer.load_state_dict(torch.load(save_path/'optimizer_{}'.format(fixed_str)))
def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor):
self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy, self.step)
self.writer.add_scalar('{}_best_threshold'.format(db_name), best_threshold, self.step)
self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor, self.step)
# self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step)
# self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step)
# self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step)
def evaluate(self, conf, carray, issame, nrof_folds = 5, tta = False):
self.model.eval()
idx = 0
embeddings = np.zeros([len(carray), conf.embedding_size])
with torch.no_grad():
while idx + conf.batch_size <= len(carray):
batch = torch.tensor(carray[idx:idx + conf.batch_size])
if tta:
fliped = hflip_batch(batch)
emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device))
embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch)
else:
embeddings[idx:idx + conf.batch_size] = self.model(batch.to(conf.device)).cpu()
idx += conf.batch_size
if idx < len(carray):
batch = torch.tensor(carray[idx:])
if tta:
fliped = hflip_batch(batch)
emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device))
embeddings[idx:] = l2_norm(emb_batch)
else:
embeddings[idx:] = self.model(batch.to(conf.device)).cpu()
tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
buf = gen_plot(fpr, tpr)
roc_curve = Image.open(buf)
roc_curve_tensor = trans.ToTensor()(roc_curve)
return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
def find_lr(self,
conf,
init_value=1e-8,
final_value=10.,
beta=0.98,
bloding_scale=3.,
num=None):
if not num:
num = len(self.loader)
mult = (final_value / init_value)**(1 / num)
lr = init_value
for params in self.optimizer.param_groups:
params['lr'] = lr
self.model.train()
avg_loss = 0.
best_loss = 0.
batch_num = 0
losses = []
log_lrs = []
for i, (imgs, labels) in tqdm(enumerate(self.loader), total=num):
imgs = imgs.to(conf.device)
labels = labels.to(conf.device)
batch_num += 1
self.optimizer.zero_grad()
embeddings = self.model(imgs)
thetas = self.head(embeddings, labels)
loss = conf.ce_loss(thetas, labels)
#Compute the smoothed loss
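            # Exponentially weighted moving average of the loss, with bias correction
            # below (dividing by 1 - beta**t), as in the learning-rate range test.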
avg_loss = beta * avg_loss + (1 - beta) * loss.item()
self.writer.add_scalar('avg_loss', avg_loss, batch_num)
smoothed_loss = avg_loss / (1 - beta**batch_num)
self.writer.add_scalar('smoothed_loss', smoothed_loss,batch_num)
#Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > bloding_scale * best_loss:
print('exited with best_loss at {}'.format(best_loss))
plt.plot(log_lrs[10:-5], losses[10:-5])
return log_lrs, losses
#Record the best loss
if smoothed_loss < best_loss or batch_num == 1:
best_loss = smoothed_loss
#Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
self.writer.add_scalar('log_lr', math.log10(lr), batch_num)
#Do the SGD step
#Update the lr for the next step
loss.backward()
self.optimizer.step()
lr *= mult
for params in self.optimizer.param_groups:
params['lr'] = lr
if batch_num > num:
plt.plot(log_lrs[10:-5], losses[10:-5])
return log_lrs, losses
def train(self, conf, epochs):
self.model.train()
running_loss = 0.
for e in range(epochs):
print('epoch {} started'.format(e))
if e == self.milestones[0]:
self.schedule_lr()
if e == self.milestones[1]:
self.schedule_lr()
if e == self.milestones[2]:
self.schedule_lr()
for imgs, labels in tqdm(iter(self.loader)):
imgs = imgs.to(conf.device)
labels = labels.to(conf.device)
self.optimizer.zero_grad()
embeddings = self.model(imgs)
thetas = self.head(embeddings, labels)
loss = conf.ce_loss(thetas, labels)
loss.backward()
running_loss += loss.item()
self.optimizer.step()
if self.step % self.board_loss_every == 0 and self.step != 0:
loss_board = running_loss / self.board_loss_every
self.writer.add_scalar('train_loss', loss_board, self.step)
running_loss = 0.
if self.step % self.evaluate_every == 0 and self.step != 0:
accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.agedb_30, self.agedb_30_issame)
self.board_val('agedb_30', accuracy, best_threshold, roc_curve_tensor)
accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.lfw, self.lfw_issame)
self.board_val('lfw', accuracy, best_threshold, roc_curve_tensor)
accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.cfp_fp, self.cfp_fp_issame)
self.board_val('cfp_fp', accuracy, best_threshold, roc_curve_tensor)
self.model.train()
if self.step % self.save_every == 0 and self.step != 0:
self.save_state(conf, accuracy)
self.step += 1
self.save_state(conf, accuracy, to_save_folder=True, extra='final')
def schedule_lr(self):
for params in self.optimizer.param_groups:
params['lr'] /= 10
print(self.optimizer)
def infer(self, conf, faces, target_embs, tta=False):
        '''
        faces : list of PIL Image
        target_embs : [n, 512] computed embeddings of faces in facebank
        tta : test-time augmentation (horizontal flip only)
        returns : (min_idx, minimum) - for each face, the index of the closest facebank
                  embedding (-1 if no match within the threshold) and its squared distance;
                  min_idx can be used to look up the recorded names of faces in the facebank
        '''
embs = []
for img in faces:
if tta:
mirror = trans.functional.hflip(img)
emb = self.model(conf.test_transform(img).to(conf.device).unsqueeze(0))
emb_mirror = self.model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
embs.append(l2_norm(emb + emb_mirror))
else:
embs.append(self.model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
source_embs = torch.cat(embs)
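        # Pairwise squared L2 distances via broadcasting:
        # [n_faces, 512, 1] - [1, 512, n_targets] -> sum over the embedding dimension.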
diff = source_embs.unsqueeze(-1) - target_embs.transpose(1,0).unsqueeze(0)
dist = torch.sum(torch.pow(diff, 2), dim=1)
minimum, min_idx = torch.min(dist, dim=1)
min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1
return min_idx, minimum
|
StarcoderdataPython
|
6498522
|
<reponame>laundmo/lona-bootstrap-5
from lona_bootstrap_5 import (
ColMd6,
Col,
Row,
TextInput,
TextArea,
PrimaryButton,
SecondaryButton,
SuccessButton,
WarningButton,
DangerButton,
InfoButton,
LightButton,
DarkButton,
LinkButton,
Progress,
Modal,
PrimaryAlert,
DarkAlert,
DarkBadge,
GrowPrimarySpinner,
GrowDangerSpinner,
BorderPrimarySpinner,
BorderSuccessSpinner,
)
from lona import LonaApp, LonaView
from lona.html import H1, H2, HTML
from lona_bootstrap_5.nodes import BorderInfoSpinner, PrimaryBadge
app = LonaApp(__file__)
app.add_static_file('lona/style.css', """
#lona {
max-width: 60em;
margin: 0 auto;
}
""")
@app.route('/')
class MyView(LonaView):
def handle_request(self, request):
# modal
self.modal_trigger = PrimaryButton('Show Modal', _id='show-modal')
self.modal = Modal()
html = HTML(
H1('Bootstrap 5'),
# grid system
H2('Grid System'),
Row(
ColMd6('foo'),
ColMd6('bar'),
),
Row(
Col('foo'),
Col('bar'),
Col('baz'),
),
# inputs
H2('Inputs'),
TextInput(),
TextArea(),
# Buttons
H2('Buttons'),
PrimaryButton('PrimaryButton'),
SecondaryButton('SecondaryButton'),
SuccessButton('SuccessButton'),
WarningButton('WarningButton'),
DangerButton('DangerButton'),
InfoButton('InfoButton'),
LightButton('LightButton'),
DarkButton('DarkButton'),
LinkButton('LinkButton'),
# alerts
PrimaryAlert('PrimaryAlert'),
DarkAlert('DarkAlert'),
# badges
PrimaryBadge('PrimaryBadge'),
DarkBadge('DarkBadge'),
GrowPrimarySpinner('Loading...'),
GrowDangerSpinner('Warning!'),
BorderPrimarySpinner('Test'),
BorderSuccessSpinner('Test2'),
BorderInfoSpinner(),
# Modal
H2('Modal'),
self.modal_trigger,
self.modal,
# Progress
H2('Progress'),
Progress(percentage=35, background='success'),
)
while True:
self.show(html)
input_event = self.await_input_event()
# modal
if input_event.node is self.modal_trigger:
with html.lock:
self.modal.set_title('Test Title')
self.modal.set_body('Test Body')
self.modal.set_buttons(
PrimaryButton('PrimaryButton', _id='primary'),
SecondaryButton('SecondaryButton', _id='secondary'),
)
self.modal.show()
elif input_event.node is self.modal.close_button:
print('modal close button clicked')
self.modal.hide()
elif input_event.node in self.modal.buttons:
print(input_event.node, 'was clicked')
self.modal.hide()
app.run(port=8080)
|
StarcoderdataPython
|
3371176
|
"""
File name: main.py
Author: <NAME>, <NAME>
Date last modified: 21 March, 2021
Python Version: 3.8
This script contains the main function that calls Inference.py to solve
and show solutions a specific inference query. It parses command line
arguments that specify which query to do.
"""
import sys
from Factor import Factor, Sign
from Inference import inference
def main() -> None:
if len(sys.argv) <= 1:
print('Usage: python main.py [1 or 2 for 2b part 1 and 2 respectively]')
exit(-1)
sim_to_run = sys.argv[1]
factors = [Factor(['Trav'], [], [0.05, 0.95]),
Factor(['Fraud'], ['Trav'], [0.01, 0.99, 0.004, 0.996]),
Factor(['OC'], [], [0.7, 0.3]),
Factor(['CRP'], ['OC'], [0.1, 0.9, 0.001, 0.999]),
Factor(['FP'], ['Trav', 'Fraud'], [0.9, 0.1, 0.9, 0.1,
0.1, 0.9, 0.01, 0.99]),
Factor(['IP'], ['OC', 'Fraud'], [0.02, 0.98, 0.01, 0.99,
0.011, 0.989, 0.001, 0.999])]
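    # The factors above are the conditional probability tables of the credit-card fraud
    # Bayesian network (variables: Trav, Fraud, OC, CRP, FP, IP).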
if sim_to_run == '1':
inference(factors, ['Fraud'], ['Trav', 'FP', 'IP', 'OC', 'CRP'], [])
else:
inference(factors, ['Fraud'], ['Trav', 'OC'],
[('FP', Sign.POSITIVE), ('IP', Sign.NEGATIVE), ('CRP', Sign.POSITIVE)])
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6424404
|
<reponame>crowdbotics-apps/ideapros-llc-viaggi-32125<gh_stars>0
from django.core.mail import EmailMessage
from rest_framework.authtoken.models import Token
from ideapros_llc_viaggi_32125.settings import FCM_SERVER_KEY, PLACES_API_KEY
from notifications.models import Notification
import pyotp
import requests
from requests.structures import CaseInsensitiveDict
url = "https://fcm.googleapis.com/fcm/send"
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = "key={}".format(FCM_SERVER_KEY)
headers["Content-Type"] = "application/json"
def send_notification(user, title, content, data=None, data_type=None):
notif = Notification.objects.create(
user=user,
title=title,
content=content,
data=data,
data_type=data_type
)
notif_id = str(notif)
if user.registration_id:
if data is None:
payload = {
'to': user.registration_id,
'notification': {
"title": title,
"text": content
},
'data': {
"notif_id": notif_id,
}
}
elif data and data_type:
data = str(data)
data_type = str(data_type)
payload = {
'to': user.registration_id,
'notification': {
"title": title,
"text": content
},
'data': {
"type": data_type,
"redirect_user_id": data,
"notif_id": notif_id,
}
}
resp = requests.post(url, headers=headers, json=payload)
def generateOTP(email=None, user=None):
if email and user:
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
otp = totp.now()
user.activation_key = secret
user.otp = otp
user.save()
sliced_otp = str(otp)[:4]
email = EmailMessage('OTP Verification', 'Your OTP is {}'.format(sliced_otp), from_email='<EMAIL>', to=[email])
email.send()
return user
def verifyOTP(otp=None, activation_key=None, user=None):
if otp and activation_key and user:
totp = pyotp.TOTP(activation_key)
sliced_otp = user.otp[:4]
if otp == sliced_otp:
return totp.verify(user.otp, valid_window=6)
return False
else:
return False
def send_otp(user):
email = user.email
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
otp = totp.now()
user.activation_key = secret
user.otp = otp
user.save()
sliced_otp = str(otp)[:4]
email_body = """\
<html>
<head></head>
<body>
<p>
Hi,<br>
Your OTP is %s<br>
Regards,<br>
<NAME>
</p>
</body>
</html>
""" % (sliced_otp)
email_msg = EmailMessage("Password Reset - Viaggio", email_body, from_email='<EMAIL>', to=[email])
email_msg.content_subtype = "html"
email_msg.send()
def auth_token(user):
token, created = Token.objects.get_or_create(user=user)
return token
def get_photo(result):
try:
reference = result["photos"][0]["photo_reference"]
width = result["photos"][0]["width"]
except KeyError:
photo_response=None
else:
url = "https://maps.googleapis.com/maps/api/place/photo?maxwidth={}&photo_reference={}&key={}" \
.format(width, reference, PLACES_API_KEY)
retrieved_image_header = requests.head(url)
try:
photo_response = retrieved_image_header.headers['location']
except Exception as e:
photo_response = None
return photo_response
def get_photos(result):
photos = result["photos"]
photo_response = []
for photo in photos:
try:
reference = photo["photo_reference"]
width = photo["width"]
except KeyError:
pass
else:
url = "https://maps.googleapis.com/maps/api/place/photo?maxwidth={}&photo_reference={}&key={}" \
.format(width, reference, PLACES_API_KEY)
retrieved_image_header = requests.head(url)
try:
photo_response.append(retrieved_image_header.headers['location'])
except Exception as e:
pass
return photo_response
|
StarcoderdataPython
|
1770364
|
from scipy import signal
from PIL import Image
import cv2
import numpy
import math
import imageio
# Locating the image. If the image is not same then change to relative address.
usedImage = '../../Images/test.jpg'
# Opening the image into an array
img = numpy.array(Image.open(usedImage).convert("L"))
imageio.imwrite('./Outputs/img.jpeg', img)
# 3x3 averaging kernel used for smoothing (a rough stand-in for a Gaussian blur)
kernel = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
# CONVOLUTION 1
# Smoothing the image by convolving it with the averaging kernel.
# I could not code the convolution so I got irritated and used a function for
# convolution instead.
gaussian = signal.convolve(img, kernel, mode='same')
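# mode='same' keeps the output array the same shape as the input image.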
# Print array just to check the output. Can uncomment if you want.
# print ('Im: Convolution 1')
# print (gaussian)
# Saving the array with the blurred image as a JPG image
imageio.imwrite('./Outputs/smooth.jpeg', gaussian)
# cv2.imshow('smooth.jpeg', gaussian) # This statement does not work btw
# Kernel for Sobel X (using horizontal transformation)
kernelX = [[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]]
# Kernel for Sobel Y (using vertical transformation)
kernelY = [[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]]
# Kernel for diagonal Kirsch transformation 1
kernelXY = [[0,1,2],
[-1,0,1],
[-2,-1,0]]
# Kernel for diagonal Kirsch transformation 2
kernelYX = [[-2,-1,0],
[-1,0,1],
[0,1,2]]
# CONVOLUTION 2
# Performing convolution over the smoothed image with all the generated kernels.
# Generate output array imX of horizontal convolution
imX = signal.convolve(gaussian, kernelX, mode='same')
# Generate output array imY of vertical convolution
imY = signal.convolve(gaussian, kernelY, mode='same')
# Generate output array imX of horizontal convolution
imXY = signal.convolve(gaussian, kernelXY, mode='same')
# Generate output array imY of vertical convolution
imYX = signal.convolve(gaussian, kernelYX, mode='same')
# Printing arrays to console just to check
# print ('Im X: Convolution 2')
# print (imX)
# print ('Im Y: Convolution 2')
# print (imY)
# print ('Im XY: Convolution 2')
# print (imXY)
# print ('Im YX: Convolution 2')
# print (imYX)
# Saving the arrays created as JPG images
imageio.imwrite('./Outputs/imX.jpeg', imX)
imageio.imwrite('./Outputs/imY.jpeg', imY)
imageio.imwrite('./Outputs/imXY.jpeg', imXY)
imageio.imwrite('./Outputs/imYX.jpeg', imYX)
'''cv2.imshow('imX.jpeg', imX)
cv2.imshow('imY.jpeg', imY)
cv2.imshow('imXY.jpeg', imXY)
cv2.imshow('imYX.jpeg', imYX)'''
# Combining all the horizontal and vertical gradient approximations
# to create the final canny edge detected image
imFinal = numpy.sqrt(imX*imX + imY*imY + imXY*imXY + imYX*imYX)
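# Note: this is only the combined gradient-magnitude stage; a full Canny pipeline would
# also apply non-maximum suppression and hysteresis thresholding.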
# Printing the canny edge detected image array just to check
# print ('Im Final: Combining Gradient Approximations')
# print (imFinal)
# Saving the final canny edge detection image as a JPG image
imageio.imwrite('./Outputs/canny.jpeg', imFinal)
# cv2.imshow('canny.jpeg', imFinal)
print ('Finished Canny edge detection')
|
StarcoderdataPython
|
5002024
|
import sys
# Echo each line from standard input until EOF.
for line in sys.stdin:
    print line.rstrip('\n')
|
StarcoderdataPython
|
9706605
|
<reponame>shoriwe-upb/blackjack<filename>dependencies/game.py
from .player import Player
from .deck import Deck
from .dealerai import DealerAI
class Game(object):
def __init__(self, number_of_players):
self.__number_of_players = number_of_players
self.__players = self.__create_players(number_of_players)
self.__deck = Deck()
@classmethod
def check_splitable_hand(cls, hand):
return hand[0].raw_value == hand[1].raw_value
@classmethod
def __create_players(cls, number_of_players):
buffer = []
for n in range(number_of_players + 1):
buffer.append(Player(n))
return buffer
def divide_start_cards(self):
self.__deck.shuffle()
for n in range(2):
for player in self.__players:
player[0].add_card(self.__deck.pop())
# Only do this if the user receive his last start card
if n:
print(player)
if self.check_splitable_hand(player[0]) and player.player_id:
if player.ask_split_hand():
player.split_hand()
for hand in player:
card = self.__deck.pop()
hand.add_card(card)
print(player)
elif player.player_id == 0 and player[0].check_blackjack():
return False
return True
def divide_rest_of_game(self):
for player in self.__players[1:]:
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
print("\033[1;34m", player, "\033[0;0m")
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
for hand in player:
if hand:
print("\033[0;32m", "Your Hand:", hand, "\033[0;0m")
print("\033[1;31m", "Dealer Hand:", self.__players[0][0], "\033[0;0m")
print("\033[0;32m", "Your hand value:", hand.sum_hand_of_cards(), "\033[0;0m")
print("\033[1;31m", "Dealer Points:", self.__players[0][0].sum_hand_of_cards(), "\033[0;0m")
while hand.sum_hand_of_cards() < 21:
if player.ask_for_card():
card = self.__deck.pop()
print("\033[0;32m", "You got", card, "\033[0;0m")
hand.add_card(card)
else:
break
print("\033[0;32m", "Your hand value now:", hand.sum_hand_of_cards(), "\033[0;0m")
print("Your hand now:", hand)
print("=======Hand results=========")
points = hand.sum_hand_of_cards()
if points == 21:
print("\033[1;32m", "Nice!", "\033[0;0m")
elif points < 21:
print("\033[1;32m", "You got", points, "\033[0;0m")
else:
print("\033[1;31m", "You fly far away my friend; with", points, "\033[0;0m")
print("============================")
print()
print()
print("------Dealer thinking------")
print("Dealer starts with:", self.__players[0])
dealer = DealerAI(self.__players)
while self.__players[0][0].sum_hand_of_cards() < 21:
print(self.__players[0])
if dealer.ask_for_card():
card = self.__deck.pop()
print("Dealer receive:", card)
self.__players[0][0].add_card(card)
dealer.update_risk(card)
print("\033[1;31m", "Dealer points now:",self.__players[0][0].sum_hand_of_cards(), "\033[0;0m")
else:
break
def show_winners(self):
dealer_points = self.__players[0][0].sum_hand_of_cards()
for player in self.__players:
if player.player_id:
print(player)
for hand in player:
if hand:
hand_points = hand.sum_hand_of_cards()
if hand_points > 21:
win = False
else:
if hand_points < dealer_points:
if dealer_points > 21:
win = True
else:
win = False
                            elif hand_points > dealer_points:
                                win = True  # a valid hand higher than the dealer's wins
else:
if hand.check_blackjack() and not self.__players[0][0].check_blackjack():
win = True
else:
win = False
if win:
print("\033[0;32m", "You win with:", hand, "\033[0;0m")
else:
print("\033[1;31m", player, "\033[0;0m")
def start(self):
self.__deck.merge_cementery()
self.__deck.shuffle()
for player in self.__players:
player.clean_hands()
print("------Dividing cards------")
if self.divide_start_cards():
print()
print("------Now it's your time------")
self.divide_rest_of_game()
print()
print("------Checking winners------")
self.show_winners()
else:
print("\033[1;31m", "Sorry but the dealer won", "\033[0;0m")
|
StarcoderdataPython
|
3255838
|
<reponame>peterkulik/ois_api_client
from typing import List
from dataclasses import dataclass
@dataclass
class OrderNumbers:
"""Order numbers
:param order_number: Order number
"""
order_number: List[str]
|
StarcoderdataPython
|
1851713
|
import fnmatch
import os
import h5py
import numpy as np
from basic_utils import profile
from depth_utils import colorized_surfnorm
@profile
def process_depth_save(params):
suffix = '*_' + params.data_type + '.png'
data_path = os.path.join(params.dataset_path, 'eval-set/')
results_dir = params.dataset_path + params.features_root + params.proceed_step + '/' + \
params.net_model + '_results_' + params.data_type
if not os.path.exists(results_dir):
os.makedirs(results_dir)
data_type = 'colorized_depth'
for category in sorted(os.listdir(data_path)):
category_path = os.path.join(data_path, category)
for instance in sorted(os.listdir(category_path)):
instance_path = os.path.join(category_path, instance)
num_debug = 0
for file in fnmatch.filter(sorted(os.listdir(instance_path)), suffix):
path = os.path.join(instance_path, file)
result_filename = results_dir + "/" + file + '.hdf5'
with h5py.File(result_filename, 'w') as f:
f.create_dataset(data_type, data=np.array(colorized_surfnorm(path), dtype=np.float32))
f.close()
# get the first #debug_size (default=10) of sorted samples from each instance
num_debug += 1
if num_debug == params.debug_size and params.debug_mode:
break
|
StarcoderdataPython
|
12820057
|
from django.contrib import admin
from .models import Project, Cv, Technique, Info
# Register your models here.
admin.site.register(Project)
admin.site.register(Cv)
admin.site.register(Technique)
admin.site.register(Info)
|
StarcoderdataPython
|
3568453
|
"""Tests for avro2py/rendering.py"""
import io
import sys
import json
from datetime import datetime
from decimal import Decimal
from pathlib import Path
import importlib.util as imp
from typing import Optional
import avro
from hypothesis import strategies as st, given, assume, note, settings, HealthCheck
from avro2py.avro_types import parse_into_types
from avro2py.codegen import populate_namespaces
from avro2py.utils import to_avro_dict, from_avro_dict
import avro2py.rendering as rendering
TEST_DIR = Path(__file__).parent
EXAMPLE_PATH = TEST_DIR / '..' / 'example'
EXAMPLE_SCHEMA = EXAMPLE_PATH / 'schema.avsc'
EXAMPLE_AVRO_MODEL_SCHEMA, = (x for x in json.load(open(EXAMPLE_SCHEMA)) if x['name'] == 'ExampleAvroModel')
@given(st.data())
@settings(suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow]) # because of exponent check below
def test_example_message_round_trippable(data):
"""Ensure generated classes can be round-tripped into avro and back."""
# setup
parsed_schema = parse_into_types(schema=EXAMPLE_AVRO_MODEL_SCHEMA)
namespaces = populate_namespaces([parsed_schema])
# the actual intrinsic thing we care to test
module_contents, = (
contents
for path, contents in rendering.render_modules(namespaces).items()
if path.stem != '__init__'
)
# write module contents out to file, then, load file and generate an example object
tmp_path = Path('/tmp/example.py')
with tmp_path.open('w') as f:
f.write(module_contents)
spec = imp.spec_from_file_location('example', tmp_path)
example = imp.module_from_spec(spec)
sys.modules['example'] = example
spec.loader.exec_module(example)
original_example_avro_model = data.draw(st.from_type(example.ExampleAvroModel))
tmp_path.unlink()
# clean up PBT-generated example model
def check_decimal(d: Optional[Decimal]):
if d is None:
return
assume(d.is_finite())
assume(len(d.as_tuple().digits) < 21) # if there are more digits than precision, round-tripping will truncate
assume(d.as_tuple().exponent == -2) # bug in avro python implementation; exponent _must_ match scale
check_decimal(original_example_avro_model.decimal)
check_decimal(original_example_avro_model.maybeDecimal)
assume(
original_example_avro_model.maybeInt is None
or -2_147_483_648 <= original_example_avro_model.maybeInt <= 2_147_483_647
) # 32-bit signed range is underspecified by python "int" type (which also supports longs)
assume(
        -9_223_372_036_854_775_808 <= original_example_avro_model.sampleInner.foo <= 9_223_372_036_854_775_807
) # 64-bit signed range also underspecified
if isinstance(original_example_avro_model.sampleUnion, example.ExampleAvroModel.RecordWithInt):
assume(-2_147_483_648 <= original_example_avro_model.sampleUnion.value <= 2_147_483_647)
original_example_avro_model = original_example_avro_model._replace(
timestamp=datetime(2000, 1, 1, 0, 0, 0, 000000, tzinfo=avro.timezones.utc)
) # set this manually, since it's underspecified by the `datetime.datetime` type annotation
# round trip through avro ser/deser
avro_parsed_schema = avro.schema.parse(json.dumps(EXAMPLE_AVRO_MODEL_SCHEMA))
example_model_dict = to_avro_dict(original_example_avro_model)
note(example_model_dict)
buffer = io.BytesIO()
encoder = avro.io.BinaryEncoder(buffer)
datum_writer = avro.io.DatumWriter(avro_parsed_schema)
datum_writer.write(example_model_dict, encoder)
buffer.seek(0)
decoder = avro.io.BinaryDecoder(buffer)
datum_reader = avro.io.DatumReader(writers_schema=avro_parsed_schema, readers_schema=avro_parsed_schema)
round_tripped_example_avro_model_dict = datum_reader.read(decoder)
note(round_tripped_example_avro_model_dict)
round_tripped_example_avro_model = from_avro_dict(
round_tripped_example_avro_model_dict,
record_type=example.ExampleAvroModel
)
assert round_tripped_example_avro_model == original_example_avro_model
|
StarcoderdataPython
|
6664536
|
<gh_stars>1-10
input_file = open('input.txt', 'r')
input_list = input_file.read().split('\n')
def most_common(lst):
return str(max(set(lst), key=lst.count))
matrix = [[]for _ in range(12)]
for row in input_list:
for index, column in enumerate(row):
matrix[index].append(int(column))
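# The gamma rate takes the most common bit of each column; the epsilon rate is its
# bitwise complement, and the two are multiplied to get the power consumption.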
bit_string = ""
bit_string_flipped = ""
for column in matrix:
bit_string += most_common(column)
bit_string_flipped = ''.join('1' if x == '0' else '0' for x in bit_string)
gamma_rate = int(bit_string,2)
epsilon_rate = int(bit_string_flipped,2)
power_consumption = gamma_rate * epsilon_rate
print(f'Power consumption: {power_consumption}')
|
StarcoderdataPython
|
8109402
|
import node
def rec(my_ip):
while True:
message,address = node.receive(my_ip)
print(message)
print(address)
with open("recent_messages.txt", "a") as file:
if " ".join(message) != " " or " ".join(message) != "":
file.write("\n" + address[0] + " " + " ".join(message))
if __name__ == "__main__":
rec("127.0.0.1")
|
StarcoderdataPython
|
6591247
|
# graph definition
from datetime import datetime
from hiku.graph import Graph, Root, Field
GRAPH = Graph([
Root([
Field('now', None, lambda _: [datetime.now().isoformat()]),
]),
])
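# The root field 'now' resolves to the current timestamp in ISO 8601 format each time it is queried.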
# test
from hiku.engine import Engine
from hiku.result import denormalize
from hiku.executors.sync import SyncExecutor
from hiku.readers.simple import read
hiku_engine = Engine(SyncExecutor())
def execute(graph, query_string):
query = read(query_string)
result = hiku_engine.execute(graph, query)
return denormalize(graph, result)
from tests.base import patch, Mock
_NOW = datetime(2015, 10, 21, 7, 28)
@patch('{}.datetime'.format(__name__))
def test(dt):
dt.now = Mock(return_value=_NOW)
result = execute(GRAPH, '[:now]')
assert result == {'now': '2015-10-21T07:28:00'}
# console
from hiku.console.ui import ConsoleApplication
app = ConsoleApplication(GRAPH, hiku_engine, debug=True)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
http_server = make_server('localhost', 5000, app)
http_server.serve_forever()
|
StarcoderdataPython
|
9659477
|
<reponame>cadomani/fprime<filename>Autocoders/Python/src/fprime_ac/utils/ConfigManager.py<gh_stars>0
#
# ===============================================================================
# NAME: ConfigManager.py
#
# DESCRIPTION: This is a simple configuration class patterned after the
# one in the Keck observation sequencer GUI and the Tahoe
# CalVal pipeline. The configuration class has changed since
# previous ones. This new one uses the ConfigParser module
# included with Python 2.4 to extend configuration out to
# reading windows like .ini files. If none exist then this
# uses hardwired values set in a dictionary called prop to
# default initialization.
#
# AUTHOR: <NAME>
#
# EMAIL: <EMAIL>
#
# DATE CREATED : 30 January 2007
#
# Copyright 2007, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
import configparser
import os
parent = configparser.ConfigParser
class ConfigManager(parent):
"""
This class provides a single entrypoint for all configurable properties,
namely the self.Prop dictionary.
"""
__instance = None
__prop = None
def __init__(self):
"""
Constructor.
"""
configparser.ConfigParser.__init__(self)
self.__prop = dict()
self._setProps()
# Now look for an ac.ini file within
# first the current directory and then
# the users $HOME directory. If not found
# then the default values from setProps are used.
config_file_name = "ac.ini"
files = list()
# Append other paths to look in here.
        # The user's home directory is read first (lower precedence).
files.append(os.path.join(os.environ["HOME"], config_file_name))
        # The current working directory is read last, so its ac.ini overrides the home directory's.
files.append(os.path.join(os.path.realpath(os.curdir), config_file_name))
self.read(files)
def getInstance():
"""
Return instance of singleton.
"""
if ConfigManager.__instance is None:
ConfigManager.__instance = ConfigManager()
return ConfigManager.__instance
# define static method
getInstance = staticmethod(getInstance)
def _setProps(self):
"""
Used only by constructor to set all ConfigParser defaults. Establishes
a dictionary of sections and then a dictionary of keyword, value
association for each section.
@params None
"""
################################################################
# General parameters here. This is the DEFAULT section.
################################################################
# self.__prop['DEFAULT'] = dict()
#
# 'DEFAULT' section is baked in for Python 2.6x
# the call below will raise ValueError
#
# This sets the defaults within a section.
# self._setSectionDefaults('DEFAULT')
################################################################
# schema parameters here.
################################################################
# When using these, make sure to either prepend the build root env var or the appropriate amount of "../"
self.__prop["schema"] = dict()
self.__prop["schema"][
"commands"
] = "/Autocoders/Python/schema/default/command_schema.rng"
self.__prop["schema"][
"events"
] = "/Autocoders/Python/schema/default/event_schema.rng"
self.__prop["schema"][
"telemetry"
] = "/Autocoders/Python/schema/default/channel_schema.rng"
self.__prop["schema"][
"component"
] = "/Autocoders/Python/schema/default/component_schema.rng"
self.__prop["schema"][
"assembly"
] = "/Autocoders/Python/schema/default/topology_schema.rng"
self.__prop["schema"][
"deployment"
] = "/Autocoders/Python/schema/default/topology_schema.rng"
self.__prop["schema"][
"internal_interfaces"
] = "/Autocoders/Python/schema/default/internal_interface_schema.rng"
self.__prop["schema"][
"interface"
] = "/Autocoders/Python/schema/default/interface_schema.rng"
self.__prop["schema"][
"serializable"
] = "/Autocoders/Python/schema/default/serializable_schema.rng"
self.__prop["schema"][
"parameters"
] = "/Autocoders/Python/schema/default/parameters_schema.rng"
self.__prop["schema"][
"enum"
] = "/Autocoders/Python/schema/default/enum_schema.rng"
self.__prop["schema"][
"array"
] = "/Autocoders/Python/schema/default/array_schema.rng"
self._setSectionDefaults("schema")
################################################################
# Schematron parameters here.
################################################################
self.__prop["schematron"] = dict()
self.__prop["schematron"][
"chan_id"
] = "/Autocoders/Python/schema/default/channel_id_schematron.rng"
self.__prop["schematron"][
"param_id"
] = "/Autocoders/Python/schema/default/parameter_id_schematron.rng"
self.__prop["schematron"][
"cmd_op"
] = "/Autocoders/Python/schema/default/command_op_schematron.rng"
self.__prop["schematron"][
"evr_id"
] = "/Autocoders/Python/schema/default/event_id_schematron.rng"
self.__prop["schematron"][
"comp_unique"
] = "/Autocoders/Python/schema/default/comp_uniqueness_schematron.rng"
self.__prop["schematron"][
"top_unique"
] = "/Autocoders/Python/schema/default/top_uniqueness_schematron.rng"
self.__prop["schematron"][
"active_comp"
] = "/Autocoders/Python/schema/default/active_comp_schematron.rng"
self.__prop["schematron"][
"enum_value"
] = "/Autocoders/Python/schema/default/enum_value_schematron.rng"
self.__prop["schematron"][
"array_default"
] = "/Autocoders/Python/schema/default/array_schematron.rng"
self._setSectionDefaults("schematron")
################################################################
# component parameters here.
################################################################
self.__prop["component"] = dict()
#
# Default file naming corresponds to XML name and namespace
# attributes if this is "True". Otherwise it corresponds to
# the input XML file name prefix if this is "False".
#
self.__prop["component"]["XMLDefaultFileName"] = "False"
#
# Define default file suffix here...
#
self.__prop["component"]["ComponentXML"] = "ComponentAi.xml"
self.__prop["component"]["ComponentCpp"] = "ComponentAc.cpp"
self.__prop["component"]["ComponentH"] = "ComponentAc.hpp"
self.__prop["component"]["ImplCpp"] = "ComponentImpl.cpp-template"
self.__prop["component"]["ImplH"] = "ComponentImpl.hpp-template"
self.__prop["component"]["ComponentTestCpp"] = "ComponentTestAc.cpp"
self.__prop["component"]["ComponentTestH"] = "ComponentTestAc.hpp"
self.__prop["component"]["GTestCpp"] = "GTestAc.cpp"
self.__prop["component"]["GTestH"] = "GTestAc.hpp"
self.__prop["component"]["TestImplCpp"] = "TestImpl.cpp"
self.__prop["component"]["TestImplH"] = "TestImpl.hpp"
# This sets the defaults within a section.
self._setSectionDefaults("component")
################################################################
# port or interface parameters here.
################################################################
self.__prop["port"] = dict()
#
# Default file naming corresponds to XML name and namespace
# attributes if this is "True". Otherwise it corresponds to
# the input XML file name prefix if this is "False".
#
self.__prop["port"]["XMLDefaultFileName"] = "False"
#
# Define default file suffix here...
#
self.__prop["port"]["PortXML"] = "PortAi.xml"
self.__prop["port"]["PortCpp"] = "PortAc.cpp"
self.__prop["port"]["PortH"] = "PortAc.hpp"
#
# This sets the defaults within a section.
self._setSectionDefaults("port")
################################################################
# assembly (topology) parameters here.
################################################################
self.__prop["assembly"] = dict()
self.__prop["assembly"]["TopologyXML"] = "AppAi.xml"
self.__prop["assembly"]["TopologyCpp"] = "AppAc.cpp"
self.__prop["assembly"]["TopologyH"] = "AppAc.hpp"
self.__prop["assembly"]["TopologyID"] = "AppID.csv"
# defaults for start baseId and window size set here
self.__prop["assembly"]["baseID"] = 0
self.__prop["assembly"]["window"] = 20
#
self._setSectionDefaults("assembly")
################################################################
# deployment (topology) parameters here.
################################################################
self.__prop["deployment"] = dict()
self.__prop["deployment"]["TopologyXML"] = "AppAi.xml"
self.__prop["deployment"]["TopologyCpp"] = "AppAc.cpp"
self.__prop["deployment"]["TopologyH"] = "AppAc.hpp"
# defaults for start baseId and window size set here
self.__prop["deployment"]["baseID"] = 0
self.__prop["deployment"]["window"] = 20
#
self._setSectionDefaults("deployment")
################################################################
# This sets the defaults for generated include options
# The main idea is playing with include header files for testing
################################################################
self.__prop["includes"] = dict()
self.__prop["includes"]["comp_include_path"] = None
self.__prop["includes"]["port_include_path"] = None
self.__prop["includes"]["serial_include_path"] = None
#
self._setSectionDefaults("includes")
################################################################
# Serializable parameters here.
################################################################
self.__prop["serialize"] = dict()
#
# Default file naming corresponds to XML name and namespace
# attributes if this is "True". Otherwise it corresponds to
# the input XML file name prefix if this is "False".
#
self.__prop["serialize"]["XMLDefaultFileName"] = "False"
#
# Define default file suffix here...
#
self.__prop["serialize"]["SerializableXML"] = "SerializableAi.xml"
self.__prop["serialize"]["SerializableCpp"] = "SerializableAc.cpp"
self.__prop["serialize"]["SerializableH"] = "SerializableAc.hpp"
self._setSectionDefaults("serialize")
#
###############################################################
# Special Ports and their attributes
# They name's default to the implicit ports
###############################################################
self.__prop["special_ports"] = dict()
self.__prop["special_ports"]["Cmd"] = dict()
self.__prop["special_ports"]["Cmd"]["role"] = "Cmd"
self.__prop["special_ports"]["Cmd"]["name"] = "CmdDisp"
self.__prop["special_ports"]["Cmd"]["type"] = "Fw::Cmd"
self.__prop["special_ports"]["Cmd"]["direction"] = "Input"
self.__prop["special_ports"]["Cmd"]["comment"] = None
self.__prop["special_ports"]["Cmd"]["port_file"] = "Fw/Cmd/CmdPortAi.xml"
self.__prop["special_ports"]["CmdResponse"] = dict()
self.__prop["special_ports"]["CmdResponse"]["name"] = "CmdStatus"
self.__prop["special_ports"]["CmdResponse"]["type"] = "Fw::CmdResponse"
self.__prop["special_ports"]["CmdResponse"]["direction"] = "Output"
self.__prop["special_ports"]["CmdResponse"]["comment"] = None
self.__prop["special_ports"]["CmdResponse"][
"port_file"
] = "Fw/Cmd/CmdResponsePortAi.xml"
self.__prop["special_ports"]["CmdRegistration"] = dict()
self.__prop["special_ports"]["CmdRegistration"]["name"] = "CmdReg"
self.__prop["special_ports"]["CmdRegistration"]["type"] = "Fw::CmdReg"
self.__prop["special_ports"]["CmdRegistration"]["direction"] = "Output"
self.__prop["special_ports"]["CmdRegistration"]["comment"] = None
self.__prop["special_ports"]["CmdRegistration"][
"port_file"
] = "Fw/Cmd/CmdRegPortAi.xml"
self.__prop["special_ports"]["LogEvent"] = dict()
self.__prop["special_ports"]["LogEvent"]["name"] = "Log"
self.__prop["special_ports"]["LogEvent"]["type"] = "Fw::Log"
self.__prop["special_ports"]["LogEvent"]["direction"] = "Output"
self.__prop["special_ports"]["LogEvent"]["comment"] = None
self.__prop["special_ports"]["LogEvent"]["port_file"] = "Fw/Log/LogPortAi.xml"
self.__prop["special_ports"]["LogTextEvent"] = dict()
self.__prop["special_ports"]["LogTextEvent"]["name"] = "LogText"
self.__prop["special_ports"]["LogTextEvent"]["type"] = "Fw::LogText"
self.__prop["special_ports"]["LogTextEvent"]["direction"] = "Output"
self.__prop["special_ports"]["LogTextEvent"]["comment"] = None
self.__prop["special_ports"]["LogTextEvent"][
"port_file"
] = "Fw/Log/LogTextPortAi.xml"
self.__prop["special_ports"]["TimeGet"] = dict()
self.__prop["special_ports"]["TimeGet"]["name"] = "Time"
self.__prop["special_ports"]["TimeGet"]["type"] = "Fw::Time"
self.__prop["special_ports"]["TimeGet"]["direction"] = "Output"
self.__prop["special_ports"]["TimeGet"]["comment"] = None
self.__prop["special_ports"]["TimeGet"]["port_file"] = "Fw/Time/TimePortAi.xml"
self.__prop["special_ports"]["Telemetry"] = dict()
self.__prop["special_ports"]["Telemetry"]["name"] = "Tlm"
self.__prop["special_ports"]["Telemetry"]["type"] = "Fw::Tlm"
self.__prop["special_ports"]["Telemetry"]["direction"] = "Output"
self.__prop["special_ports"]["Telemetry"]["comment"] = None
self.__prop["special_ports"]["Telemetry"]["port_file"] = "Fw/Tlm/TlmPortAi.xml"
self.__prop["special_ports"]["ParamGet"] = dict()
self.__prop["special_ports"]["ParamGet"]["name"] = "ParamGet"
self.__prop["special_ports"]["ParamGet"]["type"] = "Fw::PrmGet"
self.__prop["special_ports"]["ParamGet"]["direction"] = "Output"
self.__prop["special_ports"]["ParamGet"]["comment"] = None
self.__prop["special_ports"]["ParamGet"][
"port_file"
] = "Fw/Prm/PrmGetPortAi.xml"
self.__prop["special_ports"]["ParamSet"] = dict()
self.__prop["special_ports"]["ParamSet"]["name"] = "ParamSet"
self.__prop["special_ports"]["ParamSet"]["type"] = "Fw::PrmSet"
self.__prop["special_ports"]["ParamSet"]["direction"] = "Output"
self.__prop["special_ports"]["ParamSet"]["comment"] = None
self.__prop["special_ports"]["ParamSet"][
"port_file"
] = "Fw/Prm/PrmSetPortAi.xml"
self._setSectionDefaults("special_ports")
# Add constants processing
self.__prop["constants"] = dict()
# where the constants file is relative to BUILD_ROOT
# constants will be in the INI file format supported by the ConfigParser library
self.__prop["constants"]["constants_file"] = os.environ.get(
"FPRIME_AC_CONSTANTS_FILE", os.path.join("config", "AcConstants.ini")
)
self._setSectionDefaults("constants")
def _setSectionDefaults(self, section):
"""
For a section set up the default values.
"""
self.add_section(section)
for (key, value) in list(self.__prop[section].items()):
self.set(section, key, "%s" % value)
if __name__ == "__main__":
#
# Quick test of configure defaults.
#
config = ConfigManager().getInstance()
print()
print("IPC section defaults:")
for (key, value) in config.items("ipc"):
print("{} = {}".format(key, value))
print()
print("Get some of the ipc values:")
print("h_pub_suffix = %s" % config.get("ipc", "h_pub_suffix"))
print("h_msg_suffix = %s" % config.get("ipc", "h_msg_suffix"))
print("c_int_suffix = %s" % config.get("ipc", "c_int_suffix"))
print("c_dispatch_suffix = %s" % config.get("ipc", "c_dispatch_suffix"))
print("c_cmd_dispatch_suffix = %s" % config.get("ipc", "c_cmd_dispatch_suffix"))
|
StarcoderdataPython
|
6557926
|
#!/usr/bin/python
import os
import sys
import glob
import base64
import subprocess
import xmlrpclib
import time
import getopt
def main(argv):
iDBUser = ''
oDBName = ''
BacKDir = ''
#init variable
date_backup = time.strftime('%Y%m%d_%H%M%S')
#Logging
command = "#Start:OE-->Backup DB at %s" % (date_backup)
print command
#Check parameter
try:
        opts, args = getopt.getopt(argv,"hu:d:p:",["uDBUser=","dDBName=","pBacKDir="])
except getopt.GetoptError:
print "Invalid command db_backup.sh -u <DBUser> -d <DBName> -p <BacKDir>"
#subprocess.call([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'db_backup.py -u <DBUser> -d <DBName> -p <BacKDir>'
sys.exit()
elif opt in ("-u", "--iDBUser"):
iDBUser = arg
elif opt in ("-d", "--oDBName"):
oDBName = arg
elif opt in ("-p", "--pBacKDir"):
BacKDir = arg
else:
print "Invalid command db_backup.sh -u <DBUser> -d <DBName> -p <BacKDir>"
#subprocess.call([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
sys.exit(2)
#Backup Database
command = "pg_dump -U %s -f '%s/%s_dbbackup-%s.dmp' %s" % (iDBUser, BacKDir, oDBName, date_backup ,oDBName)
print command
subprocess.call([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
#Write file name to file for next restore database
command = "echo '%s/%s_dbbackup-%s.dmp' > %s/oe_db_last_bkup.txt" % (BacKDir, oDBName, date_backup, BacKDir)
print command
subprocess.call([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
#Logging
command = "#End:OE-->Backup DB at %s" % (date_backup)
print command
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
11239794
|
<gh_stars>0
#-*- coding: utf-8 -*-
from .geocodeSlc import geocodeSlc
|
StarcoderdataPython
|
4809965
|
<gh_stars>1-10
from . import utils
class Dim(object):
"""Stores a list of configurations used during the experiment.
For example, lets say that we want to compare variants of GP with different
probability of mutation and also at the same time with different tournament sizes.
The first dimension would be the probability of mutation. We will define this
dimension with a list of possible configurations, e.g.:
Config('mut0.2', pred_mut02), Config('mut0.5', pred_mut05), ...
The other dimension would be a list of tuples describing possible tournament sizes, e.g.:
Config('k4', pred_k4), Config('k7', pred_k7), ...
Dimensions may be combined with each other to generate concrete "real" configurations
of the experiment. This is done by means of a Cartesian product between them. This
is realized with * operator, and in this case it would lead to the following
configurations of a newly created "aggregate" dimension:
Config([('mut0.2', pred_mut02), ('k4', pred_k4)]),
.....,
Config([('mut0.5', pred_mut05), ('k7', pred_k7)])
A simpler example:
dim1 = [A1, A2, A3], dim2 = [B1, B2].
By combining those two dimensions (Dim1 * Dim2) we obtain a single dimension defined as:
dim3 = [(A1,B1), (A1,B2), ..., (A3,B1), (A3,B2)].
In such a case experiment logs will be filtered by successive applications of predicates.
Operator + may be used to add a configuration to already existing dimension. This
may be useful for instance for the control random algorithm, which is a single
complete configuration (not dependent on any considered parameters).
"""
def __init__(self, configs):
if isinstance(configs, list):
self.configs = []
for c in configs:
if isinstance(c, ConfigList):
self.configs.append(c)
elif isinstance(c, tuple):
self.configs.append(Config(c[0], c[1]))
elif isinstance(c, str): # creating only labels
self.configs.append(Config(c, None))
else:
raise Exception("Incorrect arguments passed to Dimension: {0}".format(c))
elif isinstance(configs, ConfigList):
self.configs = [configs]
else:
raise Exception("Incorrect arguments passed to Dimension!")
def __len__(self):
return len(self.configs)
def __iter__(self):
for c in self.configs:
yield c
def __getitem__(self, item):
if isinstance(item, tuple) or isinstance(item, list):
configs = []
for i in item:
configs.append(self.configs[i])
return configs
else:
return self.configs[item]
def __delitem__(self, key):
del self.configs[key]
def __mul__(self, other):
assert isinstance(other, Dim) or isinstance(other, ConfigList), "Dimension may be merged only with other Dimension or ConfigList."
if isinstance(other, Dim):
if len(other) == 0:
return self
else:
return Dim(generate_configs([self, other]))
else:
return self * Dim(other)
def __add__(self, other):
if isinstance(other, ConfigList):
configs = self.configs[:]
configs.append(other)
return Dim(configs)
elif isinstance(other, Dim):
configs = self.configs[:]
configs.extend(other.configs[:])
return Dim(configs)
else:
raise Exception("To the Dimension may be added only a Config or other Dimension.")
__rmul__ = __mul__
def __reversed__(self):
return Dim(list(reversed(self.configs)))
def __str__(self):
return str([c.get_caption() for c in self.configs])
def insert(self, index, config):
"""Inserts config at the particular position on the list of dimension's configs."""
if isinstance(config, Dim):
config = config[0]
self.configs.insert(index, config)
def get_captions_list(self):
"""Returns a list of captions of all configs in this dimension."""
return [c.get_captions_list() for c in self.configs]
def get_captions(self):
"""Returns a list of captions of all configs in this dimension."""
return [c.get_caption() for c in self.configs]
def get_predicates(self):
"""Returns a list of predicates of all configs in this dimension."""
all_lam = []
for c in self.configs:
            lam = lambda p, c=c: all([f[1](p) for f in c.filters])  # bind c per iteration to avoid the late-binding closure pitfall
all_lam.append(lam)
return all_lam
def filter_out_outsiders(self, props):
"""Returns properties in which contained are only elements belonging to one of
the configs in this dimension. Note that dimension values (configs) do not have to
cover the whole possible space or to be disjoint. This functions allows to remove
unnecessary configs and thus may reduce computation time.
"""
return [p for p in props if any([c.filter(p) for c in self.configs])]
def filter_props(self, props):
"""Returns all properties files satisfied by at least one config."""
assert isinstance(props, list), "filter_props expects a list of property files"
return [p for p in props if self.filter(p)]
def filter(self, p):
"""Returns True, if a given property file is covered by at least one config."""
return any(c.filter(p) for c in self.configs)
def sort(self):
"""Sorts this dimension alphabetically on the names of Configs within it."""
self.configs.sort()
return self
def copy(self):
"""Creates a copy of this dimension."""
return Dim(self.configs[:])
def dim_true_within(self, name="ALL"):
"""Returns a new dimension accepting any configuration accepted by this 'parent' dimension."""
return Dim(ConfigOr(name, self.configs))
@classmethod
def dim_true(cls, name="ALL"):
"""Returns a new dimension accepting all configurations."""
return Dim(Config(name, lambda p: True, method=None))
@classmethod
def generic_labels(cls, num, prefix="A"):
"""Creates a Dim object containing num generic names."""
configs = [prefix + str(x) for x in range(num)]
return Dim(configs)
@classmethod
def from_names(cls, names):
"""Creates a dummy Dim object with config names but no lambdas."""
configs = [Config(name, None) for name in names]
return Dim(configs)
@classmethod
def from_data(cls, props, extr):
"""Creates a Dim object by collecting all unique values in the data.
Extractor (extr) is a function used to get values."""
s = utils.get_unique_values(props, extr)
configs = [Config(el, lambda p, extr=extr, el=el: extr(p) == el) for el in s]
return Dim(configs)
@classmethod
def from_dict(cls, props, key, nameFun=None):
"""Creates a Dim object by collecting all unique values under the specified
key in the dictionaries."""
s = utils.get_unique_values(props, lambda p: p[key])
configs = []
for el in s:
kwargs = {key: el}
name = el if nameFun is None else nameFun(el)
configs.append(Config(name, lambda p, key=key, el=el: p[key] == el, **kwargs))
return Dim(configs)
@classmethod
def from_dict_postprocess(cls, props, key, fun):
"""Creates a Dim object by collecting all unique values under the specified
key in the dictionaries after the retrieved values are processed in a specified way."""
values = utils.get_unique_values(props, lambda p: p[key])
values = {fun(x) for x in values}
configs = []
for v in values:
kwargs = {key: v}
configs.append(Config(v, lambda p, key=key, v=v, fun=fun: fun(p[key]) == v, **kwargs))
return Dim(configs)
@classmethod
def from_dict_value_match(cls, key, values):
"""Creates a dimension based on a check if the dictionary contains a given value under the
        provided key. The dimension spans all the provided values.
:param key: (str) a key used to obtain an actual value from the dictionary.
:param values: (list[any]) values, which will be matched to the value obtained from the dict."""
assert isinstance(key, str)
assert isinstance(values, list) and len(values) > 0
configs = []
for v in values:
kwargs = {key: v}
configs.append(Config(v, lambda p, key=key, v=v: p[key] == v, **kwargs))
return Dim(configs)
def generate_configs(dims_list):
"""Returns a list of configurations for a dimension."""
final_filters = []
final_values = []
_generate_filters_helper(dims_list, [], {}, final_filters, final_values)
return [ConfigList(flist, **values) for flist, values in zip(final_filters, final_values)]
def _generate_filters_helper(cur_dims, cur_filters, cur_values, final_filters, final_values):
assert isinstance(cur_filters, list)
assert isinstance(cur_values, dict)
if len(cur_dims) == 0:
final_filters.append(cur_filters)
final_values.append(cur_values)
else:
for config in cur_dims[0]:
new_filters = cur_filters[:]
new_filters.extend(config.filters)
new_values = cur_values.copy()
new_values.update(config.stored_values)
_generate_filters_helper(cur_dims[1:], new_filters, new_values, final_filters, final_values)
class ConfigList(object):
"""ConfigList is defined as a list of filters. A 'filter' is a tuple containing a name
and a predicate. Names of the filters are used during generation of plots/tables, and predicate
is used to leave only properties dicts which were generated in a run matching this configuration.
If more than one filter is defined, conjunction of all predicates is considered."""
def __init__(self, *filters, **kwargs):
self.stored_values = kwargs
if len(filters) == 1:
if isinstance(filters[0], list):
# filters: [(name, lambda)]
self.filters = filters[0]
else:
# filters: (name, lambda)
self.filters = [filters[0]]
else:
# filters: name, lambda
self.filters = [tuple(filters)]
def __len__(self):
return len(self.filters)
def __iter__(self):
for c in self.filters:
yield c
def __getitem__(self, item):
return self.filters[item]
def __delitem__(self, key):
del self.filters[key]
def __lt__(self, other):
n_other = "_".join(x[0] for x in other.filters)
n_self = "_".join([x[0] for x in self.filters])
return n_self < n_other
def __str__(self):
return "ConfigList({0})".format(self.get_caption())
def __call__(self, *args, **kwargs):
return self.filter(args[0])
def head(self):
"""Returns the first filter defined in this config. Convenience function."""
return self.filters[0]
def get_caption(self, sep="/"):
"""Returns a merged name of this Config. This name is generated by merging
names of filters which constitute it."""
return sep.join([str(f[0]) for f in self.filters])
def get_captions_list(self):
"""Returns a list containing names of all filters in this Config."""
return [str(f[0]) for f in self.filters]
def contains_filter(self, otherFilt):
"""Checks, if this Config contains specified filter."""
return any([f == otherFilt for f in self.filters])
def contains_filter_name(self, otherFiltName):
"""Checks, if this Config contains a filter with the specified name."""
return any([f[0] == otherFiltName for f in self.filters])
def filter_props(self, props):
"""Returns all properties files satisfied by a conjunction of all filters in this Config."""
assert isinstance(props, list), "filter_props expects a list of property files"
return [p for p in props if self.filter(p)]
def filter(self, p):
"""Checks, if properties file p is satisfied by a *conjunction* of all filters in this Config."""
assert isinstance(p, dict), "filter expects property file"
for f in self.filters:
if not f[1](p):
return False
return True
class Config(ConfigList):
"""Defines a single configuration of the experiment. Is equivalent to the tuple (name, filter)."""
def __init__(self, name, filter, **kwargs):
assert not isinstance(filter, list), "Config associates the name and a particular filter. " \
"To use multiple filters, please use ConfigOr or ConfigAnd."
assert filter is None or callable(filter), "Filter must be a callable object or None."
self.name = name
self.filter = filter
ConfigList.__init__(self, (name, self), **kwargs)
def __getitem__(self, item):
return self.name if item == 0 else self
def __call__(self, *args, **kwargs):
return self.filter(args[0])
def __str__(self):
return "Config({0})".format(self.get_caption())
def filter(self, p):
return self.filter(p)
class ConfigAnd(ConfigList):
"""A list of configs composed by conjunction. E.g.: accept solutions both with p=1
and p=2 (in this case, impossibility)."""
def __init__(self, name, configs, **kwargs):
assert isinstance(configs, list), "Configs should be provided as a list."
assert len(configs) > 0, "Trying to create ConfigAnd with empty configs list."
self.name = name
self.configs = configs
ConfigList.__init__(self, (name, self), **kwargs)
def __call__(self, *args, **kwargs):
return self.filter(args[0])
def __str__(self):
return "ConfigAnd({0})".format(self.get_caption())
def filter(self, p):
"""Checks, if properties file p is satisfied by a *disjunction* of all filters in this Config."""
assert isinstance(p, dict), "filter expects property file"
for f in self.configs:
if not f(p):
return False
return True
class ConfigOr(ConfigList):
"""A list of configs composed by disjunction. E.g.: accept solutions either with p=1 or p=2."""
def __init__(self, name, configs, **kwargs):
assert isinstance(configs, list), "Configs should be provided as a list."
assert len(configs) > 0, "Trying to create ConfigOr with empty configs list."
self.name = name
self.configs = configs
ConfigList.__init__(self, (name, self), **kwargs)
def __call__(self, *args, **kwargs):
return self.filter(args[0])
def __str__(self):
return "ConfigOr({0})".format(self.get_caption())
def filter(self, p):
"""Checks, if properties file p is satisfied by a *disjunction* of all filters in this Config."""
assert isinstance(p, dict), "filter expects property file"
for f in self.configs:
if f(p):
return True
return False
dim_all = Dim([Config("", lambda p: True)])
|
StarcoderdataPython
|
1688637
|
<gh_stars>0
import sys
print('Enter an integer:')
num = int(input())
sum = 0
if num < 1:
print(0)
else :
for i in range(1,num+1) :
sum = sum + i
print(sum)
sys.exit()
|
StarcoderdataPython
|
3365846
|
<filename>viscid/plot/vlab.py
"""Convevience module for making 3d plots with Mayavi
Note:
You can't set rc parameters for this module!
"""
from __future__ import print_function, division
import os
import sys
import numpy as np
import mayavi
from mayavi import mlab
from mayavi.modules.axes import Axes
from mayavi.sources.builtin_surface import BuiltinSurface
from mayavi.sources.vtk_data_source import VTKDataSource
from traits.trait_errors import TraitError
from tvtk.api import tvtk
import viscid
from viscid import field
def add_source(src, figure=None):
"""Add a vtk data source to a figure
Args:
        src (VTKDataSource): the vtk data source to add to the figure
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
Returns:
None
"""
if not figure:
figure = mlab.gcf()
if src not in figure.children:
engine = figure.parent
engine.add_source(src, scene=figure)
return src
def add_lines(lines, scalars=None, figure=None, name="NoName"):
"""Add list of lines to a figure
Args:
lines (list): See :py:func:`lines2source`
scalars (ndarray): See :py:func:`lines2source`
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
name (str): name of vtk object
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
"""
src = lines2source(lines, scalars=scalars, name=name)
add_source(src, figure=figure)
return src
def add_field(fld, figure=None, center="", name=""):
"""Add a Viscid Field to a mayavi figure
Args:
fld (Field): Some Viscid Field
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
center (str): 'cell' or 'node', leave blank to use fld.center
name (str): name of vtk object, leave black for fld.name
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
"""
src = field2source(fld, center=center, name=name)
add_source(src, figure=figure)
return src
def points2source(vertices, scalars=None, name="NoName"):
    """Turn a set of vertices (and optional scalars) into a vtk point data source"""
# if scalars:
# scalars = [scalars]
verts, scalars, _, other = viscid.vutil.prepare_lines([vertices], scalars)
src = mlab.pipeline.scalar_scatter(verts[0], verts[1], verts[2])
if scalars is not None:
if scalars.dtype == np.dtype('u1'):
sc = tvtk.UnsignedCharArray()
sc.from_array(scalars.T)
scalars = sc
src.mlab_source.dataset.point_data.scalars = scalars
src.mlab_source.dataset.modified()
src.name = name
return src
def lines2source(lines, scalars=None, name="NoName"):
"""Turn a list of lines as ndarrays into vtk data source
Args:
lines (list): List of 3xN, 4xN, 6xN ndarrays of xyz, xyzs, or
xyzrgb data for N points along the line. N need not be the
same for all lines.
scalars (ndarray, list): Scalars for each point, or each line.
See :py:func:`viscid.vutil.prepare_lines` for more details
name (str): name of vtk object
Returns:
:py:class:`mayavi.sources.vtk_data_source.VTKDataSource`
See Also:
* :py:func:`viscid.vutil.prepare_lines`
"""
r = viscid.vutil.prepare_lines(lines, scalars, do_connections=True)
lines, scalars, connections, other = r
src = mlab.pipeline.scalar_scatter(lines[0], lines[1], lines[2])
if scalars is not None:
if scalars.dtype == np.dtype('u1'):
sc = tvtk.UnsignedCharArray()
sc.from_array(scalars.T)
scalars = sc
src.mlab_source.dataset.point_data.scalars = scalars
src.mlab_source.dataset.modified()
src.mlab_source.dataset.lines = connections
src.name = name
return src
def field2source(fld, center=None, name=None):
"""Convert a field to a vtk data source
This dispatches to either :meth:`field_to_point_source` or
:meth:`field_to_cell_source` depending on the centering of
`fld`.
Parameters:
fld: field to convert
center (str): Either "cell", "node", or "" to use the
same centering as fld
name (str): Add specific name. Leave as "" to use fld.name
Returns:
mayavi source
Raises:
NotImplementedError: If center (or fld.center) is not
recognized
"""
if not center:
center = fld.center
center = center.lower()
if center == "node":
src = field2point_source(fld, name=name)
elif center == "cell":
src = field2cell_source(fld, name=name)
else:
raise NotImplementedError("cell / node only for now")
return src
def field2point_source(fld, name=None):
"""Convert a field to a vtk point data source"""
grid, arr = _prep_field(fld)
dat_target = grid.point_data
if fld.iscentered("Cell"):
grid.dimensions = tuple(fld.crds.shape_cc)
grid.x_coordinates = fld.get_crd_cc(0) # ('x')
grid.y_coordinates = fld.get_crd_cc(1) # ('y')
grid.z_coordinates = fld.get_crd_cc(2) # ('z')
elif fld.iscentered("Node"):
grid.dimensions = tuple(fld.crds.shape_nc)
grid.x_coordinates = fld.get_crd_nc(0) # ('x')
grid.y_coordinates = fld.get_crd_nc(1) # ('y')
grid.z_coordinates = fld.get_crd_nc(2) # ('z')
else:
raise ValueError("cell or node only please")
src = _finalize_source(fld, arr, grid, dat_target)
if name:
src.name = name
return src
def field2cell_source(fld, name=None):
"""Convert a field to a vtk cell data source"""
grid, arr = _prep_field(fld)
dat_target = grid.cell_data
if fld.iscentered("Cell"):
grid.dimensions = tuple(fld.crds.shape_nc)
grid.x_coordinates = fld.get_crd_nc(0) # ('x')
grid.y_coordinates = fld.get_crd_nc(1) # ('y')
grid.z_coordinates = fld.get_crd_nc(2) # ('z')
elif fld.iscentered("Node"):
raise NotImplementedError("can't do lossless cell data from nodes yet")
else:
raise ValueError("cell or node only please")
src = _finalize_source(fld, arr, grid, dat_target)
if name:
src.name = name
return src
def _prep_field(fld):
grid = tvtk.RectilinearGrid()
# note, the transpose operations are b/c fld.data is now xyz ordered,
# but vtk expects zyx data
if isinstance(fld, field.ScalarField):
zyx_dat = fld.data.T
arr = np.reshape(zyx_dat, (-1,))
# vtk expects zyx data, but fld.data is now xyz
elif isinstance(fld, field.VectorField):
if fld.layout == field.LAYOUT_INTERLACED:
zyx_dat = np.transpose(fld.data, (2, 1, 0, 3))
arr = np.reshape(zyx_dat, (-1, 3))
elif fld.layout == field.LAYOUT_FLAT:
zyx_dat = np.transpose(fld.data, (0, 3, 2, 1))
arr = np.reshape(np.rollaxis(zyx_dat, 0, len(fld.shape)), (-1, 3))
else:
raise ValueError()
else:
raise ValueError("Unexpected fld type: {0}".format(type(fld)))
# swap endian if needed
if str(arr.dtype).startswith(">"):
arr = arr.byteswap().newbyteorder()
return grid, arr
def _finalize_source(fld, arr, grid, dat_target):
if isinstance(fld, field.ScalarField):
dat_target.scalars = arr
dat_target.scalars.name = fld.name
elif isinstance(fld, field.VectorField):
dat_target.vectors = arr
dat_target.vectors.name = fld.name
src = VTKDataSource(data=grid)
src.name = fld.name
return src
def _prep_vector_source(v_src, scalars):
"""Side-effect: v_src will be modified if scalars are given"""
if isinstance(v_src, viscid.field.Field):
v_src = field2source(v_src, center='node')
if scalars is not None:
if isinstance(scalars, viscid.field.Field):
scalars = field2source(scalars, center='node')
v_src._point_scalars_list.append(scalars.name) # pylint: disable=protected-access
v_src.data.point_data.scalars = scalars.data.point_data.scalars
v_src.point_scalars_name = scalars.name
return v_src, scalars
def scalar_cut_plane(src, center=None, **kwargs):
"""Wraps `mayavi.mlab.pipeline.scalar_cut_plane`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
src (Mayavi Source or ScalarField): If src is a ScalarField,
then the field is wrapped into a Mayavi Source and added
to the figure
center (str): centering for the Mayavi source, 'cell' will
make the grid visible, while 'node' will interpolate
between points
**kwargs: Passed to `mayavi.mlab.pipeline.scalar_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.scalar_cut_plane.ScalarCutPlane`
"""
if isinstance(src, viscid.field.Field):
src = field2source(src, center=center)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
scp = mlab.pipeline.scalar_cut_plane(src, **kwargs)
apply_cmap(scp, **cmap_kwargs)
return scp
def vector_cut_plane(v_src, scalars=None, color_mode='vector', **kwargs):
"""Wraps `mayavi.mlab.pipeline.vector_cut_plane`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
v_src (Mayavi Source, or VectorField): Vector to cut-plane. If
a Mayavi Source, then it must be node centered.
scalars (Mayavi Source, or ScalarField): Optional scalar data.
If a Mayavi Source, then it must be node centered. This
will enable scale_mode and color_mode by 'scalar'
color_mode (str): Color by 'vector', 'scalar', or 'none'
**kwargs: Passed to `mayavi.mlab.pipeline.vector_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.vector_cut_plane.VectorCutPlane`
"""
v_src, scalars = _prep_vector_source(v_src, scalars)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
vcp = mlab.pipeline.vector_cut_plane(v_src, **kwargs)
apply_cmap(vcp, mode='vector', **cmap_kwargs)
apply_cmap(vcp, mode='scalar', **cmap_kwargs)
vcp.glyph.color_mode = 'color_by_{0}'.format(color_mode.strip().lower())
return vcp
def mesh_from_seeds(seeds, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.mesh` for Viscid seed generators
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Args:
seeds (Viscid.SeedGen): Some seed generator with a 2D mesh
representation
scalars (ndarray, ScalarField): data mapped onto the mesh,
i.e., the result of viscid.interp_trilin(seeds, ...)
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.surface.Surface`
"""
if scalars is not None:
vertices, scalars = seeds.wrap_mesh(scalars)
else:
vertices, = seeds.wrap_mesh()
return mesh(vertices[0], vertices[1], vertices[2], scalars=scalars,
**kwargs)
def mesh(x, y, z, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.mesh`
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Args:
x (TYPE): 2D array of vertices' x-values
y (TYPE): 2D array of vertices' y-values
z (TYPE): 2D array of vertices' z-values
scalars (ndarray, ScalarField): optional scalar data
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.surface.Surface`
"""
if scalars is not None:
if isinstance(scalars, viscid.field.Field):
scalars = scalars.data
scalars = scalars.reshape(x.shape)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
m = mlab.mesh(x, y, z, scalars=scalars, **kwargs)
if scalars is not None:
apply_cmap(m, **cmap_kwargs)
return m
def quiver3d(*args, **kwargs):
"""Wraps `mayavi.mlab.quiver3d`
Args:
*args: passed to `mayavi.mlab.quiver3d`
**kwargs: Other Arguments are popped, then kwargs is passed to
`mayavi.mlab.quiver3d`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
        the Mayavi module returned by `mayavi.mlab.quiver3d`
"""
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
quivers = mlab.quiver3d(*args, **kwargs)
apply_cmap(quivers, mode='scalar', **cmap_kwargs)
apply_cmap(quivers, mode='vector', **cmap_kwargs)
return quivers
def points3d(*args, **kwargs):
"""Wraps `mayavi.mlab.points3d`
Args:
*args: passed to `mayavi.mlab.points3d`
**kwargs: Other Arguments are popped, then kwargs is passed to
`mayavi.mlab.points3d`
Keyword Arguments:
modify_args (bool): if True (default), then check if args is a
single 2d sequence of shape 3xN or Nx3. Then split them up
appropriately. if False, then args are passed through
            to mlab.points3d unchanged, no matter what.
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
        the Mayavi module returned by `mayavi.mlab.points3d`
"""
modify_args = kwargs.pop('modify_args', True)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
if modify_args and len(args) < 3:
a0 = np.asarray(args[0])
if len(a0.shape) > 1 and a0.shape[0] == 3:
args = [a0[0, :].reshape(-1),
a0[1, :].reshape(-1),
a0[2, :].reshape(-1)] + list(args[1:])
elif len(a0.shape) > 1 and a0.shape[1] == 3:
args = [a0[:, 0].reshape(-1),
a0[:, 1].reshape(-1),
a0[:, 2].reshape(-1)] + list(args[1:])
points = mlab.points3d(*args, **kwargs)
apply_cmap(points, **cmap_kwargs)
return points
def streamline(v_src, scalars=None, **kwargs):
"""Wraps `mayavi.mlab.pipeline.streamline`; mind the caveats
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
Side-effect: If scalars are given, then v_src is modified to
point to the scalar data!
If v_src and scalars are Mayavi sources, they must be node
centered.
If you call this multiple times with the same v_src and
scalars, you should consider using field2source yourself and
passing the Mayavi source objects, unless you're using
different scalars with the same vector field, since this
        function has side-effects on the vector source.
Args:
v_src (Mayavi Source, or VectorField): Vector to streamline. If
a Mayavi Source, then it must be node centered.
scalars (Mayavi Source, or ScalarField): Optional scalar data.
If a Mayavi Source, then it must be node centered.
**kwargs: Passed to `mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.streamline.Streamline`
"""
v_src, scalars = _prep_vector_source(v_src, scalars)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
sl = mlab.pipeline.streamline(v_src, **kwargs)
apply_cmap(sl, mode='vector', **cmap_kwargs)
apply_cmap(sl, mode='scalar', **cmap_kwargs)
return sl
def iso_surface(src, backface_culling=True, **kwargs):
"""Wraps `mayavi.mlab.pipeline.iso_surface`; mind the caveats
Note that backfaces are culled by default.
Note:
This function will automatically switch to the default
Matplotlib colormap (or the one from your viscidrc file)
If src is a Mayavi source, it must be node centered.
If you call this multiple times with the same
`viscid.field.Field`, you should consider using field2source
yourself and passing the Mayavi source object
Args:
src (Mayavi Source or ScalarField): If src is a ScalarField,
then the field is wrapped into a Mayavi Source and added
to the figure. If a Mayavi Source, then it must be node
centered.
backface_culling (bool): Cull backfaces by default. Useful for
translucent surfaces.
**kwargs: Passed to `mayavi.mlab.pipeline.scalar_cut_plane`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
`mayavi.modules.iso_surface.IsoSurface`
"""
if isinstance(src, viscid.field.Field):
src = field2source(src, center='node')
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
iso = mlab.pipeline.iso_surface(src, **kwargs)
apply_cmap(iso, **cmap_kwargs)
iso.actor.property.backface_culling = backface_culling
return iso
def plot_line(line, scalars=None, **kwargs):
"""Wrap :py:func:`plot_lines` for a single line"""
if scalars is not None:
scalars = [scalars]
return plot_lines([line], scalars=scalars, **kwargs)
def plot_lines(lines, scalars=None, style="tube", figure=None,
name="Lines", tube_radius=0.05, tube_sides=6, **kwargs):
"""Make 3D mayavi plot of lines
Scalars can be a bunch of single values, or a bunch of rgb data
to set the color of each line / vertex explicitly. This is
explained in :py:func:`lines2source`.
Example:
A common use case of setting the line color from a topology
will want to use :py:func:`viscid.topology2color`::
>>> import viscid
>>> from viscid.plot import vlab
>>>
>>> B = viscid.make_dipole()
>>> seeds = viscid.Line([-4, 0, 0], [4, 0, 0])
>>> lines, topology = viscid.calc_streamlines(B, seeds,
>>> ibound=0.05)
>>> scalars = viscid.topology2color(topology)
>>> vlab.plot_lines(lines, scalars, tube_radius=0.02)
>>> vlab.savefig("dipole.x3d")
>>> viscid.meshlab_convert("dipole.x3d", "dae")
>>> vlab.show()
Parameters:
lines (list): See :py:func:`lines2source`
scalars (TYPE): See :py:func:`lines2source`
style (str): 'tube' or 'none'
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
        name (str): name of the vtk source object
tube_radius (float): Radius if style == 'tube'
tube_sides (int): Angular resolution if style == 'tube'
**kwargs: passed to :meth:`mayavi.mlab.pipeline.surface`. This
is useful for setting a colormap among other things.
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
Returns:
Mayavi surface module
Raises:
ValueError: if style is neither tube nor strip
"""
style = style.lower()
if not figure:
figure = mlab.gcf()
src = lines2source(lines, scalars=scalars, name=name)
    # always use the stripper since it actually turns a collection of line
# segments into a line... that way capping will cap lines, not line
# segments, etc.
lines = mlab.pipeline.stripper(src, figure=figure)
if style == "tube":
lines = mlab.pipeline.tube(lines, figure=figure, tube_radius=tube_radius,
tube_sides=tube_sides)
elif style == "none" or not style:
pass
else:
raise ValueError("Unknown style for lines: {0}".format(style))
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
surface = mlab.pipeline.surface(lines, **kwargs)
apply_cmap(surface, **cmap_kwargs)
return surface
def plot_ionosphere(fld, radius=1.063, figure=None, bounding_lat=0.0,
rotate=None, crd_system="gse", **kwargs):
"""Plot an ionospheric field
Args:
        fld (Field): Some spherical (phi, theta) / (lon, lat) field
radius (float): Defaults to 1Re + 400km == 1.063Re
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
        bounding_lat (float): if nonzero, latitude (in degrees) used to clip the mesh
rotate (None, sequence, str, datetime64): sequence of length 4
that contains (angle, ux, uy, uz) for the angle and axis of
a rotation, or a UT time as string or datetime64 to rotate
earth to a specific date/time, or a cotr object in
conjunction with crd_system
crd_system (str, other): Used if rotate is datetime-like. Can
be one of ('gse', 'mhd'), or anything that returns from
:py:func:`viscid.as_crd_system`.
**kwargs: passed to :py:func:`mayavi.mlab.mesh`
Keyword Arguments:
cmap (str, None, False): see :py:func:`apply_cmap`
alpha (number, sequence): see :py:func:`apply_cmap`
clim (sequence): see :py:func:`apply_cmap`
symmetric (bool): see :py:func:`apply_cmap`
logscale (bool): see :py:func:`apply_cmap`
No Longer Raises:
ValueError: Description
"""
if figure is None:
figure = mlab.gcf()
fld = viscid.as_spherefield(fld, order=('phi', 'theta'), units='deg')
phil, thetal = fld.xl
phih, thetah = fld.xh
nphi, ntheta = fld.shape
sphere = viscid.Sphere([0, 0, 0], r=radius, ntheta=ntheta, nphi=nphi,
thetalim=(thetal, thetah), philim=(phil, phih),
theta_phi=False)
verts, arr = sphere.wrap_mesh(fld.data)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
if 'name' not in kwargs:
kwargs['name'] = fld.name
m = mlab.mesh(verts[0], verts[1], verts[2], scalars=arr, figure=figure,
**kwargs)
if bounding_lat:
rp = 1.5 * radius
z = radius * np.cos((np.pi / 180.0) * bounding_lat)
clip = mlab.pipeline.data_set_clipper(m.module_manager.parent)
clip.widget.widget.place_widget(-rp, rp, -rp, rp, -z, z)
clip.update_pipeline()
clip.widget.widget.enabled = False
insert_filter(clip, m.module_manager)
# m.module_manager.parent.parent.filter.auto_orient_normals = True
else:
pass
# m.module_manager.parent.filter.auto_orient_normals = True
m.actor.mapper.interpolate_scalars_before_mapping = True
apply_cmap(m, **cmap_kwargs)
m.actor.actor.rotate_z(180)
_apply_rotation(m, 'sm', rotate, crd_system=crd_system)
return m
def plot_nulls(nulls, Acolor=(0.0, 0.263, 0.345), Bcolor=(0.686, 0.314, 0.0),
Ocolor=(0.239, 0.659, 0.557), **kwargs):
kwargs.setdefault('scale_mode', 'none')
kwargs.setdefault('scale_factor', 0.3)
if not isinstance(nulls, dict):
empty = np.ones((3, 0))
nulls = dict(O=[empty, nulls], A=[empty, empty], B=[empty, empty])
Opts = nulls['O'][1]
if Ocolor is not None and Opts.shape[1]:
mlab.points3d(Opts[0], Opts[1], Opts[2], color=Ocolor, name="Onulls",
**kwargs)
Apts = nulls['A'][1]
    if Acolor is not None and Apts.shape[1]:
mlab.points3d(Apts[0], Apts[1], Apts[2], color=Acolor, name="Anulls",
**kwargs)
Bpts = nulls['B'][1]
if Bcolor is not None and Bpts.shape[1]:
mlab.points3d(Bpts[0], Bpts[1], Bpts[2], color=Bcolor, name="Bnulls",
**kwargs)
def fancy_axes(figure=None, target=None, nb_labels=5, xl=None, xh=None,
tight=False, symmetric=False, padding=0.05, opacity=0.7,
face_color=None, line_width=2.0, grid_color=None,
labels=True, label_color=None, label_shadow=True,
consolidate_labels=True):
"""Make axes with 3 shaded walls and a grid similar to matplotlib
Args:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
target (Mayavi Element): If either xl or xh are not given, then
get that limit from a bounding box around `target`
nb_labels (int, sequence): number of labels in all, or each
(x, y, z) directions
xl (float, sequence): lower corner of axes
xh (float, sequence): upper corner of axes
tight (bool): If False, then let xl and xh expand to make nicer
labels. This uses matplotlib to determine new extrema
symmetric (bool): If True, then xl + xh = 0
padding (float): add padding as a fraction of the total length
opacity (float): opacity of faces
face_color (sequence): color (r, g, b) of faces
line_width (float): Width of grid lines
grid_color (sequence): Color of grid lines
labels (bool): Whether or not to put axis labels on
label_color (sequence): color of axis labels
label_shadow (bool): Add shadows to all labels
consolidate_labels (bool): if all nb_labels are the same, then
only make one axis for the labels
Returns:
VTKDataSource: source to which 2 surfaces and 3 axes belong
"""
if figure is None:
figure = mlab.gcf()
# setup xl and xh
if xl is None or xh is None:
_outline = mlab.outline(target, figure=figure)
if xl is None:
xl = _outline.bounds[0::2]
if xh is None:
xh = _outline.bounds[1::2]
_outline.remove()
nb_labels = np.broadcast_to(nb_labels, (3,))
xl = np.array(np.broadcast_to(xl, (3,)))
xh = np.array(np.broadcast_to(xh, (3,)))
L = xh - xl
xl -= padding * L
xh += padding * L
# now adjust xl and xh to be prettier
if symmetric:
tight = False
if not tight:
from matplotlib.ticker import AutoLocator
for i in range(len(xl)): # pylint: disable=consider-using-enumerate
l = AutoLocator()
l.create_dummy_axis()
l.set_view_interval(xl[i], xh[i])
locs = l()
xl[i] = locs[0]
xh[i] = locs[-1]
dx = (xh - xl) / (nb_labels - 1)
grid = tvtk.ImageData(dimensions=nb_labels, origin=xl, spacing=dx)
src = VTKDataSource(data=grid)
src.name = "fancy_axes"
if face_color is None:
face_color = figure.scene.background
if grid_color is None:
grid_color = figure.scene.foreground
if label_color is None:
label_color = grid_color
face = mlab.pipeline.surface(src, figure=figure, opacity=opacity,
color=face_color)
face.actor.property.frontface_culling = True
if line_width:
grid = mlab.pipeline.surface(src, figure=figure, opacity=1.0,
color=grid_color, line_width=line_width,
representation='wireframe')
grid.actor.property.frontface_culling = True
if labels:
def _make_ax_for_labels(_i, all_axes=False):
if all_axes:
_ax = Axes(name='axes-labels')
else:
_ax = Axes(name='{0}-axis-labels'.format('xyz'[_i]))
# VTK bug... y_axis and z_axis are flipped... how is VTK still
# the de-facto 3d plotting library?
if _i == 0:
_ax.axes.x_axis_visibility = True
_ax.axes.y_axis_visibility = False
_ax.axes.z_axis_visibility = False
elif _i == 1:
_ax.axes.x_axis_visibility = False
_ax.axes.y_axis_visibility = False
_ax.axes.z_axis_visibility = True # VTK bug
elif _i == 2:
_ax.axes.x_axis_visibility = False
_ax.axes.y_axis_visibility = True # VTK bug
_ax.axes.z_axis_visibility = False
else:
raise ValueError()
_ax.property.opacity = 0.0
_ax.axes.number_of_labels = nb_labels[_i]
# import IPython; IPython.embed()
_ax.title_text_property.color = label_color
_ax.title_text_property.shadow = label_shadow
_ax.label_text_property.color = label_color
_ax.label_text_property.shadow = label_shadow
src.add_module(_ax)
if consolidate_labels and np.all(nb_labels[:] == nb_labels[0]):
_make_ax_for_labels(0, all_axes=True)
else:
_make_ax_for_labels(0, all_axes=False)
_make_ax_for_labels(1, all_axes=False)
_make_ax_for_labels(2, all_axes=False)
return src
axes = mlab.axes
xlabel = mlab.xlabel
ylabel = mlab.ylabel
zlabel = mlab.zlabel
title = mlab.title
outline = mlab.outline
orientation_axes = mlab.orientation_axes
view = mlab.view
def _extract_cmap_kwargs(kwargs):
cmap_kwargs = dict()
cmap_kwargs["cmap"] = kwargs.pop("cmap", None)
cmap_kwargs["alpha"] = kwargs.pop("alpha", None)
cmap_kwargs["clim"] = kwargs.pop("clim", None)
cmap_kwargs["symmetric"] = kwargs.pop("symmetric", False)
cmap_kwargs["logscale"] = kwargs.pop("logscale", False)
return kwargs, cmap_kwargs
def colorbar(*args, **kwargs):
"""Wraps mayavi.mlab.colorbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.colorbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def scalarbar(*args, **kwargs):
"""Wraps mayavi.mlab.scalarbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.scalarbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def vectorbar(*args, **kwargs):
"""Wraps mayavi.mlab.vectorbar and adjusts cmap if you so choose"""
cmap = kwargs.pop("cmap", False)
kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
cmap_kwargs.pop("cmap")
ret = mlab.vectorbar(*args, **kwargs)
apply_cmap(ret, cmap=cmap, **cmap_kwargs)
return ret
def get_cmap(cmap=None, lut=None, symmetric=False):
"""Get a Matplotlib colormap as an rgba ndarray
Args:
cmap (str): name of colormap, or an ndarray of rgb(a) colors
lut (int): number of entries desired in the lookup table
Returns:
ndarray: Nx4 array of N rgba colors
"""
import matplotlib
if symmetric and not cmap:
cmap = matplotlib.rcParams.get("viscid.symmetric_cmap", None)
try:
cm = matplotlib.cm.get_cmap(name=cmap, lut=lut)
rgba = (255 * np.asarray(cm(np.linspace(0, 1, cm.N)))).astype('i')
except TypeError:
rgba = np.asarray(cmap)
if np.all(rgba >= 0.0) and np.all(rgba <= 1.0):
rgba = (255 * rgba).astype('i')
else:
rgba = rgba.astype('i')
if np.any(rgba < 0) or np.any(rgba > 255):
raise ValueError("cmap ndarray must have color values between "
"0 and 255 or 0.0 and 1.0")
if rgba.shape[1] not in (3, 4) and rgba.shape[0] in (3, 4):
rgba = np.array(rgba.T)
if rgba.shape[1] == 3:
rgba = np.hstack([rgba, 255 * np.ones_like(rgba[:, :1])])
return rgba
def apply_cmap(target, cmap=None, lut=None, alpha=None, mode='scalar',
clim=None, symmetric=False, logscale=False):
"""Apply a Matplotlib colormap to a Mayavi object & adjust limits
Args:
        target: Some Mayavi object on which to apply the colormap
cmap (sequence, None, False): name of a Matplotlib colormap, or
a sequence of rgb(a) colors, or None to use the default,
or False to leave the colormap alone.
lut (int): number of entries desired in the lookup table
alpha (number, sequence): scalar or array that sets the alpha
(opacity) channel in the range [0..255]. This is expanded
to both ends of the colormap using linear interpolation,
i.e., [0, 255] will be a linear ramp from transparent to
opaque over the whole colormap.
mode (str): one of 'scalar', 'vector', or 'other'
clim (sequence): contains (vmin, vmax) for color scale
symmetric (bool): force the limits on the colorbar to be
symmetric around 0, and if no `cmap` is given, then also
use the default symmetric colormap
logscale (bool): Use a logarithmic color scale
Raises:
        AttributeError: if no lut can be found on `target`
        ValueError: if `mode` is not 'scalar', 'vector', or 'other'
"""
mode = mode.strip().lower()
# get the mayavi lut object
try:
if mode == "scalar":
mvi_lut = target.module_manager.scalar_lut_manager.lut
elif mode == "vector":
mvi_lut = target.module_manager.vector_lut_manager.lut
else:
if mode != "other":
raise ValueError("mode should be 'scalar', 'vector', or "
"'other'; not '{0}'".format(mode))
raise AttributeError()
except AttributeError:
mvi_lut = target.lut
# set the limits on the colorbar
if isinstance(clim, (list, tuple)):
mvi_lut.range = [clim[0], clim[1]]
elif clim == 0:
symmetric = True
elif clim:
symmetric = clim
if logscale and symmetric:
viscid.logger.warn("logscale and symmetric are mutually exclusive;"
"ignoring symmetric.")
if logscale:
mvi_lut.scale = 'log10'
elif symmetric:
# float(True) -> 1
val = float(symmetric) * np.max(np.abs(mvi_lut.range))
mvi_lut.range = [-val, val]
vmin, vmax = mvi_lut.range
is_symmetric = bool(np.isclose(vmax, -1 * vmin, atol=0))
# now set the colormap
changed = False
if cmap is False:
rgba = None if alpha is None else mvi_lut.table.to_array()
else:
rgba = get_cmap(cmap=cmap, lut=lut, symmetric=is_symmetric)
changed = True
if alpha is not None:
alpha = np.asarray(alpha).reshape(-1)
rgba[:, -1] = np.interp(np.linspace(0, 1, len(rgba)),
np.linspace(0, 1, len(alpha)), alpha)
changed = True
if changed:
mvi_lut.table = rgba
def insert_filter(filtr, module_manager):
"""Insert a filter above an existing module_manager
Args:
        filtr: the filter to insert into the pipeline
        module_manager: the existing module manager to re-parent under `filtr`
"""
filtr.parent.children.remove(module_manager)
filtr.children.append(module_manager)
def _apply_rotation(obj, from_system, rotate=None, crd_system='gse'):
if hasattr(rotate, "get_rotation_wxyz"):
rotate = rotate.get_rotation_wxyz(from_system, crd_system)
else:
cotr = viscid.as_cotr(rotate)
rotate = cotr.get_rotation_wxyz(from_system, crd_system)
if len(rotate) != 4:
raise ValueError("Rotate should be [angle, ux, uy, uz], got {0}"
"".format(rotate))
obj.actor.actor.rotate_wxyz(*rotate)
def plot_blue_marble(r=1.0, figure=None, nphi=128, ntheta=64, map_style=None,
lines=False, res=2, rotate=None, crd_system='gse'):
"""Plot Earth using the Natural Earth dataset maps
Args:
r (float): radius of earth
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
nphi (int): phi resolution of Earth's mesh
ntheta (int): theta resolution of Earth's mesh
map_style (str): Nothing for standard map, or 'faded'
lines (bool): Whether or not to show equator, tropics,
arctic circles, and a couple meridians.
res (int): Resolution in thousands of pixels longitude (must
be one of 1, 2, 4, 8)
rotate (None, sequence, str, datetime64): sequence of length 4
that contains (angle, ux, uy, uz) for the angle and axis of
a rotation, or a UT time as string or datetime64 to rotate
earth to a specific date/time, or a cotr object in
conjunction with crd_system
crd_system (str, other): Used if rotate is datetime-like. Can
be one of ('gse', 'mhd'), or anything that returns from
:py:func:`viscid.as_crd_system`.
Returns:
(VTKDataSource, mayavi.modules.surface.Surface)
"""
# make a plane, then deform it into a sphere
eps = 1e-4
ps = tvtk.PlaneSource(origin=(r, r * np.pi - eps, r * 0.0),
point1=(r, r * np.pi - eps, r * 2 * np.pi),
point2=(r, eps, 0.0),
x_resolution=nphi,
y_resolution=ntheta)
ps.update()
transform = tvtk.SphericalTransform()
tpoly = tvtk.TransformPolyDataFilter(transform=transform,
input_connection=ps.output_port)
tpoly.update()
src = VTKDataSource(data=tpoly.output, name="blue_marble")
surf = mlab.pipeline.surface(src)
# now load a jpg, and use it to texture the sphere
linestr = '_lines' if lines else ''
assert map_style in (None, '', 'faded')
assert res in (1, 2, 4, 8)
map_style = '_{0}'.format(map_style) if map_style else ''
img_name = "images/earth{0}{1}_{2}k.jpg".format(map_style, linestr, res)
fname = os.path.realpath(os.path.dirname(__file__) + '/' + img_name)
img = tvtk.JPEGReader(file_name=fname)
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
surf.actor.enable_texture = True
surf.actor.texture = texture
surf.actor.property.color = (1.0, 1.0, 1.0)
    # rotate 180deg b/c i can't rotate the texture to make the prime meridian line up
surf.actor.actor.rotate_z(180)
_apply_rotation(surf, 'geo', rotate, crd_system=crd_system)
add_source(src, figure=figure)
return src, surf
plot_natural_earth = plot_blue_marble
def plot_earth_3d(figure=None, daycol=(1, 1, 1), nightcol=(0, 0, 0),
radius=1.0, res=24, crd_system="gse", night_only=False,
**kwargs):
"""Plot a black and white sphere (Earth) showing sunward direction
Parameters:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
daycol (tuple, optional): color of dayside (RGB)
nightcol (tuple, optional): color of nightside (RGB)
        res (optional): resolution of the sphere
crd_system (str, other): One of ('mhd', 'gse'), or anything
that returns from :py:func:`viscid.as_crd_system`.
Returns:
Tuple (day, night) as vtk sources
"""
if figure is None:
figure = mlab.gcf()
crd_system = viscid.as_crd_system(crd_system)
if crd_system == "mhd":
theta_dusk, theta_dawn = 270, 90
elif crd_system == "gse":
theta_dusk, theta_dawn = 90, 270
else:
# use GSE convention?
theta_dusk, theta_dawn = 90, 270
night = BuiltinSurface(source='sphere', name='night')
night.data_source.set(center=(0, 0, 0), radius=radius,
start_theta=theta_dusk, end_theta=theta_dawn,
theta_resolution=res, phi_resolution=res)
mod = mlab.pipeline.surface(night, color=nightcol, figure=figure, **kwargs)
mod.actor.property.backface_culling = True
if not night_only:
day = BuiltinSurface(source='sphere', name='day')
day.data_source.set(center=(0, 0, 0), radius=radius,
start_theta=theta_dawn, end_theta=theta_dusk,
theta_resolution=res, phi_resolution=res)
mod = mlab.pipeline.surface(day, color=daycol, figure=figure, **kwargs)
mod.actor.property.backface_culling = True
else:
day = None
return day, night
def to_mpl(figure=None, ax=None, size=None, antialiased=True, hide=True,
fit=None, **kwargs):
"""Display a mayavi figure inline in an Jupyter Notebook.
This function takes a screenshot of a figure and blits it to a matplotlib
figure using matplotlib.pyplot.imshow()
Args:
figure: A mayavi figure, if not specified, uses mlab.gcf()
ax: Matplotlib axis of the destination (plt.gca() if None)
size (None, tuple): if given, resize the scene in pixels (x, y)
antialiased (bool): Antialias mayavi plot
hide (bool): if True, try to hide the render window
fit (None, bool): Resize mpl window to fit image exactly. If
None, then fit if figure does not currently exist.
**kwargs: passed to mayavi.mlab.screenshot()
"""
if figure is None:
figure = mlab.gcf()
if size is not None:
resize(size, figure=figure)
pixmap = mlab.screenshot(figure, antialiased=antialiased, **kwargs)
# try to hide the window... Qt backend only
if hide:
hide_window(figure)
if ax is None:
from matplotlib import pyplot as plt
# if there are no figures, and fit is None, then fit
if fit is None:
            fit = not bool(plt.get_fignums())
ax = plt.gca()
if fit:
pltfig = ax.figure
dpi = pltfig.get_dpi()
pltfig.set_size_inches([s / dpi for s in figure.scene.get_size()],
forward=True)
pltfig.subplots_adjust(top=1, bottom=0, left=0, right=1,
hspace=0, wspace=0)
ax.imshow(pixmap)
ax.axis('off')
def figure(*args, **kwargs):
offscreen = kwargs.pop('offscreen', False)
hide = kwargs.pop('hide', None)
fig = mlab.figure(*args, **kwargs)
# if size was set, run resize to account for the height of window
# decorations
if 'size' in kwargs:
resize(kwargs['size'], figure=fig)
# hide window by default?
if hide or (hide is None and offscreen):
hide_window(fig)
# send it offscreen?
if offscreen:
make_fig_offscreen(fig, hide=False)
return fig
def make_fig_offscreen(figure, hide=True):
if hide:
hide_window(figure)
figure.scene.off_screen_rendering = True
return figure
def show(stop=False):
"""Calls :meth:`mayavi.mlab.show(stop=stop)`"""
mlab.show(stop=stop)
def clf(figure=None):
"""Clear source data, then clear figure
Args:
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
"""
if not figure:
figure = mlab.gcf()
clear_data(figure)
mlab.clf(figure)
def remove_source(src):
"""Safely remove a specific vtk source
Args:
src (vtk_data_source): vtk data source to remove
"""
src.stop()
try:
try:
src.data.release_data()
except TraitError:
src.data.release_data_flag = 1
src.cell_scalars_name = ''
src.cell_tensors_name = ''
src.cell_vectors_name = ''
src.point_scalars_name = ''
src.point_tensors_name = ''
src.point_vectors_name = ''
except AttributeError:
pass
src.start()
src.stop()
src.remove()
def clear_data(figures=None):
"""Workaround for Mayavi / VTK memory leak
This is needed when Mayavi/VTK keeps a reference to source data
when you would expect it to be freed like on a call to `mlab.clf()`
or when removing sources from the pipeline.
Note:
This must be called when the pipeline still has the source, so
before a call to `mlab.clf()`, etc.
1. Set release_data_flag on all sources' data
2. Remove reference to the data
3. Remove the data source
Args:
figures (None, mayavi.core.scene.Scene, or 'all'): if None,
gets current scene; if Scene object, just that one; if
'all', act on all scenes in the current engine. Can also be
a list of Scene objects
"""
if figures is None:
figures = [mlab.gcf()]
elif figures == "all":
figures = mlab.get_engine().scenes
if not isinstance(figures, (list, tuple)):
figures = [figures]
if all(fig is None for fig in figures):
return
for fig in figures:
# # fig stop / start kills mayavi now, not sure why
# fig.stop()
for child in list(fig.children):
remove_source(child)
# fig.start()
return
def resize(size, figure=None):
"""Summary
Args:
size (tuple): width, height in pixels
figure (mayavi.core.scene.Scene): specific figure, or None for
:py:func:`mayavi.mlab.gcf`
Returns:
None
"""
if figure is None:
figure = mlab.gcf()
try:
# scene.set_size doesn't seem to work when rendering on screen, so
# go into the backend and do it by hand
if mlab.options.offscreen:
figure.scene.set_size(size)
elif figure.scene.off_screen_rendering:
viscid.logger.warn("viscid.plot.vlab.resize doesn't work for "
"figures that are off-screened this way. Try "
"creating the figure with viscid.plot.vlab."
"figure(size=(w, h), offscreen=True)")
else:
toolkit = mayavi.ETSConfig.toolkit
if toolkit == 'qt4':
sc = figure.scene
window_height = sc.control.parent().size().height()
render_height = sc.render_window.size[1]
h = window_height - render_height
sc.control.parent().resize(size[0], size[1] + h)
elif toolkit == 'wx':
w, h = size[0], size[1]
figure.scene.control.Parent.Parent.SetClientSizeWH(w, h)
else:
viscid.logger.warn("Unknown mayavi backend {0} (not qt4 or "
"wx); not resizing.".format(toolkit))
except Exception as e: # pylint: disable=broad-except
viscid.logger.warn("Resize didn't work:: {0}".format(repr(e)))
def hide_window(figure, debug=False):
"""Try to hide the window; only does something on Qt backend"""
try:
# fig.scene.control.parent().hide()
figure.scene.control.parent().showMinimized()
except Exception as e: # pylint: disable=broad-except,unused-variable
if debug:
print("Window hide didn't work::", repr(e))
def savefig(*args, **kwargs):
"""Wrap mayavi.mlab.savefig with offscreen hack"""
fig = mlab.gcf()
prev_offscreen_state = fig.scene.off_screen_rendering
if sys.platform != "darwin":
fig.scene.off_screen_rendering = True
mlab.savefig(*args, **kwargs)
if fig.scene.off_screen_rendering != prev_offscreen_state:
fig.scene.off_screen_rendering = prev_offscreen_state
def interact(stack_depth=0, **kwargs):
viscid.vutil.interact(stack_depth=stack_depth + 1, mvi_ns=True, **kwargs)
plot3d_lines = plot_lines
plot_lines3d = plot_lines
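# A minimal end-to-end sketch (assumes viscid and mayavi are installed). It reuses the
# dipole example from the plot_lines docstring above and combines it with the
# plot_earth_3d and savefig helpers defined in this module:
#
#     import viscid
#     from viscid.plot import vlab
#
#     B = viscid.make_dipole()
#     seeds = viscid.Line([-4, 0, 0], [4, 0, 0])
#     lines, topology = viscid.calc_streamlines(B, seeds, ibound=0.05)
#     vlab.plot_lines(lines, viscid.topology2color(topology), tube_radius=0.02)
#     vlab.plot_earth_3d(crd_system="gse")
#     vlab.savefig("dipole_earth.png")
#     vlab.show()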
##
## EOF
##
|
StarcoderdataPython
|
11286640
|
<reponame>surya501/usdc-system-integration
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import rospy
import datetime
CONFIDENCE_CUTOFF = 0.5
class TLClassifier(object):
def __init__(self, model=None):
#TODO load classifier
self.GRAPH_FILE = model
# load the graph one time
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.GRAPH_FILE, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.sess = tf.Session(graph=self.detection_graph)
def filter_boxes(self, min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
with self.detection_graph.as_default():
image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
start = datetime.datetime.now()
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
end = datetime.datetime.now()
# rospy.logwarn("detection time: {0}".format((end-start).total_seconds()))
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
# Filter boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = self.filter_boxes(CONFIDENCE_CUTOFF, boxes, scores, classes)
if len(classes) > 0:
# rospy.logwarn("matches {0} and class: {1}".format(len(classes), classes[0]))
if classes[0] == 1:
return TrafficLight.GREEN
elif classes[0] == 2:
return TrafficLight.RED
elif classes[0] == 3:
return TrafficLight.YELLOW
else:
return TrafficLight.UNKNOWN
else:
return TrafficLight.UNKNOWN
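
# --- A minimal usage sketch (illustration only; the graph path and test image below are
# hypothetical, and the channel order the model expects depends on how it was trained). ---
if __name__ == '__main__':
    import cv2  # used only for this illustrative test
    classifier = TLClassifier(model='frozen_inference_graph.pb')   # hypothetical model path
    image = cv2.imread('sample_traffic_light.jpg')                 # hypothetical test image
    print('traffic light state id:', classifier.get_classification(image))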
|
StarcoderdataPython
|
3208471
|
import geopandas as gpd
import oggm
from oggm import cfg, tasks
from oggm.utils import get_demo_file
# Set up the input data for this example
cfg.initialize()
cfg.PATHS['working_dir'] = oggm.utils.get_temp_dir('oggmcontrib_inv')
cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
# Glacier directory for Hintereisferner in Austria
entity = gpd.read_file(get_demo_file('Hintereisferner_RGI5.shp')).iloc[0]
gdir = oggm.GlacierDirectory(entity)
# The usual OGGM preprocessing
tasks.define_glacier_region(gdir, entity=entity)
tasks.glacier_masks(gdir)
|
StarcoderdataPython
|
5008213
|
#!/usr/bin/env python3
from bisect import bisect_left
import math
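# Overview of the approach below (added commentary): the interval list is kept sorted and
# bracketed by two sentinel intervals at -inf and +inf, so bisect_left always finds an
# insertion point with real neighbours on both sides. Each new value either merges its two
# neighbouring intervals, extends one of them, or becomes a fresh single-element interval;
# getIntervals strips the sentinels off before returning.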
class SummaryRanges:
def __init__(self):
self.I = [[-math.inf, -math.inf], [math.inf, math.inf]]
def addNum(self, val):
ix = bisect_left(self.I, [val, val])
print(f'val = {val}, ix = {ix}')
if self.I[ix-1][1]+1 == val == self.I[ix][0]-1:
self.I[ix-1][1] = self.I[ix][1]
self.I.pop(ix)
elif self.I[ix-1][1]+1 == val:
self.I[ix-1][1] = val
elif val == self.I[ix][0]-1:
self.I[ix][0] = val
elif self.I[ix-1][1] < val < self.I[ix][0]:
self.I.insert(ix, [val, val])
def getIntervals(self):
return self.I[1:-1]
l = [1,3,7]
l = [1,3,2]
l = [1,3,7,5]
l = [1,3,7,2,6]
l = [6,6,0,4,8,7,6,4,7,5]
l = [49,97,53,5,33,65,62,51,100,38,61,45,74,27,64,17,36,17,96,12,79,32,68,90,77,18,39,12,93,9,87,42,60,71,12,45,55,40,78,81,26,70,61]
l = [49,97,53,5,33,65,62,51,100,38,61,45,74,27,64,17,36,17,96,12,79,32,68,90,77,18,39,12,93,9,87,42,60,71,12,45,55,40,78,81,26,70,61,56,66,33,7,70,1,11,92,51,90,100,85,80,0,78,63,42,31,93,41,90,8,24,72,28,30,18,69,57,11,10,40,65,62,13,38,70,37,90,15,70,42,69,26,77,70,75,36,56,11,76,49,40,73,30,37,23]
sol = SummaryRanges()
for i in l:
sol.addNum(i)
print(sol.getIntervals())
|
StarcoderdataPython
|
5013816
|
import jwt
from datetime import datetime, timedelta
from hashlib import md5
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from helloworld import app, db
from helloworld import login
@login.user_loader
def load_user(id):
return User.query.get(int(id))
subscriptions = db.Table('subscriptions',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True)
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(128), index=True, unique=True)
password_hash = db.Column(db.String(128))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable, not a fixed import-time value
courses = db.relationship('Course', secondary=subscriptions,
backref=db.backref('course_participant', lazy=True), lazy='dynamic')
def get_reset_password_token(self, expires_in=600):
return jwt.encode(
{'reset_password': self.id, 'exp': datetime.now() + timedelta(seconds=expires_in)},
app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
@staticmethod
def verify_reset_password_token(token):
try:
id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password']
        except jwt.PyJWTError as e:
app.logger.error('Decoding error: {}'.format(e))
return
return User.query.get(id)
def avatar(self, size=128):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def subscribe(self, course):
self.courses.append(course)
def unsubscribe(self, course):
self.courses.remove(course)
def __repr__(self):
return '<User {}>'.format(self.username)
class Course(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(140))
description = db.Column(db.String(140), index=True)
subscribers = db.relationship('User', secondary=subscriptions,
backref=db.backref('subscribed_courses', lazy=True), lazy='dynamic')
def __repr__(self):
return '<Course {}>'.format(self.title)
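
# --- A minimal usage sketch (illustration only, not part of the original module); it
# assumes the `helloworld` app is configured with a writable database. ---
if __name__ == '__main__':
    with app.app_context():
        db.create_all()
        user = User(username='alice', email='alice@example.com')
        user.set_password('s3cret')
        course = Course(title='Python 101', description='An introductory course')
        user.subscribe(course)
        db.session.add_all([user, course])
        db.session.commit()
        assert user.check_password('s3cret')
        token = user.get_reset_password_token()
        assert User.verify_reset_password_token(token).id == user.id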
|
StarcoderdataPython
|
251279
|
<filename>pythonspain/partners/migrations/0007_notice_kind.py
# Generated by Django 3.1.7 on 2021-02-28 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("partners", "0006_auto_20200627_1526"),
]
operations = [
migrations.AddField(
model_name="notice",
name="kind",
field=models.CharField(
choices=[("late", "Late"), ("annual", "Annual")],
default="late",
max_length=16,
verbose_name="kind",
),
),
]
|
StarcoderdataPython
|
3473831
|
import boto3
client = boto3.client('s3')
buckets = client.list_buckets()['Buckets']  # list_buckets() returns a dict; bucket records live under 'Buckets'
for bucket in buckets:
    # ContentMD5 is omitted; boto3 computes it automatically when a request needs it
    response = client.put_bucket_encryption(
        Bucket=bucket['Name'],
        ServerSideEncryptionConfiguration={'Rules': [{'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}}]})
|
StarcoderdataPython
|
3272543
|
<filename>src/electionguard_cli/e2e/e2e_command.py
from io import TextIOWrapper
import click
from ..cli_steps import (
ElectionBuilderStep,
DecryptStep,
PrintResultsStep,
TallyStep,
KeyCeremonyStep,
EncryptVotesStep,
)
from .e2e_input_retrieval_step import E2eInputRetrievalStep
from .submit_votes_step import SubmitVotesStep
from .e2e_publish_step import E2ePublishStep
@click.command("e2e")
@click.option(
"--guardian-count",
prompt="Number of guardians",
help="The number of guardians that will participate in the key ceremony and tally.",
type=click.INT,
)
@click.option(
"--quorum",
prompt="Quorum",
help="The minimum number of guardians required to show up to the tally.",
type=click.INT,
)
@click.option(
"--manifest",
prompt="Manifest file",
help="The location of an election manifest.",
type=click.File(),
)
@click.option(
"--ballots",
prompt="Ballots file or directory",
help="The location of a file or directory that contains plaintext ballots.",
type=click.Path(exists=True, dir_okay=True, file_okay=True),
)
@click.option(
"--spoil-id",
prompt="Object-id of ballot to spoil",
help="The object-id of a ballot within the ballots file to spoil.",
type=click.STRING,
default=None,
prompt_required=False,
)
@click.option(
"--output-record",
help="A file name for saving an output election record (e.g. './election.zip')."
+ " If no value provided then an election record will not be generated.",
type=click.Path(
exists=False,
dir_okay=False,
file_okay=True,
),
default=None,
)
@click.option(
"--output-keys",
help="A directory for saving the private and public guardian keys (e.g. './guardian-keys')."
+ " If no value provided then no keys will be output.",
type=click.Path(exists=False, dir_okay=True, file_okay=False, resolve_path=True),
default=None,
)
def E2eCommand(
guardian_count: int,
quorum: int,
manifest: TextIOWrapper,
ballots: str,
spoil_id: str,
output_record: str,
output_keys: str,
) -> None:
"""Runs through an end-to-end election."""
# get user inputs
election_inputs = E2eInputRetrievalStep().get_inputs(
guardian_count, quorum, manifest, ballots, spoil_id, output_record, output_keys
)
# perform election
joint_key = KeyCeremonyStep().run_key_ceremony(election_inputs.guardians)
build_election_results = ElectionBuilderStep().build_election_with_key(
election_inputs, joint_key
)
encrypt_results = EncryptVotesStep().encrypt(
election_inputs.ballots, build_election_results
)
data_store = SubmitVotesStep().submit(
election_inputs, build_election_results, encrypt_results
)
(ciphertext_tally, spoiled_ballots) = TallyStep().get_from_ballot_store(
build_election_results, data_store
)
decrypt_results = DecryptStep().decrypt(
ciphertext_tally,
spoiled_ballots,
election_inputs.guardians,
build_election_results,
election_inputs.manifest,
)
# print results
PrintResultsStep().print_election_results(decrypt_results)
# publish election record
E2ePublishStep().export(
election_inputs,
build_election_results,
encrypt_results,
decrypt_results,
data_store,
)
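# Hedged usage sketch (added for illustration): exercising the command in-process
# with click's test runner. The manifest, ballots and output paths are placeholders,
# not files from the original project.
def _run_e2e_example():
    from click.testing import CliRunner
    runner = CliRunner()
    return runner.invoke(E2eCommand, [
        "--guardian-count", "5",
        "--quorum", "3",
        "--manifest", "./manifest.json",
        "--ballots", "./plaintext-ballots",
        "--output-record", "./election.zip",
    ])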
|
StarcoderdataPython
|
244785
|
<filename>Terrain.py
import pygame.sprite
class Terrain(pygame.sprite.Sprite):
all = pygame.sprite.Group()
def __init__(self, x, y, width, height):
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill((0, 0, 0))
self.rect = self.image.get_rect(x=x, y=y)
self.all.add(self)
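# Hedged usage sketch (added for illustration): a minimal pygame session that draws
# every Terrain instance through the shared `all` sprite group. Window size and
# terrain coordinates are arbitrary example values.
def _terrain_demo():
    import pygame
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    Terrain(0, 200, 320, 40)   # ground strip along the bottom
    Terrain(120, 150, 60, 20)  # a floating platform
    screen.fill((255, 255, 255))
    Terrain.all.draw(screen)
    pygame.display.flip()
    pygame.quit()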
|
StarcoderdataPython
|
142804
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 17:44:53 2021
@author: mlampert
"""
import os
import copy
#FLAP imports and settings
import flap
import flap_nstx
import flap_mdsplus
flap_nstx.register('NSTX_GPI')
flap_nstx.register('NSTX_THOMSON')
flap_mdsplus.register('NSTX_MDSPlus')
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
#Scientific imports
import numpy as np
def get_nstx_thomson_gradient(exp_id=None,
pressure=False,
temperature=False,
density=False,
r_pos=None,
spline_data=False,
output_name=None,
device_coordinates=True,
flux_coordinates=False):
#Data is RADIUS x TIME
if pressure+density+temperature != 1:
raise ValueError('Only one of the inputs should be set (pressure, temperature, density)!')
if device_coordinates+flux_coordinates !=1:
raise ValueError('Either device_coordinates or flux_coordinates can be set, not both.')
# thomson=flap_nstx_thomson_data(exp_id,
# pressure=pressure,
# temperature=temperature,
# density=density,
# output_name='THOMSON_FOR_GRADIENT')
thomson=flap.get_data('NSTX_THOMSON',
exp_id=exp_id,
object_name='THOMSON_FOR_GRADIENT',
options={'pressure':pressure,
'temperature':temperature,
'density':density,
'force_mdsplus':True})
# thomson_spline=flap_nstx_thomson_data(exp_id,
# pressure=pressure,
# temperature=temperature,
# density=density,
# spline_data=True,
# output_name=None)
thomson_spline=flap.get_data('NSTX_THOMSON',
exp_id=exp_id,
object_name=None,
options={'pressure':pressure,
'temperature':temperature,
'density':density,
'spline_data':True,
'force_mdsplus':True})
if device_coordinates:
radial_coordinate=thomson.coordinate('Device R')[0][:,0]
spline_radial_coordinate=thomson_spline.coordinate('Device R')[0][:,0]
if flux_coordinates:
radial_coordinate=thomson.coordinate('Flux r')[0][:,0]
spline_radial_coordinate=thomson_spline.coordinate('Flux r')[0][:,0]
time_vector=thomson.coordinate('Time')[0][0,:]
data=thomson.data
error=thomson.error
interp_data=thomson_spline.data
#Calculation of the numerical gradient and interpolating the values for the given r_pos
data_gradient=np.asarray([(data[2:,i]-data[:-2,i])/(2*(radial_coordinate[2:]-radial_coordinate[:-2])) for i in range(len(time_vector))]).T
data_gradient_error=np.asarray([(np.abs(error[2:,i])+np.abs(error[:-2,i]))/(2*(radial_coordinate[2:]-radial_coordinate[:-2])) for i in range(len(time_vector))]).T
interp_data_gradient=np.asarray([(interp_data[2:,i]-interp_data[:-2,i])/(2*(spline_radial_coordinate[2:]-spline_radial_coordinate[:-2])) for i in range(len(time_vector))]).T
#Interpolation for the r_pos
if r_pos is not None:
r_pos_gradient=np.asarray([np.interp(r_pos, radial_coordinate[1:-1], data_gradient[:,i]) for i in range(len(time_vector))])
r_pos_gradient_spline=np.asarray([np.interp(r_pos, spline_radial_coordinate[1:-1], interp_data_gradient[:,i]) for i in range(len(time_vector))])
ind_r=np.argmin(np.abs(radial_coordinate[1:-1]-r_pos))
if radial_coordinate[ind_r] < r_pos:
R1=radial_coordinate[1:-1][ind_r]
R2=radial_coordinate[1:-1][ind_r+1]
ind_R1=ind_r
ind_R2=ind_r+1
else:
R1=radial_coordinate[1:-1][ind_r-1]
R2=radial_coordinate[1:-1][ind_r]
ind_R1=ind_r-1
ind_R2=ind_r
#Result of error propagation (basically average biased error between the two neighboring radii)
r_pos_gradient_error=np.abs((r_pos-R1)/(R2-R1))*data_gradient_error[ind_R2,:]+\
np.abs((r_pos-R2)/(R2-R1))*data_gradient_error[ind_R1,:]
coord = []
coord.append(copy.deepcopy(flap.Coordinate(name='Time',
unit='s',
mode=flap.CoordinateMode(equidistant=True),
start=time_vector[0],
step=time_vector[1]-time_vector[0],
#shape=time_arr.shape,
dimension_list=[0]
)))
coord.append(copy.deepcopy(flap.Coordinate(name='Sample',
unit='n.a.',
mode=flap.CoordinateMode(equidistant=True),
start=0,
step=1,
dimension_list=[0]
)))
if device_coordinates:
grad_unit='/m'
if flux_coordinates:
grad_unit='/psi'
if pressure:
data_unit = flap.Unit(name='Pressure gradient',unit='kPa'+grad_unit)
elif temperature:
data_unit = flap.Unit(name='Temperature gradient',unit='keV'+grad_unit)
elif density:
data_unit = flap.Unit(name='Density gradient',unit='m-3'+grad_unit)
if not spline_data:
d = flap.DataObject(exp_id=exp_id,
data_array=r_pos_gradient,
error=r_pos_gradient_error,
data_unit=data_unit,
coordinates=coord,
data_title='NSTX Thomson gradient')
else:
d = flap.DataObject(exp_id=exp_id,
data_array=r_pos_gradient_spline,
data_unit=data_unit,
coordinates=coord,
data_title='NSTX Thomson gradient spline')
else:
coord = []
coord.append(copy.deepcopy(flap.Coordinate(name='Time',
unit='s',
mode=flap.CoordinateMode(equidistant=True),
start=time_vector[0],
step=time_vector[1]-time_vector[0],
#shape=time_arr.shape,
dimension_list=[1]
)))
coord.append(copy.deepcopy(flap.Coordinate(name='Sample',
unit='n.a.',
mode=flap.CoordinateMode(equidistant=True),
start=0,
step=1,
dimension_list=[1]
)))
if pressure:
data_unit = flap.Unit(name='Pressure gradient',unit='kPa/m')
elif temperature:
data_unit = flap.Unit(name='Temperature gradient',unit='keV/m')
elif density:
data_unit = flap.Unit(name='Density gradient',unit='m-3/m')
if device_coordinates:
radial_coordinate_name='Device R'
radial_unit='m'
if flux_coordinates:
radial_coordinate_name='Flux r'
radial_unit=''
if not spline_data:
coord.append(copy.deepcopy(flap.Coordinate(name=radial_coordinate_name,
unit=radial_unit,
mode=flap.CoordinateMode(equidistant=False),
values=radial_coordinate[1:-1],
shape=radial_coordinate[1:-1].shape,
dimension_list=[0]
)))
d = flap.DataObject(exp_id=exp_id,
data_array=data_gradient,
error=data_gradient_error,
data_unit=data_unit,
coordinates=coord,
data_title='NSTX Thomson gradient')
else:
coord.append(copy.deepcopy(flap.Coordinate(name=radial_coordinate_name,
unit=radial_unit,
mode=flap.CoordinateMode(equidistant=False),
values=spline_radial_coordinate[1:-1],
shape=spline_radial_coordinate[1:-1].shape,
dimension_list=[0]
)))
d = flap.DataObject(exp_id=exp_id,
data_array=interp_data_gradient,
data_unit=data_unit,
coordinates=coord,
data_title='NSTX Thomson gradient spline')
if output_name is not None:
flap.add_data_object(d,output_name)
return d
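# Hedged usage sketch (added for illustration): the shot number, radius and output
# name are hypothetical. Exactly one of pressure/temperature/density and exactly one
# of device_coordinates/flux_coordinates must be selected, as enforced above.
def _thomson_gradient_example():
    return get_nstx_thomson_gradient(exp_id=141319,
                                     temperature=True,
                                     r_pos=1.4,
                                     device_coordinates=True,
                                     flux_coordinates=False,
                                     output_name='TE_GRAD_R140')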
|
StarcoderdataPython
|
1689698
|
#!/usr/bin/env python3
# This file is part of the MicroPython project, http://micropython.org/
# The MIT License (MIT)
# Copyright (c) 2019 <NAME>
import os
import subprocess
import sys
import argparse
sys.path.append('../tools')
import pyboard
# Paths for host executables
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython_coverage')
NATMOD_EXAMPLE_DIR = '../examples/natmod/'
# Supported tests and their corresponding mpy module
TEST_MAPPINGS = {
'btree': 'btree/btree_$(ARCH).mpy',
'framebuf': 'framebuf/framebuf_$(ARCH).mpy',
'uheapq': 'uheapq/uheapq_$(ARCH).mpy',
'urandom': 'urandom/urandom_$(ARCH).mpy',
'ure': 'ure/ure_$(ARCH).mpy',
'uzlib': 'uzlib/uzlib_$(ARCH).mpy',
}
# Code to allow a target MicroPython to import an .mpy from RAM
injected_import_hook_code = """\
import sys, uos, uio
class __File(uio.IOBase):
def __init__(self):
self.off = 0
def ioctl(self, request, arg):
return 0
def readinto(self, buf):
buf[:] = memoryview(__buf)[self.off:self.off + len(buf)]
self.off += len(buf)
return len(buf)
class __FS:
def mount(self, readonly, mkfs):
pass
def chdir(self, path):
pass
def stat(self, path):
if path == '__injected.mpy':
return tuple(0 for _ in range(10))
else:
raise OSError(-2) # ENOENT
def open(self, path, mode):
return __File()
uos.mount(__FS(), '/__remote')
uos.chdir('/__remote')
sys.modules['{}'] = __import__('__injected')
"""
class TargetSubprocess:
def __init__(self, cmd):
self.cmd = cmd
def close(self):
pass
def run_script(self, script):
try:
p = subprocess.run(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script)
return p.stdout, None
except subprocess.CalledProcessError as er:
return b'', er
class TargetPyboard:
def __init__(self, pyb):
self.pyb = pyb
self.pyb.enter_raw_repl()
def close(self):
self.pyb.exit_raw_repl()
self.pyb.close()
def run_script(self, script):
try:
self.pyb.enter_raw_repl()
output = self.pyb.exec_(script)
output = output.replace(b'\r\n', b'\n')
return output, None
except pyboard.PyboardError as er:
return b'', er
def run_tests(target_truth, target, args, stats):
for test_file in args.files:
# Find supported test
for k, v in TEST_MAPPINGS.items():
if test_file.find(k) != -1:
test_module = k
test_mpy = v.replace('$(ARCH)', args.arch)
break
else:
print('---- {} - no matching mpy'.format(test_file))
continue
# Read test script
with open(test_file, 'rb') as f:
test_file_data = f.read()
# Create full test with embedded .mpy
try:
with open(NATMOD_EXAMPLE_DIR + test_mpy, 'rb') as f:
test_script = b'__buf=' + bytes(repr(f.read()), 'ascii') + b'\n'
except OSError:
print('---- {} - mpy file not compiled'.format(test_file))
continue
test_script += bytes(injected_import_hook_code.format(test_module), 'ascii')
test_script += test_file_data
# Run test under MicroPython
result_out, error = target.run_script(test_script)
# Work out result of test
extra = ''
if error is None and result_out == b'SKIP\n':
result = 'SKIP'
elif error is not None:
result = 'FAIL'
extra = ' - ' + str(error)
else:
# Check result against truth
try:
with open(test_file + '.exp', 'rb') as f:
result_exp = f.read()
error = None
except OSError:
result_exp, error = target_truth.run_script(test_file_data)
if error is not None:
result = 'TRUTH FAIL'
elif result_out != result_exp:
result = 'FAIL'
print(result_out)
else:
result = 'pass'
# Accumulate statistics
stats['total'] += 1
if result == 'pass':
stats['pass'] += 1
elif result == 'SKIP':
stats['skip'] += 1
else:
stats['fail'] += 1
# Print result
print('{:4} {}{}'.format(result, test_file, extra))
def main():
cmd_parser = argparse.ArgumentParser(description='Run dynamic-native-module tests under MicroPython')
cmd_parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
cmd_parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
cmd_parser.add_argument('-a', '--arch', default='x64', help='native architecture of the target')
cmd_parser.add_argument('files', nargs='*', help='input test files')
args = cmd_parser.parse_args()
target_truth = TargetSubprocess([CPYTHON3])
if args.pyboard:
target = TargetPyboard(pyboard.Pyboard(args.device))
else:
target = TargetSubprocess([MICROPYTHON])
stats = {'total': 0, 'pass': 0, 'fail':0, 'skip': 0}
run_tests(target_truth, target, args, stats)
target.close()
target_truth.close()
print('{} tests performed'.format(stats['total']))
print('{} tests passed'.format(stats['pass']))
if stats['fail']:
print('{} tests failed'.format(stats['fail']))
if stats['skip']:
print('{} tests skipped'.format(stats['skip']))
if stats['fail']:
sys.exit(1)
if __name__ == "__main__":
main()
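# Hedged usage note (added for illustration): typical invocations, however the script
# file is named in the local tree; the test file paths below are placeholders.
#   python3 run_natmod_tests.py --arch x64 extmod/btree1.py
#   python3 run_natmod_tests.py --pyboard --device /dev/ttyACM0 extmod/ure1.py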
|
StarcoderdataPython
|
11329185
|
<reponame>JaredAllen13/insightconnect-plugins
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Receive events based on criteria"
class Input:
EVENT_ID = "event_id"
class Output:
ERRORS = "errors"
EVENT = "event"
MESSAGE = "message"
class FindEventInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"event_id": {
"type": "string",
"title": "Event ID",
"description": "Event ID e.g. 123",
"order": 1
}
},
"required": [
"event_id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class FindEventOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"errors": {
"type": "array",
"title": "Errors",
"description": "Errors",
"items": {
"type": "string"
},
"order": 3
},
"event": {
"$ref": "#/definitions/Event",
"title": "Event",
"description": "Event",
"order": 1
},
"message": {
"type": "string",
"title": "Message",
"description": "Message",
"order": 2
}
},
"definitions": {
"Event": {
"type": "object",
"title": "Event",
"properties": {
"Org": {
"$ref": "#/definitions/SharingGroup",
"title": "Org",
"order": 16
},
"Orgc": {
"$ref": "#/definitions/SharingGroup",
"title": "Orgc",
"order": 13
},
"SharingGroup": {
"$ref": "#/definitions/SharingGroup",
"title": "Sharing Group",
"description": "Sharing group",
"order": 3
},
"Tag": {
"type": "array",
"title": "Tag",
"items": {
"$ref": "#/definitions/Tag"
},
"order": 12
},
"analysis": {
"type": "string",
"title": "Analysis",
"order": 11
},
"attribute_count": {
"type": "string",
"title": "Attribute Count",
"order": 9
},
"date": {
"type": "string",
"title": "Date",
"order": 15
},
"disable_correlation": {
"type": "boolean",
"title": "Disable Correlation",
"order": 8
},
"distribution": {
"type": "string",
"title": "Distribution",
"order": 17
},
"id": {
"type": "string",
"title": "ID",
"order": 19
},
"info": {
"type": "string",
"title": "Info",
"description": "Info",
"order": 1
},
"locked": {
"type": "boolean",
"title": "Locked",
"order": 4
},
"org_id": {
"type": "string",
"title": "Org ID",
"order": 10
},
"orgc_id": {
"type": "string",
"title": "Org ID",
"description": "Org ID",
"order": 2
},
"proposal_email_lock": {
"type": "boolean",
"title": "Proposal Email Lock",
"order": 18
},
"publish_timestamp": {
"type": "string",
"title": "Publish Timestamp",
"order": 21
},
"published": {
"type": "boolean",
"title": "Published",
"order": 14
},
"sharing_group_id": {
"type": "string",
"title": "Sharing Group ID",
"order": 6
},
"threat_level_id": {
"type": "string",
"title": "Threat Level ID",
"order": 20
},
"timestamp": {
"type": "string",
"title": "Timestamp",
"order": 7
},
"uuid": {
"type": "string",
"title": "UUID",
"order": 5
}
},
"definitions": {
"SharingGroup": {
"type": "object",
"title": "SharingGroup",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 2
}
}
},
"Tag": {
"type": "object",
"title": "Tag",
"properties": {
"colour": {
"type": "string",
"title": "Colour",
"description": "Colour",
"order": 4
},
"exportable": {
"type": "boolean",
"title": "Exportable",
"description": "Exportable",
"order": 3
},
"hide_tag": {
"type": "boolean",
"title": "Hide Tag",
"description": "Hide Tag",
"order": 5
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 1
}
}
}
}
},
"SharingGroup": {
"type": "object",
"title": "SharingGroup",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 2
}
}
},
"Tag": {
"type": "object",
"title": "Tag",
"properties": {
"colour": {
"type": "string",
"title": "Colour",
"description": "Colour",
"order": 4
},
"exportable": {
"type": "boolean",
"title": "Exportable",
"description": "Exportable",
"order": 3
},
"hide_tag": {
"type": "boolean",
"title": "Hide Tag",
"description": "Hide Tag",
"order": 5
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 1
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
StarcoderdataPython
|
6442439
|
<filename>TranskribusDU/crf/Model_SSVM_AD3_Multitype.py
# -*- coding: utf-8 -*-
"""
Train, test, predict steps for a CRF model
- CRF model is EdgeFeatureGraphCRF (unary and pairwise potentials)
- Train using SSVM
- Predict using AD3
Copyright Xerox(C) 2016 <NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from sklearn.utils.class_weight import compute_class_weight
from pystruct.models import NodeTypeEdgeFeatureGraphCRF
try: #to ease the use without proper Python installation
from common.trace import traceln
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
from common.trace import traceln
from common.chrono import chronoOn, chronoOff
from common.TestReport import TestReport, TestReportConfusion
from crf.Model_SSVM_AD3 import Model_SSVM_AD3
class Model_SSVM_AD3_Multitype(Model_SSVM_AD3):
#default values for the solver
C = .1
njobs = 4
inference_cache = 50
tol = .1
save_every = 50 #save every 50 iterations,for warm start
max_iter = 1000
def __init__(self, sName, sModelDir):
"""
a CRF model, that uses SSVM and AD3, with a name and a folder where it will be stored or retrieved from
"""
Model_SSVM_AD3.__init__(self, sName, sModelDir)
# ------------------------------------------- TYPED CRF MEMO -------------------------------------------
# Parameters
# ----------
# n_types : number of node types
#
# l_n_states : list of int, default=None
# Number of states per type of variables.
#
# l_n_features : list of int, default=None
# Number of features per type of node.
#
# a_n_edge_features: an array of shape (n_types, n_types) giving the number of features per pair of types
#
# NOTE: there should always be at least 1 feature for any pairs of types which has some edge in the graph.
# To mimic GraphCRF, pass 1 and make a constant feature of 1.0 for all those edges.
#
# class_weight : None, or list of array-like (ndim=1)
# Class weights. If a list of array-like is passed, the Ith one must have length equal to l_n_states[i]
# None means equal class weights (across node types)
#
# X and Y
# -------
# Node features are given as a list of n_types arrays of shape (n_type_nodes, n_type_features):
# - n_type_nodes is the number of nodes of that type
# - n_type_features is the number of features for this type of node
#
# Edges are given as a list of n_types x n_types arrays of shape (n_type_edges, 2).
# Columns are resp.: node index (in corresponding node type), node index (in corresponding node type)
#
# Edge features are given as a list of n_types x n_types arrays of shape (n_type_type_edge, n_type_type_edge_features)
# - n_type_type_edge is the number of edges of type type_type
# - n_type_type_edge_features is the number of features for edge of type type_type
#
# An instance ``X`` is represented as a tuple ``([node_features, ..], [edges, ..], [edge_features, ..])``
#
# Labels ``Y`` are given as one array of shape (n_nodes)
# Labels are numbered from 0 so that each label across types is encoded by a unique integer.
#
# Look at flattenY and unflattentY if you want to pass/obtain list of labels per type, with first label of each type being encoded by 0
# ------------------------------------------------------------------------------------------------------
def get_lX_lY(self, lGraph):
"""
Compute node and edge features and return one X matrix for each graph as a list
return a list of X, a list of Y matrix
"""
lX, lY = Model_SSVM_AD3.get_lX_lY(self, lGraph)
nbEdge = sum( e.shape[0] for (_lNF, lE, _lEF) in lX for e in lE)
traceln(" CRF multi-type model: %d edges" % nbEdge)
return lX, lY
def get_lX(self, lGraph):
"""
Compute node and edge features and return one X matrix for each graph as a list
return a list of X, a list of Y matrix
"""
lX = Model_SSVM_AD3.get_lX(self, lGraph)
nbEdge = sum( e.shape[0] for (_lNF, lE, _lEF) in lX for e in lE)
traceln(" CRF multi-type model: %d edges" % nbEdge)
return lX
# --- UTILITIES -------------------------------------------------------------
def _computeModelCaracteristics(self, lX):
"""
We discover dynamically the number of features. Pretty convenient for developer.
Drawback: if the feature extractor code changes, predicting with a stored model will crash without beforehand catch
return a triplet:
0 - nbClass
1 - list of node feature number per type
2 - list of edge feature number per type x type
"""
        lNF, lE, lEF = lX[0]  # we assume lX is properly constructed (and all matrices have the correct shape, even if dim0 == 0)
self.nbType = len(lNF)
assert len(lE) == self.nbType*self.nbType, \
"SW Error: Badly constructed X: " \
"expected %d Edge matrices, got %d" % (self.nbType*self.nbType,
len(lE))
assert len(lEF) == self.nbType*self.nbType, \
"SW Error: Badly constructed X: " \
"expected %d Edge Feature matrices" \
", got %d" % (self.nbType*self.nbType, len(lEF))
self.lNodeFeatNb = [NF.shape[1] for NF in lNF]
self.lEdgeFeatNb = [ [lEF[i*self.nbType+j].shape[1] for i in range(self.nbType)] for j in range(self.nbType)]
return self.nbType, self.lNodeFeatNb, self.lEdgeFeatNb
def _getNbFeatureAsText(self):
"""
return the number of node features and the number of edge features as a textual message
"""
return " %d types - #features: (nodes) %s (edges) %s"%(self.nbType, self.lNodeFeatNb, self.lEdgeFeatNb)
# --- TRAIN / TEST / PREDICT BASELINE MODELS ------------------------------------------------
def _getXY_forType(self, lX, lY, type_index):
"""
The node features are grouped by type of nodes.
Given a type, we need to stack the feature of nodes of that type and extract their labels
"""
        X_flat = np.vstack([node_features[type_index] for (node_features, _, _) in lX])
lY_type = []
for X, Y in zip(lX, lY):
node_features = X[0] #list of node feature matrices, per type
n_node_before_the_type = sum( node_features[i].shape[0] for i in range(type_index) ) #how many node in previous types?
n_node_of_type = node_features[type_index].shape[0]
Y_type = Y[n_node_before_the_type:n_node_before_the_type+n_node_of_type]
lY_type.append( Y_type )
Y_flat = np.hstack(lY_type)
del lY_type
return X_flat, Y_flat
def _trainBaselines(self, lX, lY):
"""
Train the baseline models, if any
"""
if self._lMdlBaseline:
for itype in range(self.nbType):
X_flat, Y_flat = self._getXY_forType(lX, lY, itype)
if False:
with open("XY_flat_Type%d.pkl"%(itype), "wb") as fd:
pickle.dump((X_flat, Y_flat), fd)
for mdlBaseline in self._lMdlBaseline:
chronoOn()
traceln("\t - training baseline model: %s"%str(mdlBaseline))
mdlBaseline[itype].fit(X_flat, Y_flat)
traceln("\t [%.1fs] done\n"%chronoOff())
del X_flat, Y_flat
return True
def _testBaselines(self, lX, lY, lLabelName=None, lsDocName=None):
"""
test the baseline models,
return a test report list, one per baseline method
"""
if lsDocName: assert len(lX) == len(lsDocName), "Internal error"
lTstRpt = []
if self._lMdlBaseline:
for itype in range(self.nbType):
X_flat, Y_flat = self._getXY_forType(lX, lY, itype)
traceln("\t\t type %d #nodes=%d #features=%d"%(itype, X_flat.shape[0], X_flat.shape[1]))
for mdl in self._lMdlBaseline: #code in extenso, to call del on the Y_pred_flat array...
chronoOn("_testBaselines_T")
Y_pred_flat = mdl[itype].predict(X_flat)
traceln("\t\t [%.1fs] done\n"%chronoOff("_testBaselines_T"))
lTstRpt.append( TestReport(str(mdl), Y_pred_flat, Y_flat, lLabelName, lsDocName=lsDocName) )
del X_flat, Y_flat, Y_pred_flat
return lTstRpt
def _testBaselinesEco(self, lX, lY, lLabelName=None, lsDocName=None):
"""
test the baseline models, WITHOUT MAKING A HUGE X IN MEMORY
return a test report list, one per baseline method
"""
if lsDocName: assert len(lX) == len(lsDocName), "Internal error"
lTstRpt = []
for mdl in self._lMdlBaseline: #code in extenso, to call del on the Y_pred_flat array...
chronoOn("_testBaselinesEco_T")
#using a COnfusionMatrix-based test report object, we can accumulate results
oTestReportConfu = TestReportConfusion(str(mdl), list(), lLabelName, lsDocName=lsDocName)
for X,Y in zip(lX, lY):
for itype in range(self.nbType):
X_flat, Y_flat = self._getXY_forType([X], [Y], itype)
Y_flat_pred = mdl[itype].predict(X_flat)
oTestReportConfu.accumulate( TestReport(str(mdl), Y_flat_pred, Y_flat, lLabelName, lsDocName=lsDocName) )
traceln("\t\t [%.1fs] done\n"%chronoOff("_testBaselinesEco_T"))
lTstRpt.append( oTestReportConfu )
return lTstRpt
# def predictBaselines(self, X):
# """
# predict with the baseline models,
# return a list of 1-dim numpy arrays
# """
# return [mdl.predict(X) for mdl in self._lMdlBaseline]
# --- EDGE BASELINE -------------------------------------------------------------
#no time to write this code
def getEdgeModel(self):
"""
        Logistic regression model for edges
"""
return None
def _getEdgeXEdgeY(self, lX, lY):
"""
return X,Y for each edge
The edge label is in [0, ntype^2-1]
"""
return None
def _trainEdgeBaseline(self, lX, lY):
"""
Here we train a logistic regression model to predict the pair of labels of each edge.
This code assume single type
"""
return True
def _testEdgeBaselines(self, lX, lY, lLabelName=None, lsDocName=None):
"""
test the edge baseline model,
return a test report list (a singleton for now)
"""
return []
# --- TRAIN / TEST / PREDICT ------------------------------------------------
def _getCRFModel(self, clsWeights=None):
crf = NodeTypeEdgeFeatureGraphCRF(self.nbType, # How many node types?
self._nbClass, # How many states per type?
self.lNodeFeatNb, # How many node features per type?
self.lEdgeFeatNb, # How many edge features per type x type?
inference_method="ad3",
l_class_weight = clsWeights
)
return crf
def computeClassWeight_balanced(self, lY):
"""
We compute a normalized balanced set of weights per type
UNUSED as of March 2018 (showed worse results on ABP table)
"""
l_class_weights = []
iTypPrev = 0
Y = np.hstack(lY)
for ityp in range(self.nbType):
iTypNext = iTypPrev + self._nbClass[ityp]
Y_typ = np.extract(np.logical_and(iTypPrev <= Y, Y < iTypNext), Y)
Y_typ_unique = np.unique(Y_typ)
class_weights = compute_class_weight("balanced", Y_typ_unique, Y_typ)
l_class_weights.append( class_weights / np.linalg.norm(class_weights) )
del Y_typ, Y_typ_unique
iTypPrev = iTypNext
del Y
return l_class_weights
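# Hedged illustration (added; not from the original file): the layout of one
# multi-type instance X and its label vector Y as described in the memo above,
# for two node types with 3 and 2 nodes. All sizes and values are arbitrary.
def _example_multitype_instance():
    import numpy as np
    l_node_features = [np.random.rand(3, 5), np.random.rand(2, 4)]  # one matrix per node type
    l_edges = [np.array([[0, 1]]),                  # type0 -> type0 edges
               np.array([[0, 0], [2, 1]]),          # type0 -> type1 edges
               np.zeros((0, 2), dtype=int),         # type1 -> type0: no edges
               np.array([[0, 1]])]                  # type1 -> type1 edges
    l_edge_features = [np.ones((1, 1)), np.ones((2, 3)), np.zeros((0, 1)), np.ones((1, 2))]
    X = (l_node_features, l_edges, l_edge_features)
    Y = np.array([0, 1, 0, 2, 3])  # labels numbered uniquely across the two types
    return X, Y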
|
StarcoderdataPython
|
207635
|
<filename>NATOTranslate.py
# <NAME>
# NATO Phonetic/Morse Code Translator
# 7/29/2020
MORSE_CODE_DICT = { 'A':'.-', 'B':'-...',
'C':'-.-.', 'D':'-..', 'E':'.',
'F':'..-.', 'G':'--.', 'H':'....',
'I':'..', 'J':'.---', 'K':'-.-',
'L':'.-..', 'M':'--', 'N':'-.',
'O':'---', 'P':'.--.', 'Q':'--.-',
'R':'.-.', 'S':'...', 'T':'-',
'U':'..-', 'V':'...-', 'W':'.--',
'X':'-..-', 'Y':'-.--', 'Z':'--..'}
NATO_PHONETIC_DICT = { 'A':'Alpha', 'B':'Bravo',
'C':'Charlie', 'D':'Delta', 'E':'Echo',
'F':'Foxtrot', 'G':'Golf', 'H':'Hotel',
'I':'India', 'J':'Juliet', 'K':'Kilo',
'L':'Lima', 'M':'Mike', 'N':'November',
'O':'Oscar', 'P':'Papa', 'Q':'Quebec',
'R':'Romeo', 'S':'Sierra', 'T':'Tango',
'U':'Uniform', 'V':'Victor', 'W':'Whiskey',
'X':'Xray', 'Y':'Yankee', 'Z':'Zulu'}
sentence = input("What phrase would you like to convert to NATO Phonetic/Morse Code (letters and spaces only)? ")
sentence = sentence.upper()
sentence = sentence.replace(" ", "")
nato = ""
morse = ""
for i in sentence:
nato = nato + NATO_PHONETIC_DICT[i] + " "
morse = morse + MORSE_CODE_DICT[i] + " "
print("Nato Phonetic: " + nato)
print("Morse Code: " + morse)
|
StarcoderdataPython
|
11355467
|
<reponame>Asifadam93/FiltreMusical<gh_stars>0
#!/usr/bin/env python
import dspplot
data = [
0.015625,
0.046875,
0.078125,
0.109375,
0.140625,
0.171875,
0.203125,
0.234375,
0.265625,
0.296875,
0.328125,
0.359375,
0.390625,
0.421875,
0.453125,
0.484375,
0.515625,
0.546875,
0.578125,
0.609375,
0.640625,
0.671875,
0.703125,
0.734375,
0.765625,
0.796875,
0.828125,
0.859375,
0.890625,
0.921875,
0.953125,
0.984375,
0.984375,
0.953125,
0.921875,
0.890625,
0.859375,
0.828125,
0.796875,
0.765625,
0.734375,
0.703125,
0.671875,
0.640625,
0.609375,
0.578125,
0.546875,
0.515625,
0.484375,
0.453125,
0.421875,
0.390625,
0.359375,
0.328125,
0.296875,
0.265625,
0.234375,
0.203125,
0.171875,
0.140625,
0.109375,
0.078125,
0.046875,
0.015625,
]
dspplot.plot(data, freqresp=True, dots=True, padwidth=1024, log_freq=False, horizontal=False, normalized_freq=True, title='Triangular window', file='../svg/window_triangular.svg')
|
StarcoderdataPython
|
26768
|
<filename>data-pipeline/src/data_pipeline/datasets/exac/exac_regional_missense_constraint.py
import hail as hl
def prepare_exac_regional_missense_constraint(path):
ds = hl.import_table(
path,
missing="",
types={
"transcript": hl.tstr,
"gene": hl.tstr,
"chr": hl.tstr,
"amino_acids": hl.tstr,
"genomic_start": hl.tint,
"genomic_end": hl.tint,
"obs_mis": hl.tfloat,
"exp_mis": hl.tfloat,
"obs_exp": hl.tfloat,
"chisq_diff_null": hl.tfloat,
"region_name": hl.tstr,
},
)
ds = ds.annotate(obs_mis=hl.int(ds.obs_mis))
ds = ds.annotate(start=hl.min(ds.genomic_start, ds.genomic_end), stop=hl.max(ds.genomic_start, ds.genomic_end))
ds = ds.drop("amino_acids", "chr", "gene", "genomic_start", "genomic_end", "region_name")
ds = ds.transmute(transcript_id=ds.transcript.split("\\.")[0])
ds = ds.group_by("transcript_id").aggregate(regions=hl.agg.collect(ds.row_value))
ds = ds.annotate(regions=hl.sorted(ds.regions, lambda region: region.start))
ds = ds.select(exac_regional_missense_constraint_regions=ds.regions)
return ds
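# Hedged usage sketch (added for illustration): the bucket paths are placeholders.
# The function expects a tab-separated file with the columns declared above.
def _prepare_and_write_example():
    ds = prepare_exac_regional_missense_constraint(
        "gs://example-bucket/exac_regional_missense_constraint.tsv")
    ds.write("gs://example-bucket/exac_regional_missense_constraint.ht", overwrite=True)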
|
StarcoderdataPython
|
1679355
|
# This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
import locale
import os.path
import random
import re
from os import stat as os_stat, lstat as os_lstat
from collections import deque
from time import time
from ranger.container.fsobject import BAD_INFO, FileSystemObject
from ranger.core.loader import Loadable
from ranger.ext.mount_path import mount_path
from ranger.container.file import File
from ranger.ext.accumulator import Accumulator
from ranger.ext.lazy_property import lazy_property
from ranger.ext.human_readable import human_readable
from ranger.container.settings import LocalSettings
from ranger.ext.vcs import Vcs
def sort_by_basename(path):
"""returns path.relative_path (for sorting)"""
return path.relative_path
def sort_by_basename_icase(path):
"""returns case-insensitive path.relative_path (for sorting)"""
return path.relative_path_lower
def sort_by_directory(path):
"""returns 0 if path is a directory, otherwise 1 (for sorting)"""
return 1 - path.is_directory
def sort_naturally(path):
return path.basename_natural
def sort_naturally_icase(path):
return path.basename_natural_lower
def sort_unicode_wrapper_string(old_sort_func):
def sort_unicode(path):
return locale.strxfrm(old_sort_func(path))
return sort_unicode
def sort_unicode_wrapper_list(old_sort_func):
def sort_unicode(path):
return [locale.strxfrm(str(c)) for c in old_sort_func(path)]
return sort_unicode
def accept_file(file, filters):
"""
Returns True if file shall be shown, otherwise False.
Parameters:
file - an instance of FileSystemObject
filters - an array of lambdas, each expects a file and
returns True if file shall be shown,
otherwise False.
"""
for filter in filters:
if filter and not filter(file):
return False
return True
def walklevel(some_dir, level):
some_dir = some_dir.rstrip(os.path.sep)
followlinks = True if level > 0 else False
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir, followlinks=followlinks):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if level != -1 and num_sep + level <= num_sep_this:
del dirs[:]
def mtimelevel(path, level):
mtime = os.stat(path).st_mtime
for dirpath, dirnames, filenames in walklevel(path, level):
dirlist = [os.path.join("/", dirpath, d) for d in dirnames
if level == -1 or dirpath.count(os.path.sep) - path.count(os.path.sep) <= level]
mtime = max(mtime, max([-1] + [os.stat(d).st_mtime for d in dirlist]))
return mtime
class Directory(FileSystemObject, Accumulator, Loadable):
is_directory = True
enterable = False
load_generator = None
cycle_list = None
loading = False
progressbar_supported = True
flat = 0
filenames = None
files = None
files_all = None
filter = None
temporary_filter = None
inode_type_filter = None
marked_items = None
scroll_begin = 0
mount_path = '/'
disk_usage = 0
last_update_time = -1
load_content_mtime = -1
order_outdated = False
content_outdated = False
content_loaded = False
vcs = None
has_vcschild = False
_cumulative_size_calculated = False
sort_dict = {
'basename': sort_by_basename,
'natural': sort_naturally,
'size': lambda path: -(path.size or 1),
'mtime': lambda path: -(path.stat and path.stat.st_mtime or 1),
'ctime': lambda path: -(path.stat and path.stat.st_ctime or 1),
'atime': lambda path: -(path.stat and path.stat.st_atime or 1),
'random': lambda path: random.random(),
'type': lambda path: path.mimetype or '',
'extension': lambda path: path.extension or '',
}
def __init__(self, path, **kw):
assert not os.path.isfile(path), "No directory given!"
Loadable.__init__(self, None, None)
Accumulator.__init__(self)
FileSystemObject.__init__(self, path, **kw)
self.marked_items = list()
for opt in ('sort_directories_first', 'sort', 'sort_reverse',
'sort_case_insensitive'):
self.settings.signal_bind('setopt.' + opt,
self.request_resort, weak=True, autosort=False)
for opt in ('hidden_filter', 'show_hidden'):
self.settings.signal_bind('setopt.' + opt,
self.refilter, weak=True, autosort=False)
self.settings = LocalSettings(path, self.settings)
if self.settings.vcs_aware:
self.vcs = Vcs(self)
self.use()
def request_resort(self):
self.order_outdated = True
def request_reload(self):
self.content_outdated = True
def get_list(self):
return self.files
def mark_item(self, item, val):
item._mark(val)
if val:
if item in self.files and item not in self.marked_items:
self.marked_items.append(item)
else:
while True:
try:
self.marked_items.remove(item)
except ValueError:
break
def toggle_mark(self, item):
self.mark_item(item, not item.marked)
def toggle_all_marks(self):
for item in self.files:
self.toggle_mark(item)
def mark_all(self, val):
for item in self.files:
self.mark_item(item, val)
if not val:
del self.marked_items[:]
self._clear_marked_items()
# XXX: Is it really necessary to have the marked items in a list?
# Can't we just recalculate them with [f for f in self.files if f.marked]?
def _gc_marked_items(self):
for item in list(self.marked_items):
if item.path not in self.filenames:
self.marked_items.remove(item)
def _clear_marked_items(self):
for item in self.marked_items:
item._mark(False)
del self.marked_items[:]
def get_selection(self):
"""READ ONLY"""
self._gc_marked_items()
if not self.files:
return []
if self.marked_items:
return [item for item in self.files if item.marked]
elif self.pointed_obj:
return [self.pointed_obj]
else:
return []
def refilter(self):
if self.files_all is None:
            return  # probably not loaded yet
self.last_update_time = time()
filters = []
if not self.settings.show_hidden and self.settings.hidden_filter:
hidden_filter = re.compile(self.settings.hidden_filter)
hidden_filter_search = hidden_filter.search
filters.append(lambda file: not hidden_filter_search(file.basename))
if self.filter:
filter_search = self.filter.search
filters.append(lambda file: filter_search(file.basename))
if self.inode_type_filter:
filters.append(self.inode_type_filter)
if self.temporary_filter:
temporary_filter_search = self.temporary_filter.search
filters.append(lambda file: temporary_filter_search(file.basename))
self.files = [f for f in self.files_all if accept_file(f, filters)]
# A fix for corner cases when the user invokes show_hidden on a
# directory that contains only hidden directories and hidden files.
if self.files and not self.pointed_obj:
self.pointed_obj = self.files[0]
elif not self.files:
self.content_loaded = False
self.pointed_obj = None
self.move_to_obj(self.pointed_obj)
# XXX: Check for possible race conditions
def load_bit_by_bit(self):
"""An iterator that loads a part on every next() call
Returns a generator which load a part of the directory
in each iteration.
"""
self.loading = True
self.percent = 0
self.load_if_outdated()
basename_is_rel_to = self.path if self.flat else None
try:
if self.runnable:
yield
mypath = self.path
self.mount_path = mount_path(mypath)
if self.flat:
filelist = []
for dirpath, dirnames, filenames in walklevel(mypath, self.flat):
dirlist = [os.path.join("/", dirpath, d) for d in dirnames
if self.flat == -1 or dirpath.count(os.path.sep) - mypath.count(os.path.sep) <= self.flat]
filelist += dirlist
filelist += [os.path.join("/", dirpath, f) for f in filenames]
filenames = filelist
self.load_content_mtime = mtimelevel(mypath, self.flat)
else:
filelist = os.listdir(mypath)
filenames = [mypath + (mypath == '/' and fname or '/' + fname)
for fname in filelist]
self.load_content_mtime = os.stat(mypath).st_mtime
if self._cumulative_size_calculated:
# If self.content_loaded is true, this is not the first
# time loading. So I can't really be sure if the
# size has changed and I'll add a "?".
if self.content_loaded:
if self.fm.settings.autoupdate_cumulative_size:
self.look_up_cumulative_size()
else:
self.infostring = ' %s' % human_readable(
self.size, separator='? ')
else:
self.infostring = ' %s' % human_readable(self.size)
else:
self.size = len(filelist)
self.infostring = ' %d' % self.size
if self.is_link:
self.infostring = '->' + self.infostring
yield
marked_paths = [obj.path for obj in self.marked_items]
files = []
disk_usage = 0
has_vcschild = False
for name in filenames:
try:
file_lstat = os_lstat(name)
if file_lstat.st_mode & 0o170000 == 0o120000:
file_stat = os_stat(name)
else:
file_stat = file_lstat
stats = (file_stat, file_lstat)
is_a_dir = file_stat.st_mode & 0o170000 == 0o040000
except Exception:
stats = None
is_a_dir = False
if is_a_dir:
try:
item = self.fm.get_directory(name)
item.load_if_outdated()
except Exception:
item = Directory(name, preload=stats, path_is_abs=True,
basename_is_rel_to=basename_is_rel_to)
item.load()
else:
if self.flat:
item.relative_path = os.path.relpath(item.path, self.path)
else:
item.relative_path = item.basename
item.relative_path_lower = item.relative_path.lower()
if item.vcs and item.vcs.track:
if item.vcs.is_root_pointer:
has_vcschild = True
else:
item.vcsstatus = item.vcs.rootvcs.status_subpath(
os.path.join(self.realpath, item.basename), is_directory=True)
else:
item = File(name, preload=stats, path_is_abs=True,
basename_is_rel_to=basename_is_rel_to)
item.load()
disk_usage += item.size
if self.vcs and self.vcs.track:
item.vcsstatus = self.vcs.rootvcs.status_subpath(
os.path.join(self.realpath, item.basename))
files.append(item)
self.percent = 100 * len(files) // len(filenames)
yield
self.has_vcschild = has_vcschild
self.disk_usage = disk_usage
self.filenames = filenames
self.files_all = files
self._clear_marked_items()
for item in self.files_all:
if item.path in marked_paths:
item._mark(True)
self.marked_items.append(item)
else:
item._mark(False)
self.sort()
if files:
if self.pointed_obj is not None:
self.sync_index()
else:
self.move(to=0)
else:
self.filenames = None
self.files_all = None
self.files = None
self.cycle_list = None
self.content_loaded = True
self.last_update_time = time()
self.correct_pointer()
finally:
self.loading = False
self.fm.signal_emit("finished_loading_dir", directory=self)
if self.vcs:
self.fm.ui.vcsthread.process(self)
def unload(self):
self.loading = False
self.load_generator = None
def load_content(self, schedule=None):
"""Loads the contents of the directory.
Use this sparingly since it takes rather long.
"""
self.content_outdated = False
if not self.loading:
if not self.loaded:
self.load()
if not self.accessible:
self.content_loaded = True
return
if schedule is None:
schedule = True # was: self.size > 30
if self.load_generator is None:
self.load_generator = self.load_bit_by_bit()
if schedule and self.fm:
self.fm.loader.add(self)
else:
for _ in self.load_generator:
pass
self.load_generator = None
elif not schedule or not self.fm:
for _ in self.load_generator:
pass
self.load_generator = None
def sort(self):
"""Sort the contained files"""
if self.files_all is None:
return
try:
sort_func = self.sort_dict[self.settings.sort]
except Exception:
sort_func = sort_by_basename
if self.settings.sort_case_insensitive and \
sort_func == sort_by_basename:
sort_func = sort_by_basename_icase
if self.settings.sort_case_insensitive and \
sort_func == sort_naturally:
sort_func = sort_naturally_icase
# XXX Does not work with usermade sorting functions :S
if self.settings.sort_unicode:
if sort_func in (sort_naturally, sort_naturally_icase):
sort_func = sort_unicode_wrapper_list(sort_func)
elif sort_func in (sort_by_basename, sort_by_basename_icase):
sort_func = sort_unicode_wrapper_string(sort_func)
self.files_all.sort(key=sort_func)
if self.settings.sort_reverse:
self.files_all.reverse()
if self.settings.sort_directories_first:
self.files_all.sort(key=sort_by_directory)
self.refilter()
def _get_cumulative_size(self):
if self.size == 0:
return 0
cum = 0
realpath = os.path.realpath
for dirpath, dirnames, filenames in os.walk(self.path,
onerror=lambda _: None):
for file in filenames:
try:
if dirpath == self.path:
stat = os_stat(realpath(dirpath + "/" + file))
else:
stat = os_stat(dirpath + "/" + file)
cum += stat.st_size
except Exception:
pass
return cum
def look_up_cumulative_size(self):
self._cumulative_size_calculated = True
self.size = self._get_cumulative_size()
self.infostring = ('-> ' if self.is_link else ' ') + \
human_readable(self.size)
@lazy_property
def size(self):
try:
if self.fm.settings.automatically_count_files:
size = len(os.listdir(self.path))
else:
size = None
except OSError:
self.infostring = BAD_INFO
self.accessible = False
self.runnable = False
return 0
else:
if size is None:
self.infostring = ''
else:
self.infostring = ' %d' % size
self.accessible = True
self.runnable = True
return size
@lazy_property
def infostring(self):
self.size # trigger the lazy property initializer
if self.is_link:
return '->' + self.infostring
return self.infostring
@lazy_property
def runnable(self):
self.size # trigger the lazy property initializer
return self.runnable
def sort_if_outdated(self):
"""Sort the containing files if they are outdated"""
if self.order_outdated:
self.order_outdated = False
self.sort()
return True
return False
def move_to_obj(self, arg):
try:
arg = arg.path
except Exception:
pass
self.load_content_once(schedule=False)
if self.empty():
return
Accumulator.move_to_obj(self, arg, attr='path')
def search_fnc(self, fnc, offset=1, forward=True):
if not hasattr(fnc, '__call__'):
return False
length = len(self)
if forward:
generator = ((self.pointer + (x + offset)) % length
for x in range(length - 1))
else:
generator = ((self.pointer - (x + offset)) % length
for x in range(length - 1))
for i in generator:
_file = self.files[i]
if fnc(_file):
self.pointer = i
self.pointed_obj = _file
self.correct_pointer()
return True
return False
def set_cycle_list(self, lst):
self.cycle_list = deque(lst)
def cycle(self, forward=True):
if self.cycle_list:
if forward is True:
self.cycle_list.rotate(-1)
elif forward is False:
self.cycle_list.rotate(1)
self.move_to_obj(self.cycle_list[0])
def correct_pointer(self):
"""Make sure the pointer is in the valid range"""
Accumulator.correct_pointer(self)
try:
if self == self.fm.thisdir:
self.fm.thisfile = self.pointed_obj
except Exception:
pass
def load_content_once(self, *a, **k):
"""Load the contents of the directory if not done yet"""
if not self.content_loaded:
self.load_content(*a, **k)
return True
return False
def load_content_if_outdated(self, *a, **k):
"""Load the contents of the directory if outdated"""
if self.load_content_once(*a, **k):
return True
if self.files_all is None or self.content_outdated:
self.load_content(*a, **k)
return True
try:
if self.flat:
real_mtime = mtimelevel(self.path, self.flat)
else:
real_mtime = os.stat(self.path).st_mtime
except OSError:
real_mtime = None
return False
if self.stat:
cached_mtime = self.load_content_mtime
else:
cached_mtime = 0
if real_mtime != cached_mtime:
self.load_content(*a, **k)
return True
return False
def get_description(self):
return "Loading " + str(self)
def use(self):
"""mark the filesystem-object as used at the current time"""
self.last_used = time()
def is_older_than(self, seconds):
"""returns whether this object wasn't use()d in the last n seconds"""
if seconds < 0:
return True
return self.last_used + seconds < time()
def go(self, history=True):
"""enter the directory if the filemanager is running"""
if self.fm:
return self.fm.enter_dir(self.path, history=history)
return False
def empty(self):
"""Is the directory empty?"""
return self.files is None or len(self.files) == 0
def _set_linemode_of_children(self, mode):
for f in self.files:
f._set_linemode(mode)
def __nonzero__(self):
"""Always True"""
return True
__bool__ = __nonzero__
def __len__(self):
"""The number of containing files"""
assert self.accessible
assert self.content_loaded
assert self.files is not None
return len(self.files)
def __eq__(self, other):
"""Check for equality of the directories paths"""
return isinstance(other, Directory) and self.path == other.path
def __neq__(self, other):
"""Check for inequality of the directories paths"""
return not self.__eq__(other)
def __hash__(self):
return hash(self.path)
|
StarcoderdataPython
|
3475212
|
<reponame>AaronFriel/pulumi-azuread
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
'get_users_output',
]
@pulumi.output_type
class GetUsersResult:
"""
A collection of values returned by getUsers.
"""
def __init__(__self__, id=None, ignore_missing=None, mail_nicknames=None, object_ids=None, return_all=None, user_principal_names=None, users=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ignore_missing and not isinstance(ignore_missing, bool):
raise TypeError("Expected argument 'ignore_missing' to be a bool")
pulumi.set(__self__, "ignore_missing", ignore_missing)
if mail_nicknames and not isinstance(mail_nicknames, list):
raise TypeError("Expected argument 'mail_nicknames' to be a list")
pulumi.set(__self__, "mail_nicknames", mail_nicknames)
if object_ids and not isinstance(object_ids, list):
raise TypeError("Expected argument 'object_ids' to be a list")
pulumi.set(__self__, "object_ids", object_ids)
if return_all and not isinstance(return_all, bool):
raise TypeError("Expected argument 'return_all' to be a bool")
pulumi.set(__self__, "return_all", return_all)
if user_principal_names and not isinstance(user_principal_names, list):
raise TypeError("Expected argument 'user_principal_names' to be a list")
pulumi.set(__self__, "user_principal_names", user_principal_names)
if users and not isinstance(users, list):
raise TypeError("Expected argument 'users' to be a list")
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ignoreMissing")
def ignore_missing(self) -> Optional[bool]:
return pulumi.get(self, "ignore_missing")
@property
@pulumi.getter(name="mailNicknames")
def mail_nicknames(self) -> Sequence[str]:
"""
The email aliases of the users.
"""
return pulumi.get(self, "mail_nicknames")
@property
@pulumi.getter(name="objectIds")
def object_ids(self) -> Sequence[str]:
"""
The object IDs of the users.
"""
return pulumi.get(self, "object_ids")
@property
@pulumi.getter(name="returnAll")
def return_all(self) -> Optional[bool]:
return pulumi.get(self, "return_all")
@property
@pulumi.getter(name="userPrincipalNames")
def user_principal_names(self) -> Sequence[str]:
"""
The user principal names (UPNs) of the users.
"""
return pulumi.get(self, "user_principal_names")
@property
@pulumi.getter
def users(self) -> Sequence['outputs.GetUsersUserResult']:
"""
A list of users. Each `user` object provides the attributes documented below.
"""
return pulumi.get(self, "users")
class AwaitableGetUsersResult(GetUsersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUsersResult(
id=self.id,
ignore_missing=self.ignore_missing,
mail_nicknames=self.mail_nicknames,
object_ids=self.object_ids,
return_all=self.return_all,
user_principal_names=self.user_principal_names,
users=self.users)
def get_users(ignore_missing: Optional[bool] = None,
mail_nicknames: Optional[Sequence[str]] = None,
object_ids: Optional[Sequence[str]] = None,
return_all: Optional[bool] = None,
user_principal_names: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
"""
Gets basic information for multiple Azure Active Directory users.
## API Permissions
The following API permissions are required in order to use this data source.
When authenticated with a service principal, this data source requires one of the following application roles: `User.Read.All` or `Directory.Read.All`
When authenticated with a user principal, this data source does not require any additional roles.
## Example Usage
```python
import pulumi
import pulumi_azuread as azuread
users = azuread.get_users(user_principal_names=[
"<EMAIL>",
"<EMAIL>",
])
```
:param bool ignore_missing: Ignore missing users and return users that were found. The data source will still fail if no users are found. Defaults to false.
:param Sequence[str] mail_nicknames: The email aliases of the users.
:param Sequence[str] object_ids: The object IDs of the users.
:param bool return_all: When `true`, the data source will return all users. Cannot be used with `ignore_missing`. Defaults to false.
:param Sequence[str] user_principal_names: The user principal names (UPNs) of the users.
"""
__args__ = dict()
__args__['ignoreMissing'] = ignore_missing
__args__['mailNicknames'] = mail_nicknames
__args__['objectIds'] = object_ids
__args__['returnAll'] = return_all
__args__['userPrincipalNames'] = user_principal_names
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azuread:index/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
return AwaitableGetUsersResult(
id=__ret__.id,
ignore_missing=__ret__.ignore_missing,
mail_nicknames=__ret__.mail_nicknames,
object_ids=__ret__.object_ids,
return_all=__ret__.return_all,
user_principal_names=__ret__.user_principal_names,
users=__ret__.users)
@_utilities.lift_output_func(get_users)
def get_users_output(ignore_missing: Optional[pulumi.Input[Optional[bool]]] = None,
mail_nicknames: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
object_ids: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
return_all: Optional[pulumi.Input[Optional[bool]]] = None,
user_principal_names: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUsersResult]:
"""
Gets basic information for multiple Azure Active Directory users.
## API Permissions
The following API permissions are required in order to use this data source.
When authenticated with a service principal, this data source requires one of the following application roles: `User.Read.All` or `Directory.Read.All`
When authenticated with a user principal, this data source does not require any additional roles.
## Example Usage
```python
import pulumi
import pulumi_azuread as azuread
users = azuread.get_users(user_principal_names=[
"<EMAIL>",
"<EMAIL>",
])
```
:param bool ignore_missing: Ignore missing users and return users that were found. The data source will still fail if no users are found. Defaults to false.
:param Sequence[str] mail_nicknames: The email aliases of the users.
:param Sequence[str] object_ids: The object IDs of the users.
:param bool return_all: When `true`, the data source will return all users. Cannot be used with `ignore_missing`. Defaults to false.
:param Sequence[str] user_principal_names: The user principal names (UPNs) of the users.
"""
...
|
StarcoderdataPython
|
390256
|
<filename>python/cinn/optim.py<gh_stars>1-10
from .core_api.optim import simplify
from .core_api.optim import ir_copy
|
StarcoderdataPython
|
8001002
|
<reponame>shkarupa-alex/nlpclean
import gc
import os
import resource
import unittest
from ..html import html_to_article, fragment_to_text
class TestHtmlToArticle(unittest.TestCase):
def test_comment(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_article', 'comment_no_space.html'), 'rt') as f:
without_space = html_to_article(f.read(), 'ru')
with open(os.path.join(os.path.dirname(__file__), 'html_to_article', 'comment_space.html'), 'rt') as f:
with_space = html_to_article(f.read(), 'ru')
with open(os.path.join(os.path.dirname(__file__), 'html_to_article', 'comment_ground.html'), 'rt') as f:
comment_ground = f.read()
self.assertEqual(with_space, without_space)
self.assertEqual(without_space, comment_ground)
# def test_memory(self):
# with open(os.path.join(os.path.dirname(__file__), 'html_to_article', 'page_source.html'), 'rt') as f:
# source = f.read()
#
# result = html_to_article(source, 'ru')
# memory_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# for _ in range(100):
# result = html_to_article(source, 'ru')
# memory_end = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
#
# self.assertGreater(len(result), 0)
# self.assertGreater(memory_start * 1.01, memory_end)
class TestFragmentToText(unittest.TestCase):
def test_break(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'break_source.html'), 'rt') as f:
without_space = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'break_ground.txt'), 'rt') as f:
comment_ground = f.read()
self.assertEqual(without_space, comment_ground)
def test_comment(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'comment_no_space.html'), 'rt') as f:
without_space = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'comment_space.html'), 'rt') as f:
with_space = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'comment_ground.txt'), 'rt') as f:
comment_ground = f.read()
self.assertEqual(with_space, without_space)
self.assertEqual(without_space, comment_ground)
def test_article_1(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article1_no_space.html'), 'rt') as f:
without_space = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article1_space.html'), 'rt') as f:
with_space = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article1_ground.txt'), 'rt') as f:
comment_ground = f.read()
self.assertEqual(with_space, without_space)
self.assertEqual(without_space, comment_ground)
def test_article_2(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article2_source.html'), 'rt') as f:
source = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article2_ground.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_article_3(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article3_source.html'), 'rt') as f:
source = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article3_ground.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_article_4(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article4_source.html'), 'rt') as f:
source = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article4_ground.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_empty(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'empty_source.html'), 'rt') as f:
source = fragment_to_text(f.read())
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'empty_ground.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_memory(self):
with open(os.path.join(os.path.dirname(__file__), 'fragment_to_text', 'article1_space.html'), 'rt') as f:
source = f.read()
result = fragment_to_text(source)
memory_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(100):
result = fragment_to_text(source)
gc.collect()
memory_end = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
self.assertGreater(len(result), 0)
self.assertGreater(memory_start * 1.05, memory_end)
class TestHtmlToText(unittest.TestCase):
def test_drive2(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'drive2.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'drive2.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_gazeta(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'gazeta.html'), 'rb') as f:
source = f.read().decode('cp1251')
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'gazeta.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_habr(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'habr.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'habr.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_kinopoisk(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kinopoisk.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kinopoisk.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_kommersant(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kommersant.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kommersant.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_kp(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kp.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'kp.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_lenta(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'lenta.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'lenta.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_livejournal1(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal1.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal1.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_livejournal2(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal2.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal2.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_livejournal3(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal3.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'livejournal3.txt'), 'rt') as f:
ground = f.read()
# with open(os.path.join(os.path.dirname(__file__), 'html_to_text', '_debug_0.txt'), 'wt') as f:
# f.write(source)
self.assertEqual(ground, source)
def test_mk(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'mk.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'mk.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_rbc(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'rbc.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'rbc.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_ria(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'ria.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'ria.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_sport(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'sport.html'), 'rb') as f:
source = f.read().decode('cp1251')
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'sport.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_vc(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'vc.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'vc.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_woman(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'woman.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'woman.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
def test_zen(self):
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'zen.html'), 'rt') as f:
source = f.read()
source = fragment_to_text(html_to_article(source, 'ru'))
with open(os.path.join(os.path.dirname(__file__), 'html_to_text', 'zen.txt'), 'rt') as f:
ground = f.read()
self.assertEqual(ground, source)
|
StarcoderdataPython
|
1846918
|
import time
from threading import Thread
from queue import Queue
class RateLimiter:
def __init__(self, limit, delay=0.01):
num = int(limit * delay)
if num < 1:
raise ValueError("limit * delay < 1")
self._limit_num = limit
self._delay = delay
self._num_per_delay = num
self._queue = Queue(limit)
self._thread = Thread(target=self._start)
self._thread.daemon = True
self._thread.start()
    def _start(self):
        # Refill loop: every `delay` seconds, top the queue back up to at most
        # `limit * delay` tokens, i.e. roughly `limit` tokens per second.
        total = self._num_per_delay
while True:
diff = total - self._queue.qsize()
while diff > 0:
self._queue.put(None)
diff -= 1
time.sleep(self._delay)
def get_token(self):
self._queue.get()
self._queue.task_done()
if __name__ == "__main__":
num = 100
r = RateLimiter(10, 0.1)
while num:
r.get_token()
print(num)
num -= 1
|
StarcoderdataPython
|
375596
|
<reponame>mumupy/mmdeeplearning<filename>src/mtensorflow/tf_rbm.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/26 12:45
# @Author : ganliang
# @File : tf_rbm.py
# @Desc : A Restricted Boltzmann Machine (RBM) is a two-layer neural network: the first layer is called the visible layer and the second the hidden layer. Because the network has only two layers, it is also known as a shallow neural network.
# RBMs can be used for dimensionality reduction, feature extraction and collaborative filtering. Training an RBM can be split into three parts: the forward pass, the backward pass and the comparison step. Below is the RBM formulation.
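# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one contrastive
# divergence (CD-1) update for a Bernoulli RBM, written with NumPy so it stays
# self-contained. It only illustrates the forward pass / backward pass /
# comparison steps described above; every name below is an assumption.
import numpy as np
def _cd1_step(v0, W, b_visible, b_hidden, lr=0.1):
    """One CD-1 update; v0 is a batch of visible vectors, shape (n, n_visible)."""
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    # forward pass: visible units -> hidden probabilities and a sampled state
    h0_prob = sigmoid(v0 @ W + b_hidden)
    h0 = (np.random.rand(*h0_prob.shape) < h0_prob).astype(v0.dtype)
    # backward pass: hidden sample -> reconstructed visible -> hidden again
    v1_prob = sigmoid(h0 @ W.T + b_visible)
    h1_prob = sigmoid(v1_prob @ W + b_hidden)
    # comparison: positive statistics minus negative (reconstruction) statistics
    dW = (v0.T @ h0_prob - v1_prob.T @ h1_prob) / v0.shape[0]
    W = W + lr * dW
    b_visible = b_visible + lr * (v0 - v1_prob).mean(axis=0)
    b_hidden = b_hidden + lr * (h0_prob - h1_prob).mean(axis=0)
    return W, b_visible, b_hidden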
|
StarcoderdataPython
|
9606432
|
from math import ceil, sqrt
# Columnar transposition: lay the plaintext out row by row in a grid of about
# sqrt(length) rows by ceil(sqrt(length)) columns, then read it back out
# column by column, emitting a space after each column.
plaintext = input().strip()
length = len(plaintext)
rows = int(sqrt(length))
cols = int(ceil(sqrt(length)))
if cols * rows < length:
rows += 1
encrypted = ''
for c in range(cols):
for r in range(rows):
if c + cols * r < length:
encrypted += plaintext[c + cols * r]
encrypted += ' '
print(encrypted)
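# A possible decryption counterpart (an assumption -- the original script only
# performs the encryption): split the ciphertext back into its per-column
# groups and undo the `c + cols * r` indexing used above.
def decrypt(encrypted: str) -> str:
    groups = encrypted.split()              # one group of characters per column
    cols = len(groups)
    length = sum(len(g) for g in groups)
    plain = []
    for i in range(length):
        c, r = i % cols, i // cols          # inverse of index = c + cols * r
        plain.append(groups[c][r])
    return ''.join(plain)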
|
StarcoderdataPython
|
1609324
|
<reponame>test-wiz-sec/pulumi-azure-nextgen
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'LocationThresholdRuleConditionArgs',
'ManagementEventAggregationConditionArgs',
'ManagementEventRuleConditionArgs',
'RetentionPolicyArgs',
'RuleEmailActionArgs',
'RuleManagementEventClaimsDataSourceArgs',
'RuleManagementEventDataSourceArgs',
'RuleMetricDataSourceArgs',
'RuleWebhookActionArgs',
'ThresholdRuleConditionArgs',
]
@pulumi.input_type
class LocationThresholdRuleConditionArgs:
def __init__(__self__, *,
failed_location_count: pulumi.Input[int],
odata_type: pulumi.Input[str],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a certain number of locations failing.
:param pulumi.Input[int] failed_location_count: the number of locations that must fail to activate the alert.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "failed_location_count", failed_location_count)
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition')
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="failedLocationCount")
def failed_location_count(self) -> pulumi.Input[int]:
"""
the number of locations that must fail to activate the alert.
"""
return pulumi.get(self, "failed_location_count")
@failed_location_count.setter
def failed_location_count(self, value: pulumi.Input[int]):
pulumi.set(self, "failed_location_count", value)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventAggregationConditionArgs:
def __init__(__self__, *,
operator: Optional[pulumi.Input[str]] = None,
threshold: Optional[pulumi.Input[float]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
How the data that is collected should be combined over time.
:param pulumi.Input[str] operator: the condition operator.
:param pulumi.Input[float] threshold: The threshold value that activates the alert.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
"""
the condition operator.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
aggregation: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']] = None,
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None):
"""
A management event rule condition.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
:param pulumi.Input['ManagementEventAggregationConditionArgs'] aggregation: How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition')
if aggregation is not None:
pulumi.set(__self__, "aggregation", aggregation)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def aggregation(self) -> Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]:
"""
How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
"""
return pulumi.get(self, "aggregation")
@aggregation.setter
def aggregation(self, value: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]):
pulumi.set(self, "aggregation", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@pulumi.input_type
class RetentionPolicyArgs:
def __init__(__self__, *,
days: pulumi.Input[int],
enabled: pulumi.Input[bool]):
"""
Specifies the retention policy for the log.
:param pulumi.Input[int] days: the number of days for the retention in days. A value of 0 will retain the events indefinitely.
:param pulumi.Input[bool] enabled: a value indicating whether the retention policy is enabled.
"""
pulumi.set(__self__, "days", days)
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def days(self) -> pulumi.Input[int]:
"""
the number of days for the retention in days. A value of 0 will retain the events indefinitely.
"""
return pulumi.get(self, "days")
@days.setter
def days(self, value: pulumi.Input[int]):
pulumi.set(self, "days", value)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
a value indicating whether the retention policy is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class RuleEmailActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_to_service_owners: Optional[pulumi.Input[bool]] = None):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_emails: the list of administrator's custom email addresses to notify of the activation of the alert.
:param pulumi.Input[bool] send_to_service_owners: Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction')
if custom_emails is not None:
pulumi.set(__self__, "custom_emails", custom_emails)
if send_to_service_owners is not None:
pulumi.set(__self__, "send_to_service_owners", send_to_service_owners)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customEmails")
def custom_emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
the list of administrator's custom email addresses to notify of the activation of the alert.
"""
return pulumi.get(self, "custom_emails")
@custom_emails.setter
def custom_emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_emails", value)
@property
@pulumi.getter(name="sendToServiceOwners")
def send_to_service_owners(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
return pulumi.get(self, "send_to_service_owners")
@send_to_service_owners.setter
def send_to_service_owners(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_to_service_owners", value)
@pulumi.input_type
class RuleManagementEventClaimsDataSourceArgs:
def __init__(__self__, *,
email_address: Optional[pulumi.Input[str]] = None):
"""
The claims for a rule management event data source.
:param pulumi.Input[str] email_address: the email address.
"""
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[pulumi.Input[str]]:
"""
the email address.
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_address", value)
@pulumi.input_type
class RuleManagementEventDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
claims: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']] = None,
event_name: Optional[pulumi.Input[str]] = None,
event_source: Optional[pulumi.Input[str]] = None,
level: Optional[pulumi.Input[str]] = None,
operation_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_provider_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_status: Optional[pulumi.Input[str]] = None):
"""
A rule management event data source. The discriminator fields is always RuleManagementEventDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
:param pulumi.Input['RuleManagementEventClaimsDataSourceArgs'] claims: the claims.
:param pulumi.Input[str] event_name: the event name.
:param pulumi.Input[str] event_source: the event source.
:param pulumi.Input[str] level: the level.
:param pulumi.Input[str] operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match.
:param pulumi.Input[str] resource_group_name: the resource group name.
:param pulumi.Input[str] resource_provider_name: the resource provider name.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] status: The status of the operation that should be checked for. If no status is provided, any status will match.
:param pulumi.Input[str] sub_status: the substatus.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource')
if claims is not None:
pulumi.set(__self__, "claims", claims)
if event_name is not None:
pulumi.set(__self__, "event_name", event_name)
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if level is not None:
pulumi.set(__self__, "level", level)
if operation_name is not None:
pulumi.set(__self__, "operation_name", operation_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_provider_name is not None:
pulumi.set(__self__, "resource_provider_name", resource_provider_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_status is not None:
pulumi.set(__self__, "sub_status", sub_status)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def claims(self) -> Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]:
"""
the claims.
"""
return pulumi.get(self, "claims")
@claims.setter
def claims(self, value: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]):
pulumi.set(self, "claims", value)
@property
@pulumi.getter(name="eventName")
def event_name(self) -> Optional[pulumi.Input[str]]:
"""
the event name.
"""
return pulumi.get(self, "event_name")
@event_name.setter
def event_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_name", value)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[pulumi.Input[str]]:
"""
the event source.
"""
return pulumi.get(self, "event_source")
@event_source.setter
def event_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_source", value)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
the level.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter(name="operationName")
def operation_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the operation that should be checked for. If no name is provided, any operation will match.
"""
return pulumi.get(self, "operation_name")
@operation_name.setter
def operation_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceProviderName")
def resource_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource provider name.
"""
return pulumi.get(self, "resource_provider_name")
@resource_provider_name.setter
def resource_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_provider_name", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the operation that should be checked for. If no status is provided, any status will match.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subStatus")
def sub_status(self) -> Optional[pulumi.Input[str]]:
"""
the substatus.
"""
return pulumi.get(self, "sub_status")
@sub_status.setter
def sub_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_status", value)
@pulumi.input_type
class RuleMetricDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
metric_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
:param pulumi.Input[str] metric_name: the name of the metric that defines what the rule monitors.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource')
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[pulumi.Input[str]]:
"""
the name of the metric that defines what the rule monitors.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@pulumi.input_type
class RuleWebhookActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_uri: Optional[pulumi.Input[str]] = None):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
:param pulumi.Input[str] service_uri: the service uri to Post the notification when the alert activates or resolves.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction')
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[pulumi.Input[str]]:
"""
the service uri to Post the notification when the alert activates or resolves.
"""
return pulumi.get(self, "service_uri")
@service_uri.setter
def service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_uri", value)
@pulumi.input_type
class ThresholdRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
operator: pulumi.Input[str],
threshold: pulumi.Input[float],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
time_aggregation: Optional[pulumi.Input[str]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a metric crossing a threshold.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
:param pulumi.Input[str] operator: the operator used to compare the data and the threshold.
:param pulumi.Input[float] threshold: the threshold value that activates the alert.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input[str] time_aggregation: the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition')
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if time_aggregation is not None:
pulumi.set(__self__, "time_aggregation", time_aggregation)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
the operator used to compare the data and the threshold.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[float]:
"""
the threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: pulumi.Input[float]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="timeAggregation")
def time_aggregation(self) -> Optional[pulumi.Input[str]]:
"""
the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
"""
return pulumi.get(self, "time_aggregation")
@time_aggregation.setter
def time_aggregation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_aggregation", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
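# Illustrative usage (an addition, not part of the generated SDK file): how a
# classic threshold alert condition might be assembled from the argument
# classes above. The metric name and threshold values are assumptions.
if __name__ == "__main__":
    example_condition = ThresholdRuleConditionArgs(
        odata_type="Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition",
        operator="GreaterThan",
        threshold=90,
        window_size="PT5M",  # ISO 8601 duration: five minutes
        data_source=RuleMetricDataSourceArgs(
            odata_type="Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource",
            metric_name="Percentage CPU",
        ),
    )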
|
StarcoderdataPython
|
9767268
|
from typing import List, Tuple
import clue
import clue.rules
from clue import turn_log
class ClueRepl:
"""
This will grab input from the user and then update the log and scoresheet.
> dave knif plum di olivia=no
input: "dave knif plum di olivia=no"
tokens: {player: "dave"}, {card: knife}, {card: plum}, {card: dining}, {card:..}
"""
def __init__(self, scoresheet):
self.scoresheet = scoresheet # this is NOT the module scoresheet.py
for player in scoresheet.players_names:
assert player not in scoresheet.game.all_cards
assert "=" not in player
@staticmethod
def prompt_players_list() -> List[str]:
"""
data = Scoresheet(["Dave", "Olivia", "Xeniya"])
"""
players_list = []
line = input("Enter the # of players > ")
print(
"Enter the player names in the order they should appear on the scoresheet"
)
number_of_players = int(line)
for player in range(number_of_players):
name = input(f"Name Player {player + 1} > ").strip()
players_list.append(name)
return players_list
@staticmethod
def select_player(player_list: List[str]) -> str:
"""
Prints a list like:
0) Player A
1) Player B
2) Player C
select which player you are> 1
:param player_list:
:return: str
"""
for i, player in enumerate(player_list):
print(f"{i+1}) Player {player}")
while True:
try:
line = int(input("select which player you are> "))
return player_list[line - 1]
except ValueError:
print(f"It MUST be a number.")
def analyze(self, turn_history):
print("Analysis Results:")
# need to do more passes until run_all() returns empty list
while True:
results = clue.rules.run_all(self.scoresheet, turn_history)
for result in results:
print(result)
self.scoresheet.set_fact(result)
if len(results) < 1:
break
else:
print("....")
print("")
def do_input(self):
current_entry = turn_log.LogEntry()
turn_history = []
while True:
print(f"Current Turn({len(turn_history) + 1}): {current_entry}")
line = input("> ")
try:
if line.strip().lower() == "quit":
return
elif line.strip().lower() == "sheet":
self.scoresheet.print_scoresheet()
elif line.strip().lower().startswith("set "): # can change to 'owner'
cards, player, state = ClueRepl.parse_set_line(
self.scoresheet.game, line
)
for card in cards:
self.scoresheet.set_ownership(player, card, state)
# print(f"scoresheet.data[card][player] = state")
if state == clue.BLANK:
print(f"setting {player} and {card} to 'unknown' ")
else:
verb = {
clue.HAS_CARD: "has",
clue.DOESNT_HAVE_CARD: "doesn't have",
}.get(state)
print(f"marking: {player} {verb} {card}")
elif line.strip().lower() == "next":
# check if anything is missing before allowing the player to go to the next turn
if current_entry.is_valid(self.scoresheet.player_count()):
turn_history.append(current_entry)
current_entry = turn_log.LogEntry()
self.analyze(turn_history) # runs "next" & "analyze"
else:
print(f"Current turn is not finished: {current_entry}")
elif line.strip().lower() == "clear": # clears out wrong current_entry
# command that resets the current entry:
current_entry = turn_log.LogEntry()
elif line.strip().lower() == "history": # prints out the turn history
for i, x in enumerate(turn_history):
print(i + 1, x)
elif line.strip().lower() == "analyze":
self.analyze(turn_history)
else:
asker, cards, answers = self.parse_line(line)
should_update = True
# don't let them put the same player as both "asker" and in the responses on the same line ONLY.
# and have a 'reset' command that resets the current entry from:
# Current Turn(2): B asks (Plum, Wrench, Gazebo), answers:{'a': False, 'b': True}
# print(f"asker={asker} answers={answers}")
asker_has_responded = [i for i in answers if asker in i]
if asker_has_responded:
print(f"Warning: Did you put the asker in responses?")
should_update = False
# confirm when I'm about overwrite existing info of a turn b4 removing a previous player
if asker and current_entry.asker and asker != current_entry.asker:
print(f"Warning: Did you forget to type 'next'?")
yn = input(
f"Enter 'y' to change the asker from {current_entry.asker} to {asker}. y/n>"
)
should_update = bool(yn.strip().lower() == "y")
# if yn.strip().lower() == "y":
# should_update = True
# else:
# should_update = False
if should_update:
ClueRepl.update_entry(
self.scoresheet.game, current_entry, asker, cards, answers
)
if current_entry.asker in current_entry.responses:
print(
f"WARNING: {current_entry.asker} is both the 'asker' and one of the responses"
)
else:
print(f"ignoring this input: {line}")
except Exception as ex: # pylint: disable=broad-except
print(ex)
@staticmethod
def parse_set_line(game, line):
"""
:param game:
:param line: is a line like "set plum dave=yes" | yes,no, or ? for blank ... or even 'set plum dave=' for blank
:return: ([clue.PLUM]: List, "dave": str, clue.HAS_CARD: int)
"""
line = line.strip().lower()
tokens = line.split()
cards = []
if len(tokens) < 3 or "=" not in tokens[-1]:
raise Exception(f"Invalid 'set <card> player=' line: {line}")
if tokens[0] != "set":
raise Exception()
for card in tokens[1:-1]: # it must be clue cards
# this will return ["Dining", "Drawing"] if input "d":
matches = ClueRepl.resolve_card(game, card)
if len(matches) != 1:
# we don't know which card they meant
raise Exception(
f"bad input: unable to match '{card}' to a single card. Matches={matches}"
)
cards.append(matches[0])
player, response = tokens[-1].split("=", maxsplit=1)
if response == "":
state = clue.BLANK
elif ClueRepl.response_bool(response):
state = clue.HAS_CARD
else:
state = clue.DOESNT_HAVE_CARD
return (
cards,
player,
state,
) # -> ([clue.PLUM]: List, "dave": str, clue.HAS_CARD: int)
@staticmethod
def update_entry(
game: clue.Game,
current_entry,
asker: str,
cards: List[str],
answers: List[Tuple[str, bool]],
):
"""
Updates `current_entry` using information parsed from parse_line()
:param game:
:param current_entry: the entry object to modify
:param asker: the "asker" returned by parse_line(); should be None if there was no asker in the line
:param cards: list of cards returned by parse_line()
:param answers: list of tuples of (player:str, response:bool))
"""
if asker:
current_entry.asker = asker
suspect = None
weapon = None
room = None
for card in cards:
if card in game.suspects:
if suspect:
raise Exception(
f"Two suspects in one line are not accepted: {suspect}, {card}"
)
else:
suspect = card
elif card in game.rooms:
if room:
raise Exception(
f"Two rooms in one line are not accepted: {room}, {card}"
)
else:
room = card
elif card in game.weapons:
if weapon:
raise Exception(
f"Two weapons in one line are not accepted: {weapon}, {card}"
)
else:
weapon = card
if suspect:
current_entry.suspect = suspect
if weapon:
current_entry.weapon = weapon
if room:
current_entry.room = room
for a in answers: # answers - list of tuples
player, response = a # a - tuple of two things
current_entry.responses[player] = response
@staticmethod # validate answers:'y','n','nope','none','nothing','i_have_one' ...
def response_bool(s: str) -> bool:
truthy = ["y", "yes", "yep", "have", "has", "t", "true", "do", "does"]
falsy = [
"n",
"no",
"not",
"nope",
"none",
"f",
"false",
"don't",
"dont",
"nothing",
]
if s.lower() in truthy:
return True
elif s.lower() in falsy:
return False
else:
raise Exception(f"invalid entry: {s}")
def parse_line(self, line):
"""
This parses an input str from a user on a 'guess' turn into tokens. Maybe rename to smt like parse_guess_line
:param line: asker card card card responder1=yes responder2=no
:return: tokens
"""
tokens = line.strip().split()
asker = None
cards = []
answers = []
for token in tokens: # when you parse a str you parse it into pieces - tokens
if token.upper() in self.scoresheet.players_names: # it must be the asker
asker = token.upper()
elif "=" in token:
player, response = token.split(
"=", maxsplit=1
) # maxsplit is number of times to use split()
# make sure its a valid player! split on '=' also if bob=yes => error
if player.upper() not in self.scoresheet.players_names:
raise Exception(f"bad input: invalid player {player}")
answers.append((player.upper(), ClueRepl.response_bool(response)))
else:
# it must be a clue card
matches = ClueRepl.resolve_card(
self.scoresheet.game, token
) # this will return ["Dining", "Drawing"] if input "d"
if len(matches) != 1:
# we don't know which card they meant
raise Exception(
f"bad input: unable to match {token} to a single card. Matches={matches}"
)
cards.append(matches[0])
return asker, cards, answers
@staticmethod
def resolve_card(game: clue.Game, prefix: str) -> List[str]:
"""
# prefix="gr" -> ["GREEN"]
# prefix="D" -> ["DINING", "DRAWING"]
# prefix="z" -> []
:return: list of EVERY card that matches prefix
"""
results = []
prefix = prefix.upper()
for card in game.all_cards:
if card.upper().startswith(prefix):
results.append(card)
return results
|
StarcoderdataPython
|
11274422
|
<filename>CHRLINE/e2ee.py
"""
Author: YinMo
Version: 0.0.1-beta
Description: died
"""
import hashlib, json, os
import axolotl_curve25519 as Curve25519
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from Crypto.Cipher import AES
class E2EE():
def generateSharedSecret(self, private_key, public_key):
return Curve25519.calculateAgreement(bytes(private_key), bytes(public_key))
def _xor(self, buf):
buf_length = int(len(buf) / 2)
buf2 = bytearray(buf_length)
for i in range(buf_length):
buf2[i] = buf[i] ^ buf[buf_length + i]
return bytes(buf2)
def getSHA256Sum(self, *args):
instance = hashlib.sha256()
for arg in args:
if isinstance(arg, str):
arg = arg.encode()
instance.update(arg)
return instance.digest()
def _encryptAESECB(self, aes_key, plain_data):
aes = AES.new(aes_key, AES.MODE_ECB)
return aes.encrypt(plain_data)
def decryptKeyChain(self, publicKey, privateKey, encryptedKeyChain):
shared_secret = self.generateSharedSecret(privateKey, publicKey)
aes_key = self.getSHA256Sum(shared_secret, 'Key')
aes_iv = self._xor(self.getSHA256Sum(shared_secret, 'IV'))
aes = AES.new(aes_key, AES.MODE_CBC, aes_iv)
keychain_data = aes.decrypt(encryptedKeyChain)
key = keychain_data.hex()
key = bin2bytes(key)
key = self.TCompactProtocol(key, passProtocol=True).res
public_key = bytes(key[0][4])
private_key = bytes(key[0][5])
return [private_key, public_key]
def encryptDeviceSecret (self, publicKey, privateKey, encryptedKeyChain):
shared_secret = self.generateSharedSecret(privateKey, publicKey)
aes_key = self.getSHA256Sum(shared_secret, 'Key')
encryptedKeyChain = self._xor(self.getSHA256Sum(encryptedKeyChain))
keychain_data = self._encryptAESECB(aes_key, encryptedKeyChain)
return keychain_data
def generateAAD(self, a, b, c, d, e=2, f=0):
aad = b''
aad += a.encode()
aad += b.encode()
aad += bytes(self.getIntBytes(c))
aad += bytes(self.getIntBytes(d))
aad += bytes(self.getIntBytes(e)) #e2ee version
aad += bytes(self.getIntBytes(f)) # content type
return aad
def encryptE2EETextMessage(self, senderKeyId, receiverKeyId, keyData, specVersion, text, to ,_from):
#selfKey = self.getE2EEKeys(self.mid)
salt = os.urandom(16)
gcmKey = self.getSHA256Sum(keyData, salt, b'Key')
gcmIV = self.getSHA256Sum(keyData, salt, b'IV')
aad = self.generateAAD(to, _from, senderKeyId, receiverKeyId, specVersion, 0)
sign = os.urandom(16)
data = json.dumps({
'text': text
}).encode()
encData = self.encryptE2EEMessageV2(data, gcmKey, sign, aad)
self.log(f'senderKeyId: {senderKeyId} ({self.getIntBytes(senderKeyId)})', True)
self.log(f'receiverKeyId: {receiverKeyId} ({self.getIntBytes(receiverKeyId)})', True)
return [salt, encData, sign, bytes(self.getIntBytes(senderKeyId)), bytes(self.getIntBytes(receiverKeyId))]
def decryptE2EETextMessage(self, messageObj):
chunks = messageObj[20]
salt = chunks[0]
message = chunks[1]
sign = chunks[2]
        senderKeyId = byte2int(chunks[3])
        receiverKeyId = byte2int(chunks[4])
_key = self.negotiateE2EEPublicKey(messageObj[0]) # todo: to or _from
aesKey = self.generateSharedSecret(self.getPrivateKey(self.mid), _key[2][4])
gcmKey = self.getSHA256Sum(aesKey, salt, b'Key')
s = hashlib.sha256()
s.update(aesKey)
s.update(salt)
s.update(b'IV')
iv = s.digest()
aad = self.generateAAD(message[0], message[1], senderKeyId, receiverKeyId)
aesgcm = AESGCM(gcmKey)
decrypted = aesgcm.decrypt(sign, message, aad)
self.log(f'decrypted: {decrypted}', True)
return json.loads(decrypted)['text'] # todo: contentType
def decryptE2EETextMessageV2(self, to , _from, chunks, privK, pubK):
for i in range(len(chunks)):
if isinstance(chunks[i], str):
chunks[i] = chunks[i].encode()
salt = chunks[0]
message = chunks[1]
sign = chunks[2]
senderKeyId = byte2int(chunks[3])
receiverKeyId = byte2int(chunks[4])
self.log(f'senderKeyId: {senderKeyId}', True)
self.log(f'receiverKeyId: {receiverKeyId}', True)
aesKey = self.generateSharedSecret(privK, pubK)
gcmKey = self.getSHA256Sum(aesKey, salt, b'Key')
iv = self.getSHA256Sum(aesKey, salt, b'IV')
aad = self.generateAAD(to, _from, senderKeyId, receiverKeyId)
aesgcm = AESGCM(gcmKey)
decrypted = aesgcm.decrypt(sign, message, aad)
self.log(f'decrypted: {decrypted}', True)
return json.loads(decrypted)['text']
def encryptE2EEMessageV2(self, data, gcmKey, nonce, aad):
aesgcm = AESGCM(gcmKey)
return aesgcm.encrypt(nonce, data, aad)
def byte2int(t):
e = 0
i = 0
s = len(t)
for i in range(s):
e = 256 * e + t[i]
return e
def bin2bytes(k):
e = []
for i in range(int(len(k) / 2)):
_i = int(k[i * 2:i * 2 + 2], 16)
e.append(_i)
return bytearray(e)
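# Illustrative round trip (an addition, not part of the original module): the
# AES-GCM helpers above reduce to the pattern below, where the 32-byte gcmKey
# comes from getSHA256Sum and the random `sign` value doubles as the GCM nonce.
if __name__ == "__main__":
    _key = os.urandom(32)
    _nonce = os.urandom(16)
    _aad = b"example-aad"
    _ct = AESGCM(_key).encrypt(_nonce, json.dumps({'text': 'hello'}).encode(), _aad)
    assert json.loads(AESGCM(_key).decrypt(_nonce, _ct, _aad))['text'] == 'hello'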
|
StarcoderdataPython
|
69610
|
<reponame>lifning/picotool
"""The map section of a PICO-8 cart.
The map region consists of 4096 bytes. The .p8 representation is 32
lines of 256 hexadecimal digits (128 bytes).
The map is 128 tiles wide by 64 tiles high. Each tile is one of the
256 tiles from the spritesheet. Map memory describes the top 32 rows
(128 * 32 = 4096). If the developer draws tiles in the bottom 32 rows,
this is stored in the bottom of the gfx memory region.
"""
__all__ = ['Map']
from .. import util
class Map(util.BaseSection):
"""The map region of a PICO-8 cart."""
HEX_LINE_LENGTH_BYTES = 128
def __init__(self, *args, **kwargs):
"""The initializer.
The Map initializer takes an optional gfx keyword argument
whose value is a reference to the Gfx instance where lower map
data is stored.
"""
self._gfx = None
if 'gfx' in kwargs:
self._gfx = kwargs['gfx']
del kwargs['gfx']
super().__init__(*args, **kwargs)
@classmethod
def empty(cls, version=4, gfx=None):
"""Creates an empty instance.
Args:
version: The PICO-8 file version.
gfx: The Gfx object where lower map data is written.
Returns:
A Map instance.
"""
return cls(data=bytearray(b'\x00' * 4096), version=version, gfx=gfx)
@classmethod
def from_lines(cls, *args, **kwargs):
gfx = None
if 'gfx' in kwargs:
gfx = kwargs['gfx']
del kwargs['gfx']
result = super().from_lines(*args, **kwargs)
result._gfx = gfx
return result
@classmethod
def from_bytes(cls, *args, **kwargs):
gfx = None
if 'gfx' in kwargs:
gfx = kwargs['gfx']
del kwargs['gfx']
result = super().from_bytes(*args, **kwargs)
result._gfx = gfx
return result
def get_cell(self, x, y):
"""Gets the tile ID for a map cell.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. Map must have a Gfx if y > 31.
(0-63)
Returns:
The tile ID for the cell.
"""
assert 0 <= x <= 127
assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
if y <= 31:
return self._data[y * 128 + x]
return self._gfx._data[4096 + (y - 32) * 128 + x]
def set_cell(self, x, y, val):
"""Sets the tile ID for a map cell.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y > 31, Map must have a
Gfx, and this method updates the shared data region in the Gfx.
val: The new tile ID for the cell. (0-255)
"""
assert 0 <= x <= 127
assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
assert 0 <= val <= 255
if y <= 31:
self._data[y * 128 + x] = val
else:
self._gfx._data[4096 + (y - 32) * 128 + x] = val
def get_rect_tiles(self, x, y, width=1, height=1):
"""Gets a rectangle of map tiles.
The map is a grid of 128x32 tiles, or 128x64 if using the
gfx/map shared memory for map data. This method returns a
rectangle of tile IDs on the map, as a list of bytearrays.
If the requested rectangle size goes off the edge of the map,
the off-edge tiles are returned as 0. The bottom edge is
always assumed to be beyond the 64th row in the gfx/map shared
memory region.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
must have a Gfx.
width: The width of the rectangle, as a number of tiles.
height: The height of the rectangle, as a number of tiles.
Returns:
The rectangle of tile IDs, as a list of bytearrays.
"""
assert 0 <= x <= 127
assert 1 <= width
assert 1 <= height
assert ((0 <= y + height <= 32) or
((0 <= y + height <= 64) and self._gfx is not None))
result = []
for tile_y in range(y, y + height):
row = bytearray()
for tile_x in range(x, x + width):
if (tile_y > 63) or (tile_x > 127):
row.append(0)
else:
row.append(self.get_cell(tile_x, tile_y))
result.append(row)
return result
def set_rect_tiles(self, rect, x, y):
"""Writes a rectangle of tiles to the map.
If writing the given rectangle at the given coordinates causes
        the rectangle to extend off the edge of the map, the remainder
is discarded.
Args:
rect: A rectangle of tile IDs, as an iterable of iterables of IDs.
x: The map tile x coordinate (column) of the upper left corner to
start writing.
y: The map tile y coordinate (row) of the upper left corner to
start writing.
"""
for tile_y, row in enumerate(rect):
for tile_x, val in enumerate(row):
                if ((tile_y + y) > 63) or ((tile_x + x) > 127):
continue
self.set_cell(tile_x + x, tile_y + y, val)
def get_rect_pixels(self, x, y, width=1, height=1):
"""Gets a rectangel of map tiles as pixels.
This is similar to get_rect_tiles() except the tiles are
extracted from Gfx data and returned as a rectangle of pixels.
Just like PICO-8, tile ID 0 is rendered as empty (all 0's),
not the actual tile at ID 0.
Args:
x: The map cell x (column) coordinate. (0-127)
y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
must have a Gfx.
width: The width of the rectangle, as a number of tiles.
height: The height of the rectangle, as a number of tiles.
Returns:
The rectangle of pixels, as a list of bytearrays of pixel colors.
"""
assert self._gfx is not None
assert 0 <= x <= 127
assert 1 <= width
assert 1 <= height
assert 0 <= y + height <= 64
tile_rect = self.get_rect_tiles(x, y, width, height)
result = []
for tile_row in tile_rect:
pixel_row = [bytearray(), bytearray(), bytearray(), bytearray(),
bytearray(), bytearray(), bytearray(), bytearray()]
for id in tile_row:
if id == 0:
sprite = [bytearray(b'\x00' * 8)] * 8
else:
sprite = self._gfx.get_sprite(id)
for i in range(0, 8):
pixel_row[i].extend(sprite[i])
for i in range(0, 8):
result.append(pixel_row[i])
return result
|
StarcoderdataPython
|
12823985
|
import unittest
from click.testing import CliRunner
import yoda
class PortScanTest(unittest.TestCase):
"""
Test for the following commands:
| Module: dev
| command: portscan
"""
def __init__(self, methodName="runTest"):
super(PortScanTest, self).__init__()
self.runner = CliRunner()
def runTest(self):
result = self.runner.invoke(
yoda.cli, ["dev", "portscan"], input="manparvesh.com"
)
self.assertIsNone(result.exception)
|
StarcoderdataPython
|
6596216
|
<reponame>chiragkhandhar/ODAS
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import UserProfile
from doctor_login.models import docDetails
class RegistrationForm(UserCreationForm):
email=forms.EmailField(required=True)
class Meta:
model = User
fields = (
'username',
'first_name',
'last_name',
'email',
'<PASSWORD>',
'<PASSWORD>'
)
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.email = self.cleaned_data['email']
        user.phone = self.cleaned_data.get('phone', '')
if commit:
user.save()
return user
class EditProfileForm(UserChangeForm):
class Meta:
model = User
fields =(
'email',
'first_name',
'last_name',
'password'
)
SLOT_LIST=(('slot1','Slot 1'),('slot2','Slot 2'),('slot3','Slot 3'),('slot4','Slot 4'),('slot5','Slot 5'),('slot6','Slot 6'),('slot7','Slot 7'))
class scheduleForm(forms.Form):
selected_slot= forms.ChoiceField(choices=SLOT_LIST)
|
StarcoderdataPython
|
3522371
|
<gh_stars>0
#!/usr/bin/env python
import sys
import os
class Menu(object):
'''Create and maintain a simple menu for PopQ'''
program_name = 'PopQ'
def __init__(self):
        self.choices = {'1': self.add_choices, '2': self.start_questionnaire, 'x': self.exit_program}
def add_choices(self):
'''Add more question files to PopQ'''
pass
def start_questionnaire(self):
'''Start a questionnaire session'''
pass
def exit_program(self):
'''Terminate PopQ'''
pass
def get_choice(self):
'''Get and validate some menu input'''
self.print_menu()
ans = ''
while ans.lower() not in self.choices:
ans = input('> ')
        return ans.lower()
def print_menu(self):
'''Print PopQ menu'''
os.system("clear")
print('\n'.join([self.program_name,'-'*len(self.program_name)]) + \
'''
1) Choose categories
2) Start questionnaire
X) Quit
''')
def start_menu_loop(self):
'''Create and maintain the menu input/output'''
pass
|
StarcoderdataPython
|
6697833
|
from functools import reduce
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def mergeAllTime(dfs:list[pd.DataFrame]):
    ''' Layer 1 - possibly unused.
    Combines multiple multi-column dataframes.
    To support disparate frequencies, an outer join fills in missing
    values with the previous value.
    This probably isn't needed anywhere anymore, except maybe for the
    live-update models and stream later on.
    '''
    if isinstance(dfs, pd.DataFrame):
return dfs
if len(dfs) == 0:
return None
if len(dfs) == 1:
return dfs[0]
for df in dfs:
df.index = pd.to_datetime(df.index)
return reduce(
lambda left, right: pd.merge(
left,
right,
how='outer',
left_index=True,
right_index=True)
# can't use this for merge because we don't want to fill the targetColumn
.fillna(method='ffill'),
#.fillna(method='bfill'),
# don't bfill here, in many cases its fine to bfill, but not in all.
# maybe we will bfill in model. always bfill After ffill.
dfs)
def merge(dfs:list[pd.DataFrame], targetColumn:'str|tuple[str]'):
''' Layer 1
    Combines multiple multi-column dataframes.
    To support disparate frequencies, an outer join fills in missing
    values with the previous value, then filters down to the target
    column's observations.
'''
if len(dfs) == 0:
return None
if len(dfs) == 1:
return dfs[0]
for ix, item in enumerate(dfs):
if targetColumn in item.columns:
dfs.insert(0, dfs.pop(ix))
break
# if we get through this loop without hitting the if
# we could possibly use that as a trigger to use the
# other merge function, also if targetColumn is None
# why would we make a dataset without target though?
for df in dfs:
df.index = pd.to_datetime(df.index)
return reduce(
lambda left, right:
pd.merge_asof(left, right, left_index=True, right_index=True),
dfs)
def appendInsert(df:pd.DataFrame, incremental:pd.DataFrame):
''' Layer 2
    Once datasets are merged, one cannot merely append a dataframe;
    the incremental row must be inserted at the correct location.
    This is a helper used by models after gathering; it doesn't talk
    to disk directly. incremental should be a multi-column, one-row
    DataFrame.
'''
df.index = pd.to_datetime(df.index)
incremental.index = pd.to_datetime(incremental.index)
if incremental.index.values[0] in df.index.values:
df.loc[incremental.index, [x for x in incremental.columns]] = incremental
else:
df = df.append(incremental).sort_index()
return df.fillna(method='ffill')
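# --- Hedged usage sketch, not part of the original module ---
# Exercises merge() and appendInsert() on tiny invented frames; the column
# names, dates, and values are placeholders for illustration only. Note the
# module relies on DataFrame.append (pandas < 2.0 semantics), and this
# sketch follows that same assumption.
def _example_merge_and_append():
    prices = pd.DataFrame(
        {'close': [1.0, 2.0, 3.0]},
        index=pd.to_datetime(['2021-01-01', '2021-01-02', '2021-01-03']))
    volume = pd.DataFrame(
        {'volume': [10, 30]},
        index=pd.to_datetime(['2021-01-01', '2021-01-03']))
    # merge() keeps one row per observation of the target column ('close')
    # and pulls in the other frames' values as-of each timestamp.
    merged = merge([prices, volume], targetColumn='close')
    # appendInsert() splices a one-row frame into the merged result.
    incremental = pd.DataFrame(
        {'close': [4.0], 'volume': [40]},
        index=pd.to_datetime(['2021-01-04']))
    return appendInsert(merged, incremental)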
|
StarcoderdataPython
|
9643871
|
<reponame>seongcheoljeon/HoudiniRunRender<gh_stars>0
#!/usr/bin/env python
# encoding=utf-8
# created date: 2020.09.16
# author: <NAME>
# email: <EMAIL>
import os
import sys
import inspect
import traceback
class Default(object):
def __init__(self):
pass
# path parameters join
@staticmethod
def set_path_sep(*args):
return os.pathsep.join(args)
@staticmethod
def chk_uid():
if os.getuid() == 0:
return True
return False
@staticmethod
def get_common_brg():
return os.getenv("DEF_FXHOME_COMMON_BRIDGE_DIR")
@staticmethod
def get_opt_dir():
return os.getenv("DEF_OPT_DIR")
@staticmethod
def get_hou_brg():
return os.getenv("DEF_HOU_BRG")
@staticmethod
def get_default_hou_ver():
return [os.getenv("DEF_HOUDINI_VERSION")]
@staticmethod
def get_default_uname():
default_uname = os.getenv("DEF_FXUSER")
if default_uname is None:
Default.error_message("FXUSER variable is not set.", _exit=True)
return default_uname
@staticmethod
def exists_dir(_dir):
if not os.path.isdir(_dir):
_file, _line, _func = inspect.getframeinfo(inspect.currentframe())[:3]
Default.error_message(
"{0} is not a directory.".format(_dir),
_exit=True, _file=_file, _line=_line, _func=_func)
if not os.path.exists(_dir):
_file, _line, _func = inspect.getframeinfo(inspect.currentframe())[:3]
Default.error_message(
"{0} is a nonexistent directory.".format(_dir),
_exit=True, _file=_file, _line=_line, _func=_func)
return True
@staticmethod
def get_fxhome():
fxhome = os.getenv("DEF_FXHOME")
Default.exists_dir(fxhome)
return fxhome
@staticmethod
def get_fxhome_user_brg():
return os.getenv("DEF_FXHOME_USER_BRIDGE_DIR")
@staticmethod
def merge_path(*args):
ss = ""
if args[0][0] != os.sep:
ss = os.sep
for arg in args:
ss = os.path.join(ss, arg)
if ss[0] != os.sep:
return ss
return os.path.normpath(ss)
@staticmethod
def get_file_info():
_file, _line, _func = inspect.getframeinfo(inspect.currentframe())[:3]
d = dict()
d.setdefault("file", _file)
d.setdefault("line", _line)
d.setdefault("func", _func)
return d
@staticmethod
def confirm_message(msg):
print()
print("-" * 88)
print("[CONFIRM]: {0}".format(msg))
print("-" * 88)
@staticmethod
def warning_message(msg, _file="", _func="", _line=""):
print()
print("*" * 100)
print("[WARNING]: {0}".format(msg))
print("File: {0}".format(_file))
print("Function: {0}".format(_func))
print("Line: {0}".format(_line))
print("*" * 100)
print()
@staticmethod
def error_message(msg, _exit=False, _file="", _func="", _line=""):
sys.stderr.write("\n")
sys.stderr.write("*" * 88)
sys.stderr.write("\n")
sys.stderr.write("[ERROR]: {0}".format(msg))
sys.stderr.write("\n")
sys.stderr.write("File: {0}".format(_file))
sys.stderr.write("\n")
sys.stderr.write("Function: {0}".format(_func))
sys.stderr.write("\n")
sys.stderr.write("Line: {0}".format(_line))
sys.stderr.write("\n")
sys.stderr.write("*" * 88)
sys.stderr.write("\n")
traceback.print_exc()
if _exit:
sys.exit(1)
if __name__ == "__main__":
dft = Default()
print("fxhome:", dft.get_fxhome())
print("user default:", dft.get_default_uname())
print("houdini version default:", dft.get_default_hou_ver())
print("fxhome user bridge:", dft.get_fxhome_user_brg())
|
StarcoderdataPython
|
4863718
|
__version__ = """1.7.0"""
|
StarcoderdataPython
|
5177997
|
import pickle
import torch
from torch import nn
import numpy as np
from scipy import linalg
from tqdm import tqdm
from calc_inception import load_patched_inception_v3
import pdb
import os
import csv
from glob import glob
import math
from torch.nn import functional as F
from matplotlib import pyplot as plt
@torch.no_grad()
def extract_features(batch_generator, inception, args):
n_batches = args.n_sample // args.batch_size
features = []
for _ in tqdm(range(n_batches)):
img = batch_generator.generate_fid()
feat = inception(img)[0].view(img.shape[0], -1)
features.append(feat.to("cpu"))
features = torch.cat(features, 0)
return features.numpy()
def calc_fid(sample_mean, sample_cov, real_mean, real_cov, eps=1e-6):
cov_sqrt, _ = linalg.sqrtm(sample_cov @ real_cov, disp=False)
if not np.isfinite(cov_sqrt).all():
print('product of cov matrices is singular')
offset = np.eye(sample_cov.shape[0]) * eps
cov_sqrt = linalg.sqrtm((sample_cov + offset) @ (real_cov + offset))
if np.iscomplexobj(cov_sqrt):
if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
m = np.max(np.abs(cov_sqrt.imag))
raise ValueError(f'Imaginary component {m}')
cov_sqrt = cov_sqrt.real
mean_diff = sample_mean - real_mean
mean_norm = mean_diff @ mean_diff
trace = np.trace(sample_cov) + np.trace(real_cov) - 2 * np.trace(cov_sqrt)
fid = mean_norm + trace
return fid
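# --- Hedged sanity-check sketch, not part of the original script ---
# calc_fid() only needs two mean/covariance pairs, so it can be exercised
# without a generator or Inception features; the Gaussian statistics below
# are synthetic and purely illustrative.
def _calc_fid_smoke_test(dim=64, n=1000):
    rng = np.random.default_rng(0)
    feats_a = rng.normal(size=(n, dim))
    feats_b = rng.normal(loc=0.5, size=(n, dim))
    mean_a, cov_a = feats_a.mean(0), np.cov(feats_a, rowvar=False)
    mean_b, cov_b = feats_b.mean(0), np.cov(feats_b, rowvar=False)
    # Identical statistics should give ~0; shifted statistics should be clearly larger.
    return calc_fid(mean_a, cov_a, mean_a, cov_a), calc_fid(mean_a, cov_a, mean_b, cov_b)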
if __name__ == '__main__':
from utils_metrics import load_args
args = load_args()
    # assertions
assert 'ckpt_dir' in args.__dict__
assert 'inception' in args.__dict__
assert 'device' in args.__dict__
assert 'n_sample' in args.__dict__
assert 'batch_size' in args.__dict__
import sys
if 'cookgan' in args.ckpt_dir:
sys.path.append('../cookgan/')
from generate_batch import BatchGenerator
device = args.device
print(f'load real image statistics from {args.inception}')
with open(args.inception, 'rb') as f:
embeds = pickle.load(f)
real_mean = embeds['mean']
real_cov = embeds['cov']
filename = os.path.join(args.ckpt_dir, f'fid_{args.n_sample}.csv')
# load values that are already computed
computed = []
if os.path.exists(filename):
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
computed += [row[0]]
# prepare to write
f = open(filename, mode='a')
writer = csv.writer(f, delimiter=',')
# load inception model
inception = load_patched_inception_v3()
inception = inception.eval().to(device)
ckpt_paths = glob(os.path.join(args.ckpt_dir, '*.ckpt')) + glob(os.path.join(args.ckpt_dir, '*.pt'))+glob(os.path.join(args.ckpt_dir, '*.pth'))
ckpt_paths = sorted(ckpt_paths)
print('records:', ckpt_paths)
print('computed:', computed)
for ckpt_path in ckpt_paths:
print()
print(f'working on {ckpt_path}')
iteration = os.path.basename(ckpt_path).split('.')[0]
if iteration in computed:
print('already computed')
continue
args.ckpt_path = ckpt_path
batch_generator = BatchGenerator(args)
features = extract_features(batch_generator, inception, args)
print(f'extracted {features.shape[0]} features')
sample_mean = np.mean(features, 0)
sample_cov = np.cov(features, rowvar=False)
fid = calc_fid(sample_mean, sample_cov, real_mean, real_cov)
print(f'{iteration}, fid={fid}')
writer.writerow([iteration, fid])
f.close()
fids = []
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
fid = float(row[1])
fids += [fid]
fig = plt.figure(figsize=(6,6))
plt.plot(fids)
plt.savefig(os.path.join(args.ckpt_dir, f'fid_{args.n_sample}.png'))
|
StarcoderdataPython
|
1807399
|
<filename>torchbenchmark/util/model.py
import json
import os
import pandas as pd
import typing
from collections.abc import Iterable
import torch
from contextlib import contextmanager
import warnings
import inspect
import os
@contextmanager
def no_grad(val):
"""Some meta-learning models (e.g. maml) may need to train a target(another) model
in inference runs
"""
old_state = torch.is_grad_enabled()
try:
torch.set_grad_enabled(not val)
yield
finally:
torch.set_grad_enabled(old_state)
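# --- Hedged usage sketch, not part of the original file ---
# no_grad(val) disables gradient tracking only when `val` is True and restores
# the previous grad mode on exit, e.g. (model/example_inputs are placeholders):
#
#   with no_grad(self.eval_in_nograd()):
#       out = model(*example_inputs)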
class BenchmarkModel():
"""
A base class for adding models to torch benchmark.
See [Adding Models](#../models/ADDING_MODELS.md)
"""
def __init__(self, *args, **kwargs):
pass
def train(self):
raise NotImplementedError()
def eval(self):
raise NotImplementedError()
def set_eval(self):
self._set_mode(False)
def set_train(self):
self._set_mode(True)
def eval_in_nograd(self):
return True
def _set_mode(self, train):
(model, _) = self.get_module()
model.train(train)
def check_opt_vs_noopt_jit(self):
if not self.jit:
return
model_name = inspect.getfile(self.__class__).split(os.sep)[-2]
print(f"model_name={model_name} , {inspect.getfile(self.__class__)}")
model_blacklist = [
'demucs', # set up issue
'yolov3', # set up issue
'BERT_pytorch', # set up issue
'moco', # set up issue
'Super_SloMo', # results don't match, might be due to the way TE CUDA handles rand?
'attention_is_all_you_need_pytorch', # results don't match, might be due to the way TE CUDA handles rand?
]
if model_name in model_blacklist:
warnings.warn(UserWarning(f"{model_name}.get_module() doesn't support `check_results` yet!"))
return
# if a model doesn't support `get_module`
# we should let it throw and then
# override `check_results` for that model
try:
model, inputs = self.get_module()
except NotImplementedError:
warnings.warn(UserWarning(f"{model_name}.get_module() doesn't support `check_results` yet!"))
return
def bench_allclose(a, b):
if isinstance(a, torch.Tensor):
assert(isinstance(b, torch.Tensor))
assert(a.allclose(b))
            elif isinstance(a, (tuple, list)):
assert(type(a) == type(b))
assert(len(a) == len(b))
for i in range(len(a)):
bench_allclose(a[i], b[i])
else:
raise RuntimeError("Encountered an supported type.\n" +
"Please add the type or override `bench_allclose`")
try:
opt = model(*inputs)
except Exception as e:
print(e)
warnings.warn(UserWarning(f"{model_name}.eval() doesn't support `check_results` yet!"))
return
# disable optimizations and force a recompilation
# to a baseline version
fwd = model._c._get_method("forward")
fwd._debug_flush_compilation_cache()
torch._C._set_graph_executor_optimize(False)
base = model(*inputs)
torch._C._set_graph_executor_optimize(True)
bench_allclose(base, opt)
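# --- Hedged subclass sketch, not part of the original file ---
# A minimal wrapper in the spirit of ADDING_MODELS.md; the toy network,
# inputs, and get_module() contract below are assumptions for illustration,
# not an existing torchbenchmark model.
class _ToyBenchmarkModel(BenchmarkModel):
    def __init__(self, device='cpu', jit=False):
        super().__init__()
        self.device = device
        self.jit = jit
        self.model = torch.nn.Linear(8, 2).to(device)
        self.example_inputs = (torch.randn(4, 8, device=device),)

    def get_module(self):
        return self.model, self.example_inputs

    def train(self):
        self.model.train()
        self.model(*self.example_inputs).sum().backward()

    def eval(self):
        with no_grad(self.eval_in_nograd()):
            return self.model(*self.example_inputs)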
|
StarcoderdataPython
|
1886497
|
<reponame>shaun95/google-research<filename>symbolic_functionals/syfes/xc/mgga_test.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xc.mgga."""
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from pyscf.dft import libxc
import pyscf.gto
from pyscf.lib import parameters
from symbolic_functionals.syfes.xc import mgga
jax.config.update('jax_enable_x64', True)
class XCMGGATest(parameterized.TestCase):
def setUp(self):
super().setUp()
parameters.TMPDIR = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)
mol = pyscf.gto.M(
atom="""O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587
""",
basis='def2svpd',
verbose=1)
ks = pyscf.dft.RKS(mol)
ks.xc = 'pbe,pbe'
ks.kernel()
ao = ks._numint.eval_ao(ks.mol, coords=ks.grids.coords, deriv=2)
self.weights = ks.grids.weights
self.rho_and_derivs = ks._numint.eval_rho2(
ks.mol, ao, mo_coeff=ks.mo_coeff, mo_occ=ks.mo_occ, xctype='MGGA')
self.rho, gradx, grady, gradz, self.lapl, self.tau = self.rho_and_derivs
self.sigma = gradx**2 + grady**2 + gradz**2
# construct a spin polarized density to test spin polarized case
zeta = 0.2
self.rho_and_derivs_a = 0.5 * (1 + zeta) * self.rho_and_derivs
self.rho_and_derivs_b = 0.5 * (1 - zeta) * self.rho_and_derivs
self.rhoa, self.rhob = self.rho_and_derivs_a[0], self.rho_and_derivs_b[0]
self.sigma_aa = (0.5 * (1 + zeta))**2 * self.sigma
self.sigma_ab = ((0.5 * (1 + zeta)) * (0.5 * (1 - zeta))) * self.sigma
self.sigma_bb = (0.5 * (1 - zeta))**2 * self.sigma
self.tau_a, self.tau_b = self.rho_and_derivs_a[5], self.rho_and_derivs_b[5]
# NOTE(htm): The parameters used in mgga.e_xc_wb97mv_* functions are checked
# against libxc, but the resulting e_xc shows small deviations from libxc
# results (on the order of 1e-3). The reason for the deviation is uncertain.
# The difference on the integrated E_xc energy is very small (< 1e-5 Hartree).
# In the following tests the tolerance for assert_allclose has taken this
# deviation into account.
@parameterized.parameters(
('hyb_mgga_xc_wb97m_v', mgga.e_xc_wb97mv_unpolarized),
('mgga_xc_b97m_v', mgga.e_xc_b97mv_unpolarized)
)
def test_mgga_xc_unpolarized_against_libxc(self, xc_name, xc_fun):
eps_xc_ref, rhograds_ref, _, _ = libxc.eval_xc(
xc_name, self.rho, spin=0, relativity=0, deriv=1)
vrho_ref, vsigma_ref, _, vtau_ref = rhograds_ref
e_xc_ref = eps_xc_ref * self.rho
e_xc = xc_fun(self.rho, self.sigma, self.tau)
e_xc, grads = jax.vmap(jax.value_and_grad(xc_fun, argnums=(0, 1, 2)))(
self.rho, self.sigma, self.tau)
vrho, vsigma, vtau = grads
np.testing.assert_allclose(
np.sum(e_xc * self.weights), np.sum(e_xc_ref * self.weights), atol=1e-5)
np.testing.assert_allclose(e_xc, e_xc_ref, rtol=1e-2, atol=1.e-12)
np.testing.assert_allclose(vrho, vrho_ref, atol=2e-4)
# TODO(leyley,htm): Fix vsigma with libxc 5.
del vsigma, vsigma_ref
# np.testing.assert_allclose(vsigma, vsigma_ref, atol=1e-2)
np.testing.assert_allclose(vtau, vtau_ref, atol=1e-5)
@parameterized.parameters(
('hyb_mgga_xc_wb97m_v', mgga.e_xc_wb97mv_polarized),
('mgga_xc_b97m_v', mgga.e_xc_b97mv_polarized)
)
def test_wb97xv_polarized_against_libxc(self, xc_name, xc_fun):
eps_xc_ref, rhograds_ref, _, _ = libxc.eval_xc(
xc_name, (self.rho_and_derivs_a, self.rho_and_derivs_b),
spin=1, relativity=0, deriv=1)
e_xc_ref = eps_xc_ref * self.rho
vrho_ref, vsigma_ref, _, vtau_ref = rhograds_ref
e_xc, grads = jax.vmap(jax.value_and_grad(
xc_fun, argnums=(0, 1, 2, 3, 4, 5, 6)))(
self.rhoa, self.rhob,
self.sigma_aa, self.sigma_ab, self.sigma_bb,
self.tau_a, self.tau_b)
vrhoa, vrhob, vsigma_aa, vsigma_ab, vsigma_bb, vtau_a, vtau_b = grads
np.testing.assert_allclose(
np.sum(e_xc * self.weights), np.sum(e_xc_ref * self.weights), atol=1e-5)
np.testing.assert_allclose(e_xc, e_xc_ref, rtol=1e-2, atol=1.e-12)
np.testing.assert_allclose(vrhoa, vrho_ref[:, 0], atol=2e-4)
np.testing.assert_allclose(vrhob, vrho_ref[:, 1], atol=2e-4)
# TODO(leyley,htm): Fix vsigma with libxc 5.
del vsigma_aa, vsigma_bb
# np.testing.assert_allclose(vsigma_aa, vsigma_ref[:, 0], atol=1e-2)
np.testing.assert_allclose(vsigma_ab, vsigma_ref[:, 1], atol=1e-2)
# np.testing.assert_allclose(vsigma_bb, vsigma_ref[:, 2], atol=1e-2)
np.testing.assert_allclose(vtau_a, vtau_ref[:, 0], atol=1e-5)
np.testing.assert_allclose(vtau_b, vtau_ref[:, 1], atol=1e-5)
if __name__ == '__main__':
absltest.main()
|
StarcoderdataPython
|
1681289
|
<gh_stars>0
"""
some utilities for dealing with midi
This uses python package python-midi
"""
import traceback
import math
import json
import midi
import os, glob, sys, string
from midi.events import *
import copy
def is_ascii(s):
return all(ord(c) < 128 for c in s)
import base64
def put(dic, key, val):
if key in dic:
dic[key].append(val)
else:
dic[key] = [val]
class PVEvent:
def clone(self):
return copy.copy(self)
class ProgChangeEvent(PVEvent):
def __init__(self, t0, ch, instrument):
self.t0 = t0
self.channel = ch
self.instrument = instrument
def rescaleTime(self, sf):
self.t0 *= sf
def getT0(self):
return self.t0
def setT0(self, t0):
self.t0 = t0
def toDict(self):
d = {"type": "programChange",
"channel": self.channel,
"instrument": self.instrument,
"t0": self.t0}
return d
def toList(self):
return ["programChange", self.t0, self.channel, self.instrument]
class TempoEvent(PVEvent):
def __init__(self, t0, bpm, mpqn):
self.t0 = t0
self.bpm = bpm
self.mpqn = mpqn
def getT0(self):
return self.t0
def rescaleTime(self, sf):
self.t0 *= sf
def setT0(self, t0):
self.t0 = t0
def toDict(self):
return {"type": "tempo",
"t0": self.t0,
"bpm": self.bpm,
"mpqn": self.mpqn}
def toList(self):
return ["tempo", self.t0, self.bpm, self.mpqn]
class Note(PVEvent):
def __init__(self, channel, pitch, t0, velocity, dur=None):
self.channel = channel
self.pitch = pitch
self.t0 = t0
self.velocity = velocity
self.dur = dur
self.parts = [[t0,velocity]]
if dur:
self.finish(t0+dur)
def rescaleTime(self, sf):
t0 = self.getT0()
self.t0 = sf*t0
self.dur *= sf
for part in self.parts:
            part[0] = sf * part[0]
def setT0(self, t0):
self.t0 = t0
self.parts[0][0] = t0
def setVelocity(self, v):
self.velocity = v
self.parts[0][1] = v
def getT0(self):
return self.parts[0][0]
def extend(self, t, velocity):
self.parts.append([t,velocity])
def finish(self, t):
self.parts.append([t,0])
def toDict(self):
t0 = self.parts[0][0]
v = self.parts[0][1]
dur = self.parts[-1][0] - t0
d = {"type": "note",
"channel": self.channel,
"t0": t0,
"pitch": self.pitch,
"dur": dur,
"v": v}
if len(self.parts) != 2:
print "Note with other than less than 2 parts"
d['parts'] = self.parts
return d
def toList(self):
t0 = self.parts[0][0]
v = self.parts[0][1]
dur = self.parts[-1][0] - t0
lst = ["note", t0, self.channel, self.pitch, v, dur]
if len(self.parts) != 2:
print "Note with %d parts" % len(self.parts)
lst.append(self.parts)
return lst
class TrackObj:
def __init__(self, trackOrPath=None, trackName=None):
self.tMax = None
self.trackName = trackName
self.events = {}
self.instrument = None
self.channels = set()
self.instruments = set()
if trackOrPath == None:
return
if type(trackOrPath) in [type("str"), type(u"str")]:
self.loadJSON(trackOrPath)
else:
self.observeTrack(trackOrPath)
def merge(self, tobj):
"""
Copy all notes from other track to play in parallel.
"""
self.addEvents(tobj.allEvents())
self.instruments = self.instruments.union(tobj.instruments)
self.channels = self.channels.union(tobj.channels)
def append(self, tobj):
tMax = self.getMaxTime()
notes = tobj.allNotes()
for note in notes:
nnote = Note(note.channel, note.pitch, note.t0+tMax, note.velocity, note.dur)
self.addNote(nnote)
self.tMax = tMax + tobj.getMaxTime()
def getNumNotes(self):
return len(self.allNotes())
def getMinTime(self):
if not self.events:
return 0
return min(self.events.keys())
def getDur(self):
return self.getMaxTime()
def setMaxTime(self, tMax):
self.tMax = tMax
def getMaxTime(self):
if self.tMax != None:
return self.tMax
if not self.events:
return 0
times = self.events.keys()
times.sort()
t1 = times[-1]
tMax = t1
t = tMax
#print "t1:",t1
for evt in self.events[t1]:
if isinstance(evt, Note):
#print evt, evt.dur
if evt.dur:
t = t1 + evt.dur
tMax = max(tMax, t)
self.tMax = tMax
        return tMax
def scalePower(self, s0, s1=None):
t0 = self.getMinTime()
t1 = self.getMaxTime()
dur = t1-t0
print "t0:", t0, " t1:", t1, " dur:", dur
if s1 == None:
s1 = s0
for note in self.allNotes():
t = note.t0
f = (t - t0)/float(dur)
s = s0 + f*(s1-s0)
v = int(note.velocity * s)
print "%5d %6.4f %3d -> %3d" % (t, s, note.velocity, v)
note.setVelocity(v)
def scalePowerBySin(self):
t0 = self.getMinTime()
t1 = self.getMaxTime()
dur = t1-t0
print "t0:", t0, " t1:", t1, " dur:", dur
for note in self.allNotes():
t = note.t0
f = (t - t0)/float(dur)
s = math.sin(f*math.pi)
v = int(note.velocity * s)
print "%5d %6.4f %3d -> %3d" % (t, s, note.velocity, v)
note.setVelocity(v)
def getRescaleTimeMap(self, s0=1, s1=2, maxTime=None):
if maxTime == None:
maxTime = self.getMaxTime()
maxTime = int(maxTime)
j = 0
tmap = {}
for i in range(maxTime+1):
s = s0 + (s1-s0)*i/(maxTime-1.0)
j += 1/s
#print "%5d %7.2f %7.2f" % (i, s, j)
tmap[i] = int(math.floor(j))
return tmap
def rescaleByTimeMap(self, tmap=None):
if tmap == None:
tmap = self.getRescaleTimeMap()
tobj = TrackObj()
keys = self.events.keys()
keys.sort()
for t in keys:
nt = tmap[t]
for note in self.events[t]:
nnote = Note(note.channel, note.pitch, nt, note.velocity, note.dur)
tobj.addNote(nnote)
return tobj
def rescaleTime(self, sf):
tMax = self.getMaxTime()
self.setMaxTime(sf*tMax)
events = self.allEvents()
self.events = {}
for evt in events:
evt.rescaleTime(sf)
self.addEvent(evt)
def allNotes(self):
notes = []
for ev in self.events.values():
for e in ev:
if isinstance(e, Note):
notes.append(e)
return notes
def allEvents(self):
events = []
for ev in self.events.values():
for e in ev:
events.append(e)
return events
def addEvents(self, evts):
for ev in evts:
            self.addEvent(ev)
def addNote(self, note):
self.addEvent(note)
def addEvent(self, evt):
t0 = evt.getT0()
if t0 in self.events:
self.events[t0].append(evt)
else:
self.events[t0] = [evt]
def addTempoEvent(self, tempoEvent):
self.addEvent(tempoEvent)
def addProgramChangeEvent(self, event):
self.addEvent(event)
def observeTrack(self, track):
tn = 0
openNotes = {}
for evt in track:
tick = evt.tick
tn += tick
#print "tn: %s tick: %s evt: %s" % (tn, tick, evt)
if isinstance(evt, NoteEvent):
ch = evt.channel
#if ch != 0:
# print "ch:", ch
pitch = evt.pitch
v = evt.velocity
#print tn, evt.name, evt.pitch, evt.velocity
if isinstance(evt, NoteOnEvent):
self.channels.add(ch)
if pitch in openNotes:
if openNotes[pitch].parts[-1] == [tn,v]:
print "*** ignoring redundant NoteOn ****"
else:
openNotes[pitch].extend(tn,v)
if v != 0:
print "*** extending note", openNotes[pitch].parts
else:
if v == 0:
print "**** Warning ignoring note with velocity 0 ****"
continue
openNotes[pitch] = Note(ch, pitch, tn, v)
if v == 0:
note = openNotes[pitch]
if note.dur == None:
note.dur = tn - note.t0
#print "added note.dur", note.dur
self.addNote(note)
del openNotes[pitch]
elif isinstance(evt, NoteOffEvent):
if v != 64:
print "NoteOff with v != 64"
if pitch in openNotes:
note = openNotes[pitch]
note.finish(tn)
self.addEvent(note)
del openNotes[pitch]
else:
print "NoteOff for unstarted note"
else:
print "Unexpected note type", evt.name
elif isinstance(evt, TrackNameEvent):
print "TrackName", evt.text
if is_ascii(evt.text):
self.trackName = evt.text
else:
print "**** Non ascii name", evt.text
elif isinstance(evt, PitchWheelEvent):
#print "PitchWheel", evt.pitch
pass
elif isinstance(evt, ProgramChangeEvent):
ch = evt.channel
instrument = evt.value
self.channels.add(ch)
self.instruments.add(instrument)
print "ProgramChange", ch, instrument
self.addProgramChangeEvent(ProgChangeEvent(tn, ch, evt.value))
#if self.instrument != None and self.instrument != evt.value:
# print "**** Changing instrument within track"
self.instrument = evt.value
elif isinstance(evt, TimeSignatureEvent):
n = evt.numerator
d = evt.denominator
met = evt.metronome
s30 = evt.thirtyseconds
print "TimeSignature %s/%s met: %s s30: %s" % \
(n,d, met, s30)
elif isinstance(evt, SetTempoEvent):
bpm = evt.bpm
mpqn = evt.mpqn
print "TempoEvent bpm: %s mpqn: %s" % (bpm, mpqn)
self.addTempoEvent(TempoEvent(tn, bpm, mpqn))
elif isinstance(evt, EndOfTrackEvent):
print "End of track"
elif isinstance(evt, ControlChangeEvent):
print "ControlChange"
pass
else:
print "Unrecognized event type:", evt.name
#
# Now must close any open notes
for pitch in openNotes:
note = openNotes[pitch]
note.finish(tn)
self.addNote(note)
def toDict(self):
seq = []
keys = self.events.keys()
keys.sort()
for t in keys:
eventsAtT = []
evts = self.events[t]
for evt in evts:
eventsAtT.append(evt.toDict())
seq.append([t, eventsAtT])
obj = {'type': 'TrackObj',
'seq': seq}
obj['numNotes'] = self.getNumNotes()
obj['instruments'] = list(self.instruments)
obj['channels'] = list(self.channels)
if self.instrument != None:
obj['instrument'] = self.instrument
if self.trackName:
obj['trackName'] = self.trackName
if self.tMax != None:
obj['tMax'] = self.tMax
return obj
def saveAsJSON(self, path):
print "Saving TrackObj to", path
json.dump(self.toDict(), file(path, "w"),indent=4, sort_keys=True)
def loadJSON(self, path):
print "Loading TrackObj from", path
obj = json.load(file(path))
if 'tMax' in obj:
self.tMax = obj['tMax']
evList = obj['seq']
for ev in evList:
t, noteList = ev
notes = []
for n in noteList:
#note = Note(n[2], n[1], n[3], n[4])
note = Note(n[1], n[3], n[2], n[4], n[5])
notes.append(note)
self.events[t] = notes
def dump(self):
print "%d complete notes" % len(self.notes)
i = 0
for note in self.notes:
print note
if i >= 10:
break
class MidiObj:
def __init__(self, path=None, resolution=1000):
self.tracks = []
self.instruments = set()
self.channels = set()
self.resolution = resolution # this is ticksPerBeat
self.bpm = 100
self.format = 1
self.loop = False
if path:
self.load(path)
def setResolution(self, r):
self.resolution = r
def setBPM(self, bpm):
self.bpm = bpm
def rescaleTime(self, sf):
for track in self.tracks:
track.rescaleTime(sf)
def getTicksPerSec(self):
# resolution is ticksPerBeat
return self.resolution*self.bpm/60.0
def ticksFromTime(self, t):
return t*self.getTicksPerSec()
def timeFromTicks(self, ticks):
return ticks/self.getTicksPerSec()
def dumpInfo(self):
print "Num tracks", len(self.tracks)
i = 0
print "Num Name Notes Dur Instruments Channels"
for track in self.tracks:
i += 1
print "%3d %-18s %4d %7d %-18s %-18s" % \
(i, track.trackName, track.getNumNotes(),
track.getDur(), list(track.instruments),
list(track.channels))
def load(self, midiPath):
pattern = midi.read_midifile(midiPath)
self.resolution = pattern.resolution
self.format = pattern.format
print "pattern.resolution:", pattern.resolution
print "pattern.format:", pattern.format
i = 0;
print type(pattern)
ntracks = len(pattern)
print "ntracks:", ntracks
for track in pattern:
i += 1
trackName = "Track%d" % i
#if ntracks > 1:
# jpath = path.replace(".mid", "%d.json" % i)
print trackName
self.addTrack(TrackObj(track, trackName))
def addTrack(self, trackObj):
self.instruments = self.instruments.union(trackObj.instruments)
self.channels = self.channels.union(trackObj.channels)
self.tracks.append(trackObj)
def toDict(self):
return {'type': 'MidiObj',
'instruments': list(self.instruments),
'channels': list(self.channels),
'format': self.format,
'resolution': self.resolution,
'loop': self.loop,
'tracks': map(TrackObj.toDict, self.tracks)}
def saveAsJSON(self, jpath):
print "Save MidiObj to", jpath
json.dump(self.toDict(), file(jpath, "w"), indent=4)
def saveAsMidi(self, mpath, loop=False):
print "Save MidiObj to", mpath
pattern = midi.Pattern(resolution=self.resolution)
for track in self.tracks:
mtrack = midi.Track()
mevents = {}
#put(mevents, 0, midi.ProgramChangeEvent(channel=0, tick=0, value=0))
for ev in track.allEvents():
if isinstance(ev, ProgChangeEvent):
t = ev.t0
me = midi.ProgramChangeEvent(channel=ev.channel, tick=0, value=ev.instrument)
put(mevents, t, me)
elif isinstance(ev, Note):
t = ev.t0
dur = ev.dur
me = midi.NoteOnEvent(channel=ev.channel, tick=0, velocity=ev.velocity, pitch=ev.pitch)
put(mevents, t, me)
me = midi.NoteOffEvent(channel=ev.channel, tick=0, pitch=ev.pitch)
put(mevents, t+dur, me)
else:
continue
tvals = mevents.keys()
tvals.sort()
prevT = 0
for t in tvals:
dt = t - prevT
prevT = t
mevs = mevents[t]
i = 0
for mev in mevs:
i += 1
mev.tick = 0
if i == 1:
mev.tick = int(dt)
mtrack.append(mev)
if loop:
mtrack.append(midi.TrackLoopEvent(tick=0))
mtrack.append(midi.EndOfTrackEvent(tick=0))
pattern.append(mtrack)
#print pattern
midi.write_midifile(mpath, pattern)
def convertToJSON(path, jpath=None):
if not jpath:
jpath = path.replace(".mid", ".json")
midiObj = MidiObj(path)
midiObj.dumpInfo()
midiObj.saveAsJSON(jpath)
"""
Convert one of the base64 coded data urls found
in euphomy tracks directory to proper .mid file.
"""
def convert(b64path, mpath):
str = file(b64path).read()
i = str.find(",")
data = str[i+1:]
mid = base64.b64decode(data)
file(mpath,"wb").write(mid)
def processAll(force=False):
#fnames = os.listdir(".")
fnames = glob.glob("*") + glob.glob("*/*")
ids = {}
for fname in fnames:
fname = fname.replace("\\", "/")
name, ext = os.path.splitext(fname)
ext = ext.lower()
if ext not in [".mb64", ".mid"]:
continue
ids[name] = fname
if ext == ".mb64" and os.path.exists(name+".mid"):
print "Skipping conversion to .mid"
continue
print name
if os.path.exists(name+".json") and not force:
print "Skipping %s because .json exists" % name
continue
process(fname, force)
names = ids.keys()
names.sort()
json.dump(names, file("compositions.json", "w"), indent=4)
def process(path=None, force=False):
if path == None:
return processAll(force)
print "="*64
print "path:", path
if path.endswith(".mb64"):
mpath = path.replace(".mb64", ".mid")
convert(path, mpath)
path = mpath
jpath = path.replace(".mid", ".json")
if force or not os.path.exists(jpath):
convertToJSON(path)
#pattern = midi.read_midifile(path)
#print pattern
def playMelody(name):
try:
playMelody_(name)
except:
traceback.print_exc()
def playMelody_(name):
import websocket
#ws = websocket.create_connection("ws://echo.websocket.org/")
ws = websocket.create_connection("ws://localhost:8100/")
msg = {'msgType': 'midi.play', 'name': name}
jstr = json.dumps(msg)
ws.send(jstr)
result = ws.recv()
print "result"
def run():
process("cello2.mid", force=True)
return
process("EarthAndSky.mid")
process("DistantDrums.mid")
process("BluesRhythm1.mid")
return
process("shimauta1.mid")
process("minute_waltz.mid")
process("jukebox.mid")
process("beethovenSym5m1.mb64")
process("chopin69.mb64")
process("wtc0.mb64")
process("passac.mid")
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
6461503
|
from pymongo import MongoClient
from bson.objectid import ObjectId
import datetime
import os
MONGODB_URI = os.environ.get('MONGODB_URI', 'mongodb://localhost/fmtrends')
MONGODB_URI = 'mongodb://heroku_gxwl3h33:<EMAIL>:55852/heroku_gxwl3h33'
print 'MONGODB_URI:',MONGODB_URI
mongo_client = MongoClient(MONGODB_URI)
db = mongo_client.get_default_database()
def play_near(play, plays):
minutes = 150
for p in plays:
d = (p['played_at'] - play['played_at']).total_seconds()
if 0 < d and d < (minutes * 60) and p['_id'] is not play['_id']:
# delete p
q = {
'_id': p['_id']
}
# print '\n\n'
# print db.plays.remove(q)
print play
print d
print p
# print '\n\n'
def main():
tracks = db.tracks.find({
'count': {
'$gt': 1
}
})
for track in tracks[:]:
plays = db.plays.find({
'track_id': track['_id']
})
# print track
plays = list(plays)
for play in plays:
d = play['played_at'] - plays[0]['played_at']
play_near(play, plays)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9624360
|
<filename>hlwtadmin/migrations/0016_auto_20200225_1006.py
# Generated by Django 3.0 on 2020-02-25 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hlwtadmin', '0015_auto_20200224_1447'),
]
operations = [
migrations.AlterField(
model_name='historicalorganisation',
name='active',
field=models.BooleanField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalorganisation',
name='verified',
field=models.BooleanField(blank=True, default=False, null=True),
),
migrations.AlterField(
model_name='organisation',
name='active',
field=models.BooleanField(blank=True, null=True),
),
migrations.AlterField(
model_name='organisation',
name='verified',
field=models.BooleanField(blank=True, default=False, null=True),
),
]
|
StarcoderdataPython
|