blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37d1cdab1ee696f8552bdf74cc8f123c227bb56b | 9ab8eab22ce622f5128bd4e2f96f1c3088602b8e | /journals/tests.py | 81c5c7e9a0953ac92112670b5a333ce02f5d66f7 | [] | no_license | ThiraTheNerd/myblog | f72e76f09002255365bf6cc49b4a3be8f7c19e10 | 876715566201fbd553d1539b5deeb3057f2252a2 | refs/heads/master | 2023-07-31T20:56:37.594433 | 2021-09-21T06:18:22 | 2021-09-21T06:18:22 | 408,414,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from django.test import TestCase
from .models import Editor,Post,Comment,tags
# Create your tests here.
class EditorTestClass(TestCase):
    """Unit tests for the journals app's Editor model."""

    #setup method
    def setUp(self):
        # Build (but do not save) one sample Editor instance per test.
        # NOTE(review): the email value is a redacted placeholder from the
        # original repository dump -- confirm the intended fixture value.
        self.editor1 = Editor(username = "JohnTheDon" , first_name = "John", last_name = "Clifford", email = '[email protected]', profile_pic = 'print.jpg')

    def test_instance(self):
        # The fixture should be an Editor (sanity check of model import/ctor).
        self.assertTrue(isinstance(self.editor1, Editor))
"[email protected]"
] | |
a9f117c2a1de07cab83bd0941826967b22984c90 | 017e33c898ff6f11cde9d2214c4bed6e030580f4 | /tictactoe/tictactoe/views.py | 3f6841d40f93be9c58aaf83dc0154b29fedcb5b5 | [] | no_license | harry100/tictactoe | e9ad8d19dd51fb8d51a8501b2a3bb3df71c229d5 | 2069af5a6f577edb4c3ca6aef05841f2b58c3030 | refs/heads/master | 2020-05-20T06:36:45.493203 | 2019-05-16T13:12:37 | 2019-05-16T13:12:37 | 185,432,480 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from django.shortcuts import render, redirect
def welcome(request):
    """Show the public landing page; send logged-in users to their home."""
    # Anonymous visitors get the welcome template.
    if not request.user.is_authenticated:
        return render(request, 'tictactoe/welcome.html', {'title': 'Welcome'})
    # Authenticated players skip the landing page entirely.
    return redirect('player_home')
| [
"[email protected]"
] | |
def convert(number):
    """Return the raindrop-speak string for *number*.

    Appends 'Pling' when *number* is divisible by 3, 'Plang' for 5 and
    'Plong' for 7 (in that order). A number with none of those factors
    is returned as its decimal string.
    """
    factor_sounds = ((3, 'Pling'), (5, 'Plang'), (7, 'Plong'))
    # Join the sounds for every matching factor instead of repeated
    # string concatenation.
    raindrops = ''.join(sound for factor, sound in factor_sounds
                        if number % factor == 0)
    return raindrops if raindrops else str(number)
"[email protected]"
] | |
832ac85d470e5d42fb562d62709a205791dbb1ec | 61d3b368fe222b6bbb8b32f9fad217f0a1a0f2b7 | /IMU/berryIMU.py | 182d4c478a56c2eb32b1a115e26a904c013e41b0 | [] | no_license | Ddeluwa/rpi_rotator | 372153d25a6246c9cff7e653d86193d2d8e36dc7 | 07c3092d7ff328424de43e009fe0d1b99461fd89 | refs/heads/master | 2023-08-22T21:43:29.793178 | 2021-10-21T06:39:39 | 2021-10-21T06:39:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,591 | py | #!/usr/bin/python
#
# This program includes a number of calculations to improve the
# values returned from a BerryIMU. If this is new to you, it
# may be worthwhile first to look at berryIMU-simple.py, which
# has a much more simplified version of code which is easier
# to read.
#
#
# The BerryIMUv1, BerryIMUv2 and BerryIMUv3 are supported
#
# This script is python 2.7 and 3 compatible
#
# Feel free to do whatever you like with this code.
# Distributed as-is; no warranty is given.
#
# https://ozzmaker.com/berryimu/
import time
import math
import IMU
import datetime
import os
import sys
RAD_TO_DEG = 57.29578
M_PI = 3.14159265358979323846
G_GAIN = 0.070 # [deg/s/LSB] If you change the dps for gyro, you need to update this value accordingly
AA = 0.40 # Complementary filter constant
tmp = 0
################# Compass Calibration values ############
# Use calibrateBerryIMU.py to get calibration values
# Calibrating the compass isnt mandatory, however a calibrated
# compass will result in a more accurate heading value.
magXmin = 0
magYmin = 0
magZmin = 0
magXmax = 0
magYmax = 0
magZmax = 0
'''
Here is an example:
magXmin = -1748
magYmin = -1025
magZmin = -1876
magXmax = 959
magYmax = 1651
magZmax = 708
Dont use the above values, these are just an example.
'''
############### END Calibration offsets #################
#Kalman filter variables
# Tuning constants: process noise of the angle estimate (Q_angle) and of
# the gyro bias (Q_gyro), and measurement noise of the accelerometer
# angle (R_angle).
Q_angle = 0.02
Q_gyro = 0.0015
R_angle = 0.005
# Estimated gyro bias per axis; updated in place by the filter steps.
y_bias = 0.0
x_bias = 0.0
# Entries of the 2x2 error-covariance matrix for the X-axis filter...
XP_00 = 0.0
XP_01 = 0.0
XP_10 = 0.0
XP_11 = 0.0
# ...and for the Y-axis filter.
YP_00 = 0.0
YP_01 = 0.0
YP_10 = 0.0
YP_11 = 0.0
# Current fused angle estimates produced by kalmanFilterX/Y.
KFangleX = 0.0
KFangleY = 0.0
def kalmanFilterY ( accAngle, gyroRate, DT):
    """One Kalman-filter step for the Y-axis angle.

    accAngle -- angle (degrees) derived from the accelerometer
    gyroRate -- angular rate (deg/s) from the gyroscope
    DT       -- elapsed time since the previous call (seconds)

    Returns the new fused angle and mutates the module-level filter
    state (KFangleY, y_bias, YP_00..YP_11) in place.
    """
    y=0.0
    S=0.0
    global KFangleY
    global Q_angle
    global Q_gyro
    global y_bias
    global YP_00
    global YP_01
    global YP_10
    global YP_11

    # Predict: integrate the bias-corrected gyro rate into the angle.
    KFangleY = KFangleY + DT * (gyroRate - y_bias)

    # Predict: propagate the error covariance.
    YP_00 = YP_00 + ( - DT * (YP_10 + YP_01) + Q_angle * DT )
    YP_01 = YP_01 + ( - DT * YP_11 )
    YP_10 = YP_10 + ( - DT * YP_11 )
    YP_11 = YP_11 + ( + Q_gyro * DT )

    # Update: innovation (accelerometer angle vs. prediction) and gain.
    y = accAngle - KFangleY
    S = YP_00 + R_angle
    K_0 = YP_00 / S
    K_1 = YP_10 / S

    # Correct the angle estimate and the gyro bias.
    KFangleY = KFangleY + ( K_0 * y )
    y_bias = y_bias + ( K_1 * y )

    # Update the covariance. NOTE(review): YP_10/YP_11 are computed from
    # the *already updated* YP_00/YP_01 -- this matches the upstream
    # OzzMaker C code, but confirm it is intentional before restructuring.
    YP_00 = YP_00 - ( K_0 * YP_00 )
    YP_01 = YP_01 - ( K_0 * YP_01 )
    YP_10 = YP_10 - ( K_1 * YP_00 )
    YP_11 = YP_11 - ( K_1 * YP_01 )

    return KFangleY
def kalmanFilterX ( accAngle, gyroRate, DT):
    """One Kalman-filter step for the X-axis angle.

    Mirror image of kalmanFilterY operating on the module-level X state
    (KFangleX, x_bias, XP_00..XP_11). Returns the new fused angle.
    """
    x=0.0
    S=0.0
    global KFangleX
    global Q_angle
    global Q_gyro
    global x_bias
    global XP_00
    global XP_01
    global XP_10
    global XP_11

    # Predict: integrate the bias-corrected gyro rate into the angle.
    KFangleX = KFangleX + DT * (gyroRate - x_bias)

    # Predict: propagate the error covariance.
    XP_00 = XP_00 + ( - DT * (XP_10 + XP_01) + Q_angle * DT )
    XP_01 = XP_01 + ( - DT * XP_11 )
    XP_10 = XP_10 + ( - DT * XP_11 )
    XP_11 = XP_11 + ( + Q_gyro * DT )

    # Update: innovation and Kalman gain.
    x = accAngle - KFangleX
    S = XP_00 + R_angle
    K_0 = XP_00 / S
    K_1 = XP_10 / S

    # Correct the angle estimate and the gyro bias.
    KFangleX = KFangleX + ( K_0 * x )
    x_bias = x_bias + ( K_1 * x )

    # Covariance update; see NOTE in kalmanFilterY about the in-place
    # ordering of these assignments.
    XP_00 = XP_00 - ( K_0 * XP_00 )
    XP_01 = XP_01 - ( K_0 * XP_01 )
    XP_10 = XP_10 - ( K_1 * XP_00 )
    XP_11 = XP_11 - ( K_1 * XP_01 )

    return KFangleX
IMU.detectIMU()     #Detect if BerryIMU is connected.
if(IMU.BerryIMUversion == 99):
    print(" No BerryIMU found... exiting ")
    sys.exit()
IMU.initIMU()       #Initialise the accelerometer, gyroscope and compass

# Accumulated gyro angles and filter outputs, all in degrees.
gyroXangle = 0.0
gyroYangle = 0.0
gyroZangle = 0.0
CFangleX = 0.0
CFangleY = 0.0
kalmanX = 0.0
kalmanY = 0.0

# Timestamp of the previous loop iteration, used to compute LP below.
a = datetime.datetime.now()

while True:
    # Opened in append mode every iteration; closed again at the bottom.
    fileIMU = open("./heading.txt",'a')

    #Read the accelerometer,gyroscope and magnetometer values
    ACCx = IMU.readACCx()
    ACCy = IMU.readACCy()
    ACCz = IMU.readACCz()
    GYRx = IMU.readGYRx()
    GYRy = IMU.readGYRy()
    GYRz = IMU.readGYRz()
    MAGx = IMU.readMAGx()
    MAGy = IMU.readMAGy()
    MAGz = IMU.readMAGz()

    #Apply compass calibration
    MAGx -= (magXmin + magXmax) /2
    MAGy -= (magYmin + magYmax) /2
    MAGz -= (magZmin + magZmax) /2

    ##Calculate loop Period(LP). How long between Gyro Reads
    b = datetime.datetime.now() - a
    a = datetime.datetime.now()
    LP = b.microseconds/(1000000*1.0)

    #Convert Gyro raw to degrees per second
    rate_gyr_x = GYRx * G_GAIN
    rate_gyr_y = GYRy * G_GAIN
    rate_gyr_z = GYRz * G_GAIN

    #Calculate the angles from the gyro.
    gyroXangle+=rate_gyr_x*LP
    gyroYangle+=rate_gyr_y*LP
    gyroZangle+=rate_gyr_z*LP

    #Convert Accelerometer values to degrees
    AccXangle = (math.atan2(ACCy,ACCz)*RAD_TO_DEG)
    AccYangle = (math.atan2(ACCz,ACCx)+M_PI)*RAD_TO_DEG

    #convert the values to -180 and +180
    if AccYangle > 90:
        AccYangle -= 270.0
    else:
        AccYangle += 90.0

    #Complementary filter used to combine the accelerometer and gyro values.
    CFangleX=AA*(CFangleX+rate_gyr_x*LP) +(1 - AA) * AccXangle
    CFangleY=AA*(CFangleY+rate_gyr_y*LP) +(1 - AA) * AccYangle

    #Kalman filter used to combine the accelerometer and gyro values.
    kalmanY = kalmanFilterY(AccYangle, rate_gyr_y,LP)
    kalmanX = kalmanFilterX(AccXangle, rate_gyr_x,LP)

    #Calculate heading
    heading = 180 * math.atan2(MAGy,MAGx)/M_PI

    #Only have our heading between 0 and 360
    if heading < 0:
        heading += 360

    # NOTE(review): both branches below assign tmp identically; the original
    # indentation was lost in this copy, so it is unclear whether the write
    # was meant to happen only when the heading jumps by more than 5 degrees
    # (as reconstructed here) -- confirm against the upstream repository.
    if abs(tmp-heading) <= 5 :
        tmp = heading
    elif abs(tmp-heading) > 5 :
        tmp = heading
        fileIMU.write(str(heading)+"\n")

    #slow program down a bit, makes the output more readable
    time.sleep(0.5)
    fileIMU.close()
| [
"[email protected]"
] | |
94d16670486958d751dffd735301e0e2e8992843 | 17883f75b816c84b67f512ca41493bfb8915f7f1 | /src/scripts/themis/metaprograms/sender_completion_times/sender_completion_times.py | 0453ee2fac9b4c81441138ff51c8e7dad23d334d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | alexras/themis_tritonsort | ff5c1c44e11c0c5a711e3f886ac5bd80e5102f3b | cd1f6dbf93978471de1d0beb4b026625787edd9c | refs/heads/master | 2022-09-30T20:56:42.009833 | 2017-11-02T20:45:37 | 2017-11-02T20:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | #!/usr/bin/env python
import os, sys, argparse, re, numpy
started_regex = re.compile(r"^\[STATUS\] Started .*\((\d+)\)")
closed_regex = re.compile(r"^\[STATUS\] sender (\d+) closing connection (\d+) to peer (\d+)\((\d+)\)")

def sender_completion_times(log_file):
    """Parse a status log and report sender connection completion times.

    Scans *log_file* for a "Started" marker (run start timestamp in
    microseconds) and per-connection "closing connection" markers, then
    prints each connection's completion time in seconds, a rank-ordered
    listing, and mean/median statistics (overall and per receiving peer).
    """
    start_time = None
    # completion_time_dict[sender][peer][connection] -> seconds to completion
    completion_time_dict = {}
    # Flat list of (time, sender, peer, connection) tuples for rank ordering.
    completed_connections = []
    with open(log_file, "r") as fp:
        for line in fp:
            line = line.strip()

            if started_regex.match(line) is not None:
                matched_groups = started_regex.match(line)
                start_time = int(matched_groups.group(1))
            elif closed_regex.match(line) is not None:
                matched_groups = closed_regex.match(line)
                sender = int(matched_groups.group(1))
                connection = int(matched_groups.group(2))
                peer = int(matched_groups.group(3))
                closed_time = int(matched_groups.group(4))

                if sender not in completion_time_dict:
                    completion_time_dict[sender] = {}
                if peer not in completion_time_dict[sender]:
                    completion_time_dict[sender][peer] = {}

                # Convert microseconds to seconds
                completion_time = (closed_time - start_time) / 1000000.0
                completion_time_dict[sender][peer][connection] = completion_time
                # Store as a tuple so we can rank order connections
                completed_connections.append(
                    (completion_time, sender, peer, connection))

    if not completed_connections:
        # Guard: the statistics below index completion_time_dict[0], which
        # raised KeyError on a log without any completed connections.
        print("No sender connection completions found in log")
        return

    # Print all completion times
    print("Sender completion times:")
    for sender in completion_time_dict:
        for peer in completion_time_dict[sender]:
            for connection in completion_time_dict[sender][peer]:
                completion_time = completion_time_dict[sender][peer][connection]
                print("Sender %d, Peer %d, Connection %d: %f" % (
                    sender, peer, connection, completion_time))

    # Now sort the 4-tuples and print in ascending order.
    completed_connections = sorted(
        completed_connections, key=lambda connection_tuple: connection_tuple[0])
    print("")
    print("Rank ordered completion times:")
    for index, (completion_time, sender, peer, connection) in \
            enumerate(completed_connections):
        print("#%d: Sender %d, Peer %d, Connection %d: %f" % (
            index, sender, peer, connection, completion_time))

    print("")
    print("Completion time statistics")
    # Aggregate mean/median, then per receiving peer.
    num_peers = len(completion_time_dict[0])
    completion_times = [x[0] for x in completed_connections]
    print("Overall mean completion time: %f" % numpy.mean(completion_times))
    print("Overall median completion time: %f" % numpy.median(completion_times))

    # Bug fix: xrange is Python 2 only and raised NameError under Python 3.
    for peer in range(num_peers):
        completion_times = [x[0] for x in completed_connections if x[2] == peer]
        print("Receiving peer %d mean completion time: %f" % (
            peer, numpy.mean(completion_times)))
        print("Receiving peer %d median completion time: %f" % (
            peer, numpy.median(completion_times)))
def main():
    """Entry point: parse the CLI arguments and run the report."""
    parser = argparse.ArgumentParser(
        description="display sender socket completion times")
    parser.add_argument("log_file", help="a log file to check for completion "
                        "times")
    parsed_args = parser.parse_args()
    return sender_completion_times(log_file=parsed_args.log_file)
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
ad5357cbbee3d5cb6b8abc109655c3f528dc05ab | a9a16c414d7370b2ca6442b8125d6b6f9b3d6556 | /chapter_04_Date_and_Time/03_Extracting_Information.py | e998f380786111aef92954588070daeb7428402b | [] | no_license | dennisnderitu254/CodeCademy-Py | 99f1cb9fa011f1586d543650c5001de17f04b8b2 | 758067dc53fdb442ab18dd922dacd13cc8846ebb | refs/heads/master | 2021-07-12T10:01:58.854222 | 2017-10-12T20:03:56 | 2017-10-12T20:03:56 | 106,739,488 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | now = datetime.now()
print now.year
print now.month
print now.day | [
"[email protected]"
] | |
557a25a22ad42e6819d10ff6b720c6d425377be4 | 3cceb0931af9052bf8ecba0ec227545ebd1ad41a | /hw01/DC02_03_decode2_201502023_김민기.py | 61dc2abf65e55c46247405bc8e6054edb61f826c | [] | no_license | minkinew/CNU-2019-Spring-DataCommunication | 99450a3ff555544d307a4df9c28e0305541a4dda | 9ab8ad5f4a4d217867d40ec2742008796487883c | refs/heads/master | 2020-08-11T14:53:25.852812 | 2019-10-20T11:32:31 | 2019-10-20T11:32:31 | 214,581,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,320 | py | from __future__ import print_function
import sys
import wave
from io import StringIO
import alsaaudio
import colorama
import numpy as np
from reedsolo import RSCodec, ReedSolomonError
from termcolor import cprint
from pyfiglet import figlet_format
HANDSHAKE_START_HZ = 4100
HANDSHAKE_END_HZ = 4100 + 1540
START_HZ = 1024
STEP_HZ = 256
BITS = 2
FEC_BYTES = 4
def stereo_to_mono(input_file, output_file):
    """Write the left channel of a stereo WAV as a mono WAV.

    input_file / output_file may be file paths or file-like objects, as
    accepted by the wave module.
    """
    inp = wave.open(input_file, 'r')

    params = list(inp.getparams())
    params[0] = 1  # nchannels: the output is mono
    params[3] = 0  # nframes: wave fills this in when the file is closed

    out = wave.open(output_file, 'w')
    out.setparams(tuple(params))

    frames = inp.readframes(inp.getnframes())
    # np.fromstring/ndarray.tostring were deprecated and removed in modern
    # NumPy; frombuffer/tobytes are the supported equivalents.
    data = np.frombuffer(frames, dtype=np.int16)
    # Interleaved stereo frames: even indices are the left channel.
    left = data[0::2]
    out.writeframes(left.tobytes())

    inp.close()
    out.close()
def yield_chunks(input_file, interval):
    """Yield (frame_rate, samples) tuples from a mono 16-bit WAV.

    Reads *interval* seconds of audio per chunk; the final chunk may be
    shorter. *input_file* is anything wave.open accepts.
    """
    wav = wave.open(input_file)
    frame_rate = wav.getframerate()

    chunk_size = int(round(frame_rate * interval))
    while True:
        chunk = wav.readframes(chunk_size)
        if len(chunk) == 0:
            return
        # frombuffer replaces np.fromstring, which was removed from NumPy.
        yield frame_rate, np.frombuffer(chunk, dtype=np.int16)
def dominant(frame_rate, chunk):
    """Return the frequency (Hz) with the strongest FFT magnitude in *chunk*."""
    spectrum = np.fft.fft(chunk)
    bin_freqs = np.fft.fftfreq(len(chunk))
    # Index of the strongest spectral line, then map it back to Hz.
    strongest_bin = np.argmax(np.abs(spectrum))
    return abs(bin_freqs[strongest_bin] * frame_rate)
def match(freq1, freq2):
    """True when the two frequencies lie within 20 Hz of each other."""
    gap = freq1 - freq2
    return -20 < gap < 20
def decode_bitchunks(chunk_bits, chunks):
    """Repack a sequence of chunk_bits-wide values into whole bytes.

    Bits are taken MSB-first from the low chunk_bits bits of each chunk
    and concatenated; trailing bits that do not fill a complete byte are
    discarded. Returns a list of ints in 0..255.
    """
    mask = (1 << chunk_bits) - 1
    # Build the full MSB-first bit stream as a string of '0'/'1' digits.
    bitstream = ''.join(
        format(chunk & mask, '0{}b'.format(chunk_bits)) for chunk in chunks)
    whole_bytes = len(bitstream) // 8
    return [int(bitstream[i * 8:(i + 1) * 8], 2) for i in range(whole_bytes)]
def decode_file(input_file, speed):
    """Print the dominant frequency of each half-symbol chunk of a WAV file.

    Stereo inputs are first down-mixed to mono (left channel). *speed* is
    the symbol interval in seconds; chunks are half an interval long.
    """
    wav = wave.open(input_file)
    if wav.getnchannels() == 2:
        # Bug fix: wave frames are bytes in Python 3, so they must be
        # buffered in BytesIO -- the original StringIO raised TypeError.
        from io import BytesIO
        mono = BytesIO()
        stereo_to_mono(input_file, mono)
        mono.seek(0)
        input_file = mono
    wav.close()

    offset = 0
    for frame_rate, chunk in yield_chunks(input_file, speed / 2):
        dom = dominant(frame_rate, chunk)
        print("{} => {}".format(offset, dom))
        offset += 1
def extract_packet(freqs):
    """Turn a run of dominant-frequency readings into raw payload bytes.

    Readings arrive at half-symbol resolution, so every other sample is
    kept; the first kept reading (the handshake tone) and anything outside
    the encodable range are discarded before bit-chunk decoding.
    """
    sampled = freqs[::2]
    raw_chunks = [int(round((freq - START_HZ) / STEP_HZ)) for freq in sampled]
    valid_chunks = [c for c in raw_chunks[1:] if 0 <= c < (2 ** BITS)]
    return bytearray(decode_bitchunks(BITS, valid_chunks))
def display(s):
    """Print *s* as large yellow ASCII art using the 'doom' figlet font."""
    # NOTE(review): both replace() arguments render as a plain space here;
    # in the upstream source the first may have been a non-breaking space
    # that got normalised during copying -- confirm before removing it.
    cprint(figlet_format(s.replace(' ', ' '), font='doom'), 'yellow')
def listen_linux(frame_rate=44100, interval=0.1):
    """Listen on the default ALSA capture device and decode audio packets.

    Waits for the handshake start tone, collects dominant frequencies
    until the handshake end tone, then Reed-Solomon-decodes the packet
    and displays it. Runs forever; intended for a Linux device with a
    microphone.
    """
    mic = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, device="default")
    mic.setchannels(1)
    mic.setrate(44100)
    mic.setformat(alsaaudio.PCM_FORMAT_S16_LE)

    # Read half a symbol interval at a time so tone boundaries are caught.
    num_frames = int(round((interval / 2) * frame_rate))
    mic.setperiodsize(num_frames)
    print("start...")

    in_packet = False
    packet = []

    while True:
        l, data = mic.read()
        if not l:
            continue

        chunk = np.fromstring(data, dtype=np.int16)
        dom = dominant(frame_rate, chunk)

        if in_packet and match(dom, HANDSHAKE_END_HZ):
            # End tone seen: decode everything collected since the start tone.
            byte_stream = extract_packet(packet)
            try:
                # Strip/verify the Reed-Solomon parity bytes, then decode text.
                byte_stream = RSCodec(FEC_BYTES).decode(byte_stream)
                byte_stream = byte_stream.decode("utf-8")
                display(byte_stream)
            except ReedSolomonError as e:
                # Too many corrupted symbols to correct; drop the packet.
                pass
                #print("{}: {}".format(e, byte_stream))

            packet = []
            in_packet = False
        elif in_packet:
            packet.append(dom)
        elif match(dom, HANDSHAKE_START_HZ):
            in_packet = True
if __name__ == '__main__':
colorama.init(strip=not sys.stdout.isatty())
#decode_file(sys.argv[1], float(sys.argv[2]))
listen_linux()
| [
"[email protected]"
] | |
c76a2e3b1bfd1cf46459c26c87c7880e3c526200 | 8e431d2e6d9e2b685d03dfb6def0e1aef94f7dc2 | /Lesson7(Game_OOP)/player.py | e4e135d449b612f5412315f52da79d58bac23c53 | [] | no_license | Artem3824/Python_OOP | 9d4c257d0d0952aa5efa14cb49260fc605543427 | d1d354ffe80029897cd10bf32a1f5451136f3dfa | refs/heads/main | 2023-01-11T04:54:50.724472 | 2020-11-10T23:55:22 | 2020-11-10T23:55:22 | 311,809,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | class Player:
health = 100
default_damage = 10
position = [0, 0]
| [
"[email protected]"
] | |
fd838e2debe2321f89d65488f9b3b6a38e02c5f7 | 5e10929633f676c92a4b7b9b36a04a6056af220b | /cifar/ResNet.py | 012d4302a6c050ebc90ad1ef800837e899cfb16c | [] | no_license | Shirosakirukia/bitslice_sparsity | 8dbd5957c11fac09b3968b385ea8208c95fbb35a | adcfbce31ddecf4e440b474e52d5ee24d768498b | refs/heads/master | 2023-06-01T08:22:48.485154 | 2020-10-12T18:22:18 | 2020-10-12T18:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,429 | py | from __future__ import print_function
import math
import nics_fix_pt as nfp
import nics_fix_pt.nn_fix as nnf
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
__all__ = ["resnet"]
BITWIDTH = 8
NEW_BITWIDTH = 6
def _generate_default_fix_cfg(names, scale=0, bitwidth=8, method=0):
return {
n: {
"method": torch.autograd.Variable(
torch.IntTensor(np.array([method])), requires_grad=False
),
"scale": torch.autograd.Variable(
torch.IntTensor(np.array([scale])), requires_grad=False
),
"bitwidth": torch.autograd.Variable(
torch.IntTensor(np.array([bitwidth])), requires_grad=False
),
}
for n in names
}
#################################### ugly version ####################################
def conv3x3(in_planes, out_planes, nf_fix_params, stride=1):
kwargs = {'kernel_size': 3, 'stride': stride, 'padding': 1, 'bias': False}
"3x3 convolution with padding"
return nnf.Conv2d_fix(
in_planes, out_planes, nf_fix_params=nf_fix_params, **kwargs
)
#return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
# padding=1, bias=False)
class BasicBlock(nnf.FixTopModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.downsample = downsample
self.stride = stride
# initialize some fix configurations
self.conv1_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
self.conv2_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
'''
self.bn1_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
self.bn2_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
'''
activation_num = 7 if not downsample else 8
self.fix_params = [
_generate_default_fix_cfg(["activation"], method=1, bitwidth=BITWIDTH)
for _ in range(activation_num)
]
# initialize layers with corresponding configurations
self.conv1 = conv3x3(inplanes, planes, self.conv1_fix_params, stride)
self.bn1 = nn.BatchNorm2d(planes)
#self.bn1 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn1_fix_params)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, self.conv2_fix_params)
self.bn2 = nn.BatchNorm2d(planes)
#self.bn2 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn1_fix_params)
# initialize activation fix modules
for i in range(len(self.fix_params)):
setattr(self, "fix"+str(i), nnf.Activation_fix(nf_fix_params=self.fix_params[i]))
# initialize weights
for m in self.modules():
if isinstance(m, nnf.Conv2d_fix):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
#m.bias.data.zero_()
def forward(self, x):
x = self.fix0(x)
residual = x
out = self.fix1(self.conv1(x))
out = self.fix2(self.bn1(out))
out = self.relu(self.fix3(out))
out = self.fix4(self.conv2(out))
out = self.fix5(self.bn2(out))
if self.downsample is not None:
residual = self.fix7(self.downsample(x))
out += residual
out = self.relu(self.fix6(out))
return out
class Bottleneck(nnf.FixTopModule):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.downsample = downsample
self.stride = stride
# initialize some fix configurations
self.conv1_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
self.conv2_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
self.conv3_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
'''
self.bn1_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
self.bn2_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
self.bn3_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
'''
activation_num = 10 if not downsample else 11
self.fix_params = [
_generate_default_fix_cfg(["activation"], method=1, bitwidth=BITWIDTH)
for _ in range(activation_num)
]
# initialize activation fix modules
for i in range(len(self.fix_params)):
setattr(self, "fix"+str(i), nnf.Activation_fix(nf_fix_params=self.fix_params[i]))
'''
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
'''
self.conv1 = nnf.Conv2d_fix(inplanes, planes, kernel_size=1, bias=False, nf_fix_params=self.conv1_fix_params)
#self.bn1 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn1_fix_params)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nnf.Conv2d_fix(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, nf_fix_params=self.conv2_fix_params)
#self.bn2 = nnf.BatchNorm2d_fix(planes, nf_fix_params=self.bn2_fix_params)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nnf.Conv2d_fix(planes, planes * 4, kernel_size=1, bias=False, nf_fix_params=self.conv3_fix_params)
#self.bn3 = nnf.BatchNorm2d_fix(planes * 4, nf_fix_params=self.bn3_fix_params)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.fix0(x)
residual = x
out = self.fix1(self.conv1(x))
out = self.fix2(self.bn1(out))
out = self.fix3(self.relu(out))
out = self.fix4(self.conv2(out))
out = self.fix5(self.bn2(out))
out = self.fix6(self.relu(out))
out = self.fix7(self.conv3(out))
out = self.fix8(self.bn3(out))
if self.downsample is not None:
residual = self.fix11(self.downsample(x))
out += residual
out = self.fix9(self.relu(out))
return out
class ResNet(nnf.FixTopModule):
def __init__(self, depth, num_classes=10, block_name='BasicBlock'):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
raise ValueError('block_name shoule be Basicblock or Bottleneck')
# initialize fix configurations
self.conv1_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
self.conv2_fix_params = _generate_default_fix_cfg(
["weight"], method=1, bitwidth=BITWIDTH)
'''
self.bn1_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
self.bn2_fix_params = _generate_default_fix_cfg(
["weight", "bias", "running_mean", "running_var"],
method=1, bitwidth=BITWIDTH,
)
'''
self.fc_fix_params = _generate_default_fix_cfg(
["weight", "bias"], method=1, bitwidth=BITWIDTH)
self.fix_params = [
_generate_default_fix_cfg(["activation"], method=1, bitwidth=BITWIDTH)
for _ in range(6)
]
self.inplanes = 16
self.conv1 = nnf.Conv2d_fix(3, 16, kernel_size=3, padding=1,
bias=False, nf_fix_params=self.conv1_fix_params)
#self.bn1 = nnf.BatchNorm2d_fix(16, nf_fix_params=self.bn1_fix_params)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nnf.Linear_fix(64 * block.expansion, num_classes, nf_fix_params=self.fc_fix_params)
# initialize activation fix modules
for i in range(len(self.fix_params)):
setattr(self, "fix"+str(i), nnf.Activation_fix(nf_fix_params=self.fix_params[i]))
for m in self.modules():
if isinstance(m, nnf.Conv2d_fix):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nnf.Conv2d_fix(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False, nf_fix_params=self.conv2_fix_params),
#nnf.BatchNorm2d_fix(planes * block.expansion, nf_fix_params=self.bn2_fix_params),
nn.BatchNorm2d(planes * block.expansion)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.fix0(x)
x = self.fix1(self.conv1(x))
x = self.fix2(self.bn1(x))
x = self.fix3(self.relu(x)) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.fix4(self.avgpool(x))
x = x.view(x.size(0), -1)
x = self.fix5(self.fc(x))
return x
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs) | [
"[email protected]"
] | |
88bb39a3d98653873b5e154bc63ead95fdeec94e | e74beee6b2e1046a9cd3c87723676e3be1405612 | /AvinodeProxy/DatabaseConnector.py | 870c1ec74b894f9044c8af91f70b24f129ec0954 | [] | no_license | dahobt123/AvinodeProxy | f15ec11217325191e6454dc256a8bc1ae314c855 | 1340fe744238986b292d0626f212b8d2591a94f8 | refs/heads/master | 2023-08-15T20:27:42.295883 | 2021-10-11T21:21:02 | 2021-10-11T21:21:02 | 416,090,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,770 | py | import json
import os
import cglogging as cgl
import psycopg2
from SecretManagertConnection import SecretManagerConnection
logger_Class = cgl.cglogging()
logger = logger_Class.setup_logging()
class DatabaseConnector:
    """Class-level wrapper around a single psycopg2 connection to the
    CharterAndGO reporting database.

    Credentials are resolved at runtime from AWS Secrets Manager by
    connect_to_database(); all state is stored on the class so every
    caller shares one connection.
    """

    # Secrets Manager key and AWS region, taken from the environment at
    # import time (raises KeyError if either variable is unset).
    database_key = os.environ['CAG_DATABASE_CREDENTIALS']
    region = os.environ['CAG_REGION']
    # Connection parameters; host/user/password/port are populated from the
    # fetched secret by connect_to_database().
    host = ""
    user = ""
    password = ""
    database = "CharterAndGO"
    port = ""
    # Shared connection/cursor handles (None until a connection is made).
    databaseConnection = None
    databaseCursor = None
@classmethod
def connect_to_database(cls):
secret = SecretManagerConnection.get_secrets(cls.database_key, cls.region)
logger.debug("inside database connector")
if 'username' in secret['SecretString']:
extracted_secret = json.loads(secret['SecretString'])
cls.user = extracted_secret['username']
cls.password = extracted_secret['password']
cls.host = extracted_secret["host"]
cls.port = extracted_secret['port']
cls.engine = extracted_secret['engine']
try:
cls.databaseConnection = psycopg2.connect(user=cls.user, password=cls.password, database=cls.database,
host=cls.host, port=cls.port)
return 0, " "
except (Exception, psycopg2.Error) as error:
cls.databaseConnection.rollback()
print(1, "sql failed".format(error))
return 1, " "
# -----------------------------------orders---------------------------------------
@classmethod
def readOrders(cls, orders):
logger.debug("inside Read orders database ")
sql = """SELECT charter_supplier_id, order_id
FROM public.reporting_orders WHERE charter_supplier_id = %s AND order_id = %s"""
try:
cursor = cls.databaseConnection.cursor()
cursor.execute(sql, (orders["supplierId"], orders["orderId"]))
resultList = cursor.fetchall()
orderResult = []
for result in resultList:
orderResult.append({
"supplierId": result[0],
"orderId": result[1],
})
return orderResult
except (Exception, psycopg2.Error) as error:
cls.databaseConnection.rollback()
print(1, "sql failed".format(error))
    @classmethod
    def createOrders(cls, orders):
        """Insert one row into public.reporting_orders from the *orders* dict.

        The 52 tuple entries below are positional and must stay in exactly
        the same order as the column list in the INSERT statement.
        Returns (0, " ") on success; on failure rolls back and returns a
        3-tuple (1, "failed to load orders ", " ").
        NOTE(review): the inconsistent tuple arity on the failure path, and
        keys such as "bookData" (vs. book_date) and "tripOrgCounty" (vs.
        trip_org_country), look accidental -- confirm with the callers.
        """
        logger.debug(" inside create orders database ")
        sql = """INSERT INTO public.reporting_orders( trip_locator, status, charter_supplier_id, business_profile_id,
        business_profile_name, book_date, departure_date, arrival_date, trip_level_origin_airport,
        trip_level_destination_airport, passenger_segment_tax, federal_excise_tax, charterid, order_id, total_cost,
        total_base_price, total_charter_fees, base_price_override, order_substatus, international_tax, total_fuel_cost,
        total_cabin_cost, total_aircraft_hull_cost, total_maintenance_cost, total_supporting_services_cost,
        total_airport_fees, total_government_taxes, charter_trip_type, trip_name, trip_org_first_name,
        trip_org_last_name, trip_billing_company_name, trip_billing_company_address, trip_billing_company_city,
        trip_billing_company_state, trip_billing_first_name, trip_billing_last_name, trip_org_address,
        trip_org_city, trip_org_state, payment, trip_billing_phone, order_source_detail, passenger_category,
        trip_catergory, shopping_id, order_source, offer_id, trip_org_country, trip_org_pcode,
        trip_billing_country, trip_billing_pcode) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
        try:
            cursor = cls.databaseConnection.cursor()
            # Second cursor kept on the class so the except path can print
            # the last query for debugging.
            cls.databaseCursor = cls.databaseConnection.cursor()
            cursor.execute(sql, (orders["tripLocator"],
                                 orders["orderStatus"],
                                 orders["supplierId"],
                                 orders["businessProfileId"],
                                 orders["businessProfileName"],
                                 orders["bookData"],
                                 orders["tripStartDate"],
                                 orders["tripEndDate"],
                                 orders["originAirport"],
                                 orders["destinationAirport"],
                                 orders["passengerSegmentTax"],
                                 orders["federalExciseTax"],
                                 orders["charterId"],
                                 orders["orderId"],
                                 orders["totalCost"],
                                 orders["totalBasePrice"],
                                 orders["totalCharterFees"],
                                 orders["basePriceOverride"],
                                 orders["orderSubStatus"],
                                 orders["internationalTax"],
                                 orders["totalFuelCost"],
                                 orders["totalCabinCost"],
                                 orders["totalAircraftHullCost"],
                                 orders["totalMaintenanceCost"],
                                 orders["totalSupportingServicesCost"],
                                 orders["totalAirportsFees"],
                                 orders["totalGovernmentTaxes"],
                                 orders["charterTripType"],
                                 orders["tripName"],
                                 orders["tripOrgFirstName"],
                                 orders["tripOrgLastName"],
                                 orders["tripBillingCompanyName"],
                                 orders["tripBillingCompanyAddress"],
                                 orders["tripBillingCompanyCity"],
                                 orders["tripBillingCompanyState"],
                                 orders["tripBillingFirstName"],
                                 orders["tripBillingLastName"],
                                 orders["tripOrgAddress"],
                                 orders["tripOrgCity"],
                                 orders["tripOrgState"],
                                 orders["payment"],
                                 orders["tripBillingPhone"],
                                 orders["orderSourceDetail"],
                                 orders["passengerCategory"],
                                 orders["tripCategory"],
                                 orders["shoppingId"],
                                 orders["orderSource"],
                                 orders["offerId"],
                                 orders["tripOrgCounty"],
                                 orders["tripOrgPcode"],
                                 orders["tripBillingCompanyCounty"],
                                 orders["tripBillingCompanyPcode"]
                                 ))
            cls.databaseConnection.commit()
            return 0, " "
        except (Exception, psycopg2.Error) as error:
            cls.databaseConnection.rollback()
            print(cls.databaseCursor.query)
            print(1, "sql failed to load orders".format(error))
            return 1, "failed to load orders ", " "
@classmethod
def updateOrders(cls, orders):
    """Update one row of public.reporting_orders, matched on
    charter_supplier_id + order_id.

    Args:
        orders (dict): order fields keyed by camelCase names
            (e.g. "tripLocator", "orderStatus", "supplierId", "orderId").

    Returns:
        tuple: (0, " ") on success, (1, "failed to update orders") on failure.
    """
    logger.debug("inside update orders database ")
    sql = """UPDATE public.reporting_orders SET trip_locator=%s, status=%s, charter_supplier_id=%s,
    business_profile_id=%s, business_profile_name=%s, book_date=%s, departure_date=%s, arrival_date=%s,
    trip_level_origin_airport=%s, trip_level_destination_airport=%s, passenger_segment_tax=%s, federal_excise_tax=%s,
    charterid=%s, order_id=%s, total_cost=%s, total_base_price=%s, total_charter_fees=%s, base_price_override=%s,
    order_substatus=%s, international_tax=%s, total_fuel_cost=%s, total_cabin_cost=%s, total_aircraft_hull_cost=%s,
    total_maintenance_cost=%s, total_supporting_services_cost=%s, total_airport_fees=%s, total_government_taxes=%s,
    charter_trip_type=%s, trip_name=%s, trip_org_first_name=%s, trip_org_last_name=%s, trip_billing_company_name=%s,
    trip_billing_company_address=%s, trip_billing_company_city=%s, trip_billing_company_state=%s,
    trip_billing_first_name=%s, trip_billing_last_name=%s, trip_org_address=%s, trip_org_city=%s, trip_org_state=%s,
    payment=%s, trip_billing_phone=%s, order_source_detail=%s, passenger_category=%s, trip_catergory=%s,
    shopping_id=%s, order_source=%s, offer_id=%s, trip_org_country=%s, trip_org_pcode=%s, trip_billing_country=%s,
    trip_billing_pcode=%s WHERE charter_supplier_id = %s AND order_id = %s ;"""
    try:
        cursor = cls.databaseConnection.cursor()
        # parameter order must match the SET column order above exactly
        cursor.execute(sql, (orders["tripLocator"],
                             orders["orderStatus"],
                             orders["supplierId"],
                             orders["businessProfileId"],
                             orders["businessProfileName"],
                             orders["bookData"],
                             orders["tripStartDate"],
                             orders["tripEndDate"],
                             orders["originAirport"],
                             orders["destinationAirport"],
                             orders["passengerSegmentTax"],
                             orders["federalExciseTax"],
                             orders["charterId"],
                             orders["orderId"],
                             orders["totalCost"],
                             orders["totalBasePrice"],
                             orders["totalCharterFees"],
                             orders["basePriceOverride"],
                             orders["orderSubStatus"],
                             orders["internationalTax"],
                             orders["totalFuelCost"],
                             orders["totalCabinCost"],
                             orders["totalAircraftHullCost"],
                             orders["totalMaintenanceCost"],
                             orders["totalSupportingServicesCost"],
                             orders["totalAirportsFees"],
                             orders["totalGovernmentTaxes"],
                             orders["charterTripType"],
                             orders["tripName"],
                             orders["tripOrgFirstName"],
                             orders["tripOrgLastName"],
                             orders["tripBillingCompanyName"],
                             orders["tripBillingCompanyAddress"],
                             orders["tripBillingCompanyCity"],
                             orders["tripBillingCompanyState"],
                             orders["tripBillingFirstName"],
                             orders["tripBillingLastName"],
                             orders["tripOrgAddress"],
                             orders["tripOrgCity"],
                             orders["tripOrgState"],
                             orders["payment"],
                             orders["tripBillingPhone"],
                             orders["orderSourceDetail"],
                             orders["passengerCategory"],
                             orders["tripCategory"],
                             orders["shoppingId"],
                             orders["orderSource"],
                             orders["offerId"],
                             orders["tripOrgCounty"],
                             orders["tripOrgPcode"],
                             orders["tripBillingCompanyCounty"],
                             orders["tripBillingCompanyPcode"],
                             orders["supplierId"],
                             orders["orderId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: the message had no "{}" placeholder, so .format(error)
        # silently dropped the database error text.
        print(1, "sql failed to update orders: {}".format(error))
        return 1, "failed to update orders"
@classmethod
def deleteOrders(cls, orders):
    """Soft-"delete" an order in public.reporting_orders.

    NOTE(review): despite the name, this executes an UPDATE, not a DELETE.
    Presumably a soft-delete / reset of selected columns -- confirm intent.

    Returns:
        tuple: (0, " ") on success, (1, "failed to update orders") on failure.
    """
    logger.debug("inside delete orders database ")
    # NOTE(review): the columns written here (trip_start_date, booking_fee,
    # booking_federal_excise_tax, ...) and the dict keys read below ("status",
    # "BusinessProfileId", "departureDate", "booking_fee", ...) do not match
    # the schema/keys used by createOrders/updateOrders -- verify against the
    # current table definition and callers before relying on this method.
    sql = """UPDATE public.reporting_orders
    SET order_id=%s, trip_locator=%s, status=%s, charter_supplier_id=%s, business_profile_id=%s, business_profile_name=%s,
    book_date=%s, departure_date=%s, trip_start_date=%s, trip_end_date=%s, trip_level_origin_airport=%s,
    trip_level_destination_airport=%s, booking_fee=%s, booking_federal_excise_tax=%s, charterid=%s
    WHERE charter_supplier_id = %s AND order_id = %s ;"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (orders["orderId"], orders["tripName"], orders["status"],
                             orders["supplierId"], orders["BusinessProfileId"],
                             orders["businessProfileName"], orders["bookData"],
                             orders["departureDate"], orders["startDate"],
                             orders["endDate"], orders["originAirport"],
                             orders["destinationAirport"], orders["booking_fee"],
                             orders["federalExciseTax"], orders["charterId"],
                             orders["supplierId"], orders["orderId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG: the message has no "{}" placeholder, so the error text is dropped.
        print(1, "sql failed to update orders".format(error))
        return 1, "failed to update orders"
# -----------------------------------OrderItems---------------------------------------
@classmethod
def createOrderItems(cls, orderItems):
    """Insert one flight-segment row into public.reporting_order_items.

    Args:
        orderItems (dict): segment fields keyed by camelCase names
            (e.g. "orderItemId", "orderId", "charterSupplierId").

    Returns:
        tuple: (0, " ") on success, (1, "failed to load orders ", " ") on failure.
    """
    logger.debug(" inside create OrderItems database ")
    sql = """INSERT INTO public.reporting_order_items( order_item_id, order_id, origin_airport, destination_airport,
    departure_time, arrival_time, aircraft_cag_id, aircraft_tail_number, aircraft_make, aircraft_model,
    num_passengers, trip_distance_miles, total_segment_cost, passenger_service_tax, airport_fees,
    charter_supplier_id, segment_type, segment_status, charterid, departure_taxi_time, flight_duration,
    arrival_taxi_time, override_flight_duration, aircraft_hull_cost, supporting_services_cost, maintenance_cost,
    fuel_cost, cabin_cost) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
    %s, %s, %s, %s, %s, %s, %s, %s);"""
    try:
        cursor = cls.databaseConnection.cursor()
        # parameter order must match the column list above exactly
        cursor.execute(sql, (orderItems["orderItemId"],
                             orderItems["orderId"],
                             orderItems["originAirport"],
                             orderItems["destinationAirport"],
                             orderItems["startTime"],
                             orderItems["endTime"],
                             orderItems["aircraftCagId"],
                             orderItems["tailNumber"],
                             orderItems["make"],
                             orderItems["model"],
                             orderItems["numPassengers"],
                             orderItems["mileage"],
                             orderItems["totalCost"],
                             orderItems["passengerServiceTax"],
                             orderItems["airportFees"],
                             orderItems["charterSupplierId"],
                             orderItems["segmentType"],
                             orderItems["segmentStatus"],
                             orderItems["charterId"],
                             orderItems["departureTaxiTime"],
                             orderItems["flightDuration"],
                             orderItems["arrivalTaxiTime"],
                             orderItems["overrideFlightDuration"],
                             orderItems["aircraftHullCost"],
                             orderItems["supportingServicesCost"],
                             orderItems["maintenanceCost"],
                             orderItems["fuelCost"],
                             orderItems["cabinCost"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message previously said "load orders" and had no "{}"
        # placeholder, so the actual database error was never shown.
        print(1, "sql failed to create order items: {}".format(error))
        return 1, "failed to load orders ", " "
@classmethod
def readOrderItems(cls, order):
    """Fetch all order-item rows for one order of one supplier.

    Args:
        order (dict): must contain "supplierId" and "orderId".

    Returns:
        tuple: (0, " ", list-of-dicts) on success, (0, " ", None) when no
        rows match, (1, "failed to load orders ", " ") on failure.
    """
    logger.debug("inside Read OrderItems database ")
    sql = """SELECT order_item_id, order_id, origin_airport, destination_airport, departure_time, arrival_time,
    aircraft_cag_id, aircraft_tail_number, aircraft_make, aircraft_model, num_passengers, trip_distance_miles,
    total_segment_cost, passenger_service_tax, airport_fees, charter_supplier_id, segment_type, segment_status,
    charterid, departure_taxi_time, flight_duration, arrival_taxi_time, override_flight_duration,
    aircraft_hull_cost, supporting_services_cost, maintenance_cost, fuel_cost, cabin_cost
    FROM public.reporting_order_items
    WHERE charter_supplier_id = %s AND order_id = %s """
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (order["supplierId"], order["orderId"]))
        resultList = cursor.fetchall()
        resultOrderItems = []
        # index positions follow the SELECT column order above
        for result in resultList:
            resultOrderItems.append({"orderItemId": result[0],
                                     "orderId": result[1],
                                     "originAirport": result[2],
                                     "destinationAirport": result[3],
                                     "startTime": result[4],
                                     "endTime": result[5],
                                     "aircraftCagId": result[6],
                                     "tailNumber": result[7],
                                     "make": result[8],
                                     "model": result[9],
                                     "numPassengers": result[10],
                                     "mileage": result[11],
                                     "totalCost": result[12],
                                     "passengerServiceTax": result[13],
                                     "airportFees": result[14],
                                     "charterSupplierId": result[15],
                                     "segmentType": result[16],
                                     "segmentStatus": result[17],
                                     "charterId": result[18],
                                     "departureTaxiTime": result[19],
                                     "flightDuration": result[20],
                                     "arrivalTaxiTime": result[21],
                                     "overrideFlightDuration": result[22],
                                     "aircraftHullCost": result[23],
                                     "supportingServicesCost": result[24],
                                     "maintenanceCost": result[25],
                                     "fuelCost": result[26],
                                     "cabinCost": result[27],
                                     })
        if resultOrderItems:  # idiomatic truthiness instead of len(...)
            return 0, " ", resultOrderItems
        else:
            return 0, " ", None
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to read order items: {}".format(error))
        return 1, "failed to load orders ", " "
@classmethod
def updateOrderItems(cls, orderItems):
    """Update one row of public.reporting_order_items, matched on
    charter_supplier_id + order_item_id + order_id.

    Returns:
        tuple: (0, " ") on success, (1, "failed to load orders ", " ") on failure.
    """
    logger.debug("inside update OrderItems database ")
    sql = """UPDATE public.reporting_order_items SET order_item_id=%s, order_id=%s, origin_airport=%s,
    destination_airport=%s, departure_time=%s, arrival_time=%s, aircraft_cag_id=%s, aircraft_tail_number=%s,
    aircraft_make=%s, aircraft_model=%s, num_passengers=%s, trip_distance_miles=%s, total_segment_cost=%s,
    passenger_service_tax=%s, airport_fees=%s, charter_supplier_id=%s, segment_type=%s, segment_status=%s, charterid=%s,
    departure_taxi_time=%s, flight_duration=%s, arrival_taxi_time=%s, override_flight_duration=%s,
    aircraft_hull_cost=%s, supporting_services_cost=%s, maintenance_cost=%s, fuel_cost=%s, cabin_cost=%s
    WHERE charter_supplier_id = %s AND order_item_id =%s AND order_id = %s;"""
    try:
        cursor = cls.databaseConnection.cursor()
        # SET parameters first, then the three WHERE parameters
        cursor.execute(sql, (orderItems["orderItemId"],
                             orderItems["orderId"],
                             orderItems["originAirport"],
                             orderItems["destinationAirport"],
                             orderItems["startTime"],
                             orderItems["endTime"],
                             orderItems["aircraftCagId"],
                             orderItems["tailNumber"],
                             orderItems["make"],
                             orderItems["model"],
                             orderItems["numPassengers"],
                             orderItems["mileage"],
                             orderItems["totalCost"],
                             orderItems["passengerServiceTax"],
                             orderItems["airportFees"],
                             orderItems["charterSupplierId"],
                             orderItems["segmentType"],
                             orderItems["segmentStatus"],
                             orderItems["charterId"],
                             orderItems["departureTaxiTime"],
                             orderItems["flightDuration"],
                             orderItems["arrivalTaxiTime"],
                             orderItems["overrideFlightDuration"],
                             orderItems["aircraftHullCost"],
                             orderItems["supportingServicesCost"],
                             orderItems["maintenanceCost"],
                             orderItems["fuelCost"],
                             orderItems["cabinCost"],
                             orderItems["charterSupplierId"],
                             orderItems["orderItemId"],
                             orderItems["orderId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to update order items: {}".format(error))
        return 1, "failed to load orders ", " "
@classmethod
def deleteOrderItems(cls, orderItems):
    """Delete one row from public.reporting_order_items.

    Args:
        orderItems (dict): must contain "supplierId", "orderItemId", "orderId".

    Returns:
        tuple: (0, " ") on success, (1, "failed to load orders ", " ") on failure.
    """
    logger.debug("inside delete OrderItems database ")
    # BUG FIX: the WHERE clause previously ended with "AND order_id" (no
    # comparison) while three parameters were bound, so the statement could
    # never execute successfully.
    sql = """DELETE FROM public.reporting_order_items
    WHERE charter_supplier_id = %s AND order_item_id =%s AND order_id = %s;"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (orderItems["supplierId"], orderItems["orderItemId"], orderItems["orderId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to delete order items: {}".format(error))
        return 1, "failed to load orders ", " "
# -----------------------------------OrderItemCrew---------------------------------------
@classmethod
def createOrderItemCrew(cls, crew):
    """Insert one crew assignment into reporting_order_item_crew_assignment.

    Returns:
        tuple: (0, " ") on success, (1, "failed to load crew ", " ") on failure.
    """
    logger.debug(" inside create OrderItemCrew database ")
    sql = """INSERT INTO public.reporting_order_item_crew_assignment( order_item_id, crew_profile_id, first_name,
    last_name, order_id, charter_supplier_id, crew_type, charterid) VALUES (%s, %s, %s, %s, %s, %s, %s, %s);"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (crew["orderItemId"],
                             crew["crewProfileId"],
                             crew["firstName"],
                             crew["lastName"],
                             crew["orderId"],
                             crew["charterSupplierId"],
                             crew["crewType"],
                             crew["charterId"],
                             ))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to create crew: {}".format(error))
        return 1, "failed to load crew ", " "
@classmethod
def readOrderItemCrew(cls, orderId, charterId):
    """Fetch all crew assignments for one order.

    Args:
        orderId: order identifier.
        charterId: value matched against charter_supplier_id
            (NOTE(review): named "charterId" but bound to the supplier
            column -- confirm callers pass the supplier id here).

    Returns:
        tuple: (0, " ", list-of-dicts) on success, (0, " ", None) when no
        rows match, (1, "failed to load crew ", None) on failure.
    """
    logger.debug("inside Read OrderItemCrew database ")
    sql = """SELECT order_item_id, crew_profile_id, first_name, last_name, order_id, charter_supplier_id,
    crew_type, charterid FROM public.reporting_order_item_crew_assignment
    WHERE order_id = %s AND charter_supplier_id = %s;"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (orderId, charterId))
        resultList = cursor.fetchall()
        crewResult = []
        # index positions follow the SELECT column order above
        for result in resultList:
            crewResult.append({
                "orderItemId": result[0],
                "crewProfileId": result[1],
                "firstName": result[2],
                "lastName": result[3],
                "orderId": result[4],
                "supplierId": result[5],
                "crewType": result[6],
                "charterId": result[7]
            })
        if crewResult:  # idiomatic truthiness instead of len(...) > 0
            return 0, " ", crewResult
        else:
            return 0, " ", None
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to read crew: {}".format(error))
        return 1, "failed to load crew ", None
@classmethod
def updateOrderItemCrew(cls, crew):
    """Update one crew assignment in reporting_order_item_crew_assignment.

    Returns:
        tuple: (0, " ") on success, (1, "failed to load crew ", " ") on failure.
    """
    logger.debug("inside update OrderItemCrew database ")
    sql = """UPDATE public.reporting_order_item_crew_assignment
    SET order_item_id=%s, crew_profile_id=%s, first_name=%s, last_name=%s, order_id=%s, charter_supplier_id=%s,
    crew_type=%s, charterid=%s WHERE charter_supplier_id =%s AND order_id=%s AND order_item_id =%s AND
    crew_profile_id=%s; """
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (crew["orderItemId"], crew["crewProfileId"], crew["firstName"], crew["lastName"],
                             crew["orderId"], crew["charterSupplierId"], crew["crewType"], crew["charterId"],
                             crew["charterSupplierId"], crew["orderId"], crew["orderItemId"],
                             crew["crewProfileId"]))
        # BUG FIX: was "cls.databaseConnection.commit" (missing parentheses),
        # which referenced the bound method without calling it, so the
        # update was never committed.
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to update crew: {}".format(error))
        return 1, "failed to load crew ", " "
@classmethod
def deleteOrderItemCrew(cls, crew):
    """Delete one crew assignment from reporting_order_item_crew_assignment.

    Args:
        crew (dict): must contain "supplierId", "orderId", "orderItemId",
            "crewProfileId".

    Returns:
        tuple: (0, " ") on success, (1, "failed to load crew ", " ") on failure.
    """
    logger.debug("inside delete OrderItemCrew database ")
    sql = """DELETE FROM public.reporting_order_item_crew_assignment
    WHERE charter_supplier_id =%s AND order_id=%s AND order_item_id =%s AND crew_profile_id =%s ;"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (crew["supplierId"], crew["orderId"], crew["orderItemId"], crew["crewProfileId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to delete crew: {}".format(error))
        return 1, "failed to load crew ", " "
# -----------------------------------OrderItemPassenger---------------------------------------
@classmethod
def createOrderItemPassenger(cls, passenger):
    """Insert one passenger row into public.reporting_order_item_pax.

    Returns:
        tuple: (0, " ") on success, (1, "failed to load crew passenger") on failure.
    """
    logger.debug(" inside create OrderItemPassenger database ")
    sql = """INSERT INTO public.reporting_order_item_pax
    (order_item_id, first_name, last_name, charter_supplier_id, order_id, charterid)
    VALUES (%s, %s, %s, %s, %s, %s);"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (passenger["orderItemId"], passenger["firstName"], passenger["lastName"],
                             passenger["charterSupplierId"], passenger["orderId"], passenger["charterId"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to create passenger: {}".format(error))
        return 1, "failed to load crew passenger"
@classmethod
def readOrderItemPassenger(cls, orderId, charterId):
    """Fetch all passengers for one order.

    Args:
        orderId: order identifier.
        charterId: value matched against charter_supplier_id
            (NOTE(review): named "charterId" but bound to the supplier
            column -- confirm callers pass the supplier id here).

    Returns:
        tuple: (0, " ", list-of-dicts) on success, (0, " ", None) when no
        rows match, (1, "failed to load crew passenger", None) on failure.
    """
    logger.debug("inside Read OrderItemPassenger database ")
    sql = """SELECT order_item_id, first_name, last_name, charter_supplier_id, order_id
    FROM public.reporting_order_item_pax WHERE order_id = %s AND charter_supplier_id = %s; """
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (orderId, charterId))
        resultList = cursor.fetchall()
        passengerResult = []
        # index positions follow the SELECT column order above
        for result in resultList:
            passengerResult.append({
                "orderItemId": result[0],
                "firstName": result[1],
                "lastName": result[2],
                "supplierId": result[3],
                "orderId": result[4],
            })
        if passengerResult:  # idiomatic truthiness instead of len(...) > 0
            return 0, " ", passengerResult
        else:
            return 0, " ", None
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to read passenger: {}".format(error))
        return 1, "failed to load crew passenger", None
@classmethod
def updateOrderItemPassenger(cls, passenger):
    """Update one passenger row in public.reporting_order_item_pax.

    Returns:
        tuple: (0, " ") on success, (1, "failed to load crew passenger") on failure.
    """
    logger.debug("inside update OrderItemPassenger database ")
    sql = """UPDATE public.reporting_order_item_pax
    SET order_item_id=%s, first_name=%s, last_name=%s, charter_supplier_id=%s, order_id=%s, charterid=%s
    WHERE order_item_id= %s AND charter_supplier_id = %s AND order_id = %s AND first_name=%s
    AND last_name=%s;"""
    try:
        cursor = cls.databaseConnection.cursor()
        # SET parameters first, then the five WHERE parameters
        cursor.execute(sql, (passenger["orderItemId"], passenger["firstName"], passenger["lastName"],
                             passenger["charterSupplierId"], passenger["orderId"], passenger["charterId"],
                             passenger["orderItemId"], passenger["charterSupplierId"], passenger["orderId"],
                             passenger["firstName"], passenger["lastName"]))
        cls.databaseConnection.commit()
        return 0, " "
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        # BUG FIX: message had no "{}" placeholder; include the error text.
        print(1, "sql failed to update passenger: {}".format(error))
        return 1, "failed to load crew passenger"
@classmethod
def deleteOrderItemPassenger(cls, passenger):
    """Delete one passenger row from public.reporting_order_item_pax.

    Args:
        passenger (dict): must contain "orderItemId", "userId",
            "supplierId", "orderId".

    Returns:
        tuple: (0, " ", None) on success,
        (1, "failed to load crew passenger", None) on failure.
    """
    logger.debug("inside delete OrderItemPassenger database ")
    sql = """DELETE FROM public.reporting_order_item_pax
    WHERE order_item_id= %s AND user_id =%s AND charter_supplier_id = %s AND order_id = %s;"""
    try:
        cursor = cls.databaseConnection.cursor()
        cursor.execute(sql, (passenger["orderItemId"], passenger["userId"],
                             passenger["supplierId"], passenger["orderId"]))
        # BUG FIX: the old code called cursor.fetchall() after a plain DELETE
        # (no RETURNING clause), which always raised, and the except branch
        # then hit a NameError on the never-assigned passengerResult. It also
        # never committed. Commit the delete and return the usual 3-tuple.
        cls.databaseConnection.commit()
        return 0, " ", None
    except (Exception, psycopg2.Error) as error:
        cls.databaseConnection.rollback()
        print(1, "sql failed to delete passenger: {}".format(error))
        return 1, "failed to load crew passenger", None
@classmethod
def commit(cls):
    """Commit the current transaction on the shared connection."""
    # BUG FIX: was "cls.databaseConnection.commit" (missing parentheses),
    # which evaluated the bound method without calling it -- nothing was
    # ever committed by this helper.
    cls.databaseConnection.commit()
@classmethod
def close_connection(cls):
    """Close the shared database connection for this class."""
    connection = cls.databaseConnection
    connection.close()
"[email protected]"
] | |
73797c806b77c2f4d742cc68217f727a39f283bc | b8c3946d8ae46ab96ed7fb0158c66e89997285d6 | /test_track_with_yolov5/yolov5_detect_video_tracking_sort.py | 692092ed8f796eab70468aab8fb4526f2e300495 | [] | no_license | Vuong02011996/MOT_tracking | b02fc879ecadedeb25e2cc1925dd127e2ff1431a | 240dbce5e376fb6fde8f166ee3b13af0ae447531 | refs/heads/master | 2023-07-16T01:51:03.685681 | 2021-08-20T13:36:18 | 2021-08-20T13:36:18 | 398,268,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,423 | py | import cv2
from queue import Queue
from yolov5_detect_image import Y5Detect, draw_boxes_tracking, draw_det_when_track
import time
from kthread import KThread
import numpy as np
from mot_sort.mot_sort_tracker import Sort
from mot_sort import untils_track
# Module-level pipeline state shared by the worker threads below.
# Head/body detector weights -- assumes the relative path is valid from the
# process working directory (TODO confirm).
y5_model = Y5Detect(weights="../test_track_with_yolov5/model_head/y5headbody_v2.pt")
class_names = y5_model.class_names
# SORT multi-object tracker fed by the detector's class list.
mot_tracker = Sort(class_names)
class InfoCam(object):
    """Thin holder for a cv2.VideoCapture opened on a camera/video source."""

    def __init__(self, cam_name):
        # cam_name may be a file path, URL, or device index (cv2 accepts all)
        self.cap = cv2.VideoCapture(cam_name)
def video_capture(cam, frame_detect_queue, frame_origin_queue):
    """Read frames from the capture and fan them out to the worker queues.

    Pushes the RGB copy to frame_detect_queue and [BGR frame, index] to
    frame_origin_queue until the stream ends.
    """
    idx = 0
    capture = cam.cap
    capture.set(cv2.CAP_PROP_POS_FRAMES, idx)  # rewind to the first frame
    while capture.isOpened():
        ok, bgr_frame = capture.read()
        if not ok:
            break
        frame_detect_queue.put(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
        frame_origin_queue.put([bgr_frame, idx])
        print("frame_count: ", idx)
        idx += 1
    capture.release()
def inference(cam, frame_detect_queue, detections_queue):
    """Run head detection on each queued RGB frame and forward the results."""
    while cam.cap.isOpened():
        rgb_frame = frame_detect_queue.get()
        prediction = y5_model.predict_sort(rgb_frame, label_select=["head"])
        boxes, labels, scores, detections_sort = prediction
        detections_queue.put([boxes, labels, scores, rgb_frame, detections_sort])
    cam.cap.release()
def bbox2points(bbox):
    """Convert a YOLO-style (center x, center y, w, h) box to cv2 rectangle
    corner points (xmin, ymin, xmax, ymax)."""
    x, y, w, h = bbox
    half_w = w / 2
    half_h = h / 2
    left = int(round(x - half_w))
    right = int(round(x + half_w))
    top = int(round(y - half_h))
    bottom = int(round(y + half_h))
    return left, top, right, bottom
def to_tlwh(tlbr):
    """Convert (x1, y1, x2, y2) corner boxes to [center x, center y, w, h].

    Accepts any sequence of boxes whose first four entries are the corner
    coordinates; extra per-box entries (e.g. a score) are ignored.
    """
    converted = []
    for corner_box in tlbr.copy():
        w = int(corner_box[2]) - int(corner_box[0])
        h = int(corner_box[3]) - int(corner_box[1])
        converted.append([int(corner_box[0]) + w / 2, int(corner_box[1]) + h / 2, w, h])
    return converted
def tracking(cam, frame_origin_queue, detections_queue, tracking_queue):
    """Associate per-frame detections with track ids using SORT
    (Hungarian assignment + Kalman filter) via mot_tracker.update().

    Input detections: [[x1, y1, x2, y2, score, label], ...]; an empty
    np.empty((0, 5)) is used for frames without detections.
    Output pushed to tracking_queue:
    [track_bbs_ids, boxes, labels, scores, unm_trk_ext] where
    track_bbs_ids is [[x1, y1, x2, y2, id, label], ...].
    """
    # polygon covering the whole 2560x1440 frame; only detections inside
    # this region are handed to the tracker
    region_track = np.array([[0, 0],
                             [2560, 0],
                             [2560, 1440],
                             [0, 1440]])
    while cam.cap.isOpened():
        boxes, labels, scores, image_rgb, detections_sort = detections_queue.get()
        detections = np.empty((0, 5)) if len(boxes) == 0 else detections_sort
        # keep only detections whose boxes lie inside the tracking region
        detections = untils_track.select_bbox_inside_polygon(detections, region_track)
        track_bbs_ids, unm_trk_ext = mot_tracker.update(detections, image=image_rgb)
        tracking_queue.put([track_bbs_ids, boxes, labels, scores, unm_trk_ext])
    cam.cap.release()
def drawing(cam, tracking_queue, frame_origin_queue, frame_final_queue, show_det=True):
    """Overlay tracking (and optionally raw detection) boxes on each frame
    and push [image, frame_count] to frame_final_queue for display."""
    while cam.cap.isOpened():
        frame_origin, frame_count = frame_origin_queue.get()
        track_bbs_ids, boxes, labels, scores, unm_trk_ext = tracking_queue.get()
        if frame_origin is None:
            continue
        image = draw_boxes_tracking(frame_origin, track_bbs_ids, scores=scores, labels=labels,
                                    class_names=class_names, track_bbs_ext=unm_trk_ext)
        if show_det:
            image = draw_det_when_track(frame_origin, boxes, scores=scores, labels=labels,
                                        class_names=class_names)
        if not frame_final_queue.full():
            frame_final_queue.put([image, frame_count])
        else:
            # consumer is behind; back off briefly instead of blocking
            time.sleep(0.001)
    cam.cap.release()
def save_debug_image(frame_count, image,
                     out_dir="/home/vuong/Desktop/Project/GG_Project/green-clover-montessori/new_core/debug_image"):
    """Persist a frame to disk for offline debugging.

    Args:
        frame_count: frame index used in the output file name.
        image: BGR image to write.
        out_dir: destination directory; defaults to the previously
            hard-coded path, so existing callers are unaffected.
    """
    cv2.imwrite(out_dir + "/test_" + str(frame_count) + ".png", image)
def main():
    """Wire up the capture -> detect -> track -> draw pipeline and show results."""
    frame_detect_queue = Queue(maxsize=1)
    frame_origin_queue = Queue(maxsize=1)
    detections_queue = Queue(maxsize=1)
    tracking_queue = Queue(maxsize=1)
    frame_final_queue = Queue(maxsize=1)

    input_path = "/home/vuong/Downloads/output-1617592346.8862946_part3.mp4"
    cam = InfoCam(input_path)

    pipeline_stages = [
        (video_capture, (cam, frame_detect_queue, frame_origin_queue)),
        (inference, (cam, frame_detect_queue, detections_queue)),
        (tracking, (cam, frame_origin_queue, detections_queue, tracking_queue)),
        (drawing, (cam, tracking_queue, frame_origin_queue, frame_final_queue)),
    ]
    thread_manager = []
    for target, args in pipeline_stages:
        worker = KThread(target=target, args=args)
        worker.daemon = True  # daemon threads will not keep the process alive
        worker.start()
        thread_manager.append(worker)

    while cam.cap.isOpened():
        cv2.namedWindow('output')
        image, frame_count = frame_final_queue.get()
        image = cv2.resize(image, (1400, 640))
        cv2.imshow('output', image)
        if 1550 <= frame_count <= 1800:
            # dump this frame range to disk without stalling the display loop
            KThread(target=save_debug_image, args=(frame_count, image)).start()
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cv2.destroyWindow('output')
            break
    for worker in thread_manager:
        if worker.is_alive():
            worker.terminate()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
64c73708bb629a89c9f4fbd8c2b03867966b7c9d | 7ec6bbd1ce4f29bab44ef3671af899451c1e8b4a | /groups/migrations/0001_initial.py | 2a5a36fe7d95551dede41389cfacd0ef8079c72a | [] | no_license | Anurag0197/Star-Geek-Social-Website | 77a13ff86d89843993c607dc56383cda549d2dcf | c1368b4e258a4012bfb5da2e53e8295fd33e5d0c | refs/heads/master | 2020-04-25T06:03:40.219643 | 2019-03-31T08:00:22 | 2019-03-31T08:00:22 | 172,549,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-23 13:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Group model.

    NOTE: produced by `makemigrations`; avoid hand-editing once applied.
    """

    initial = True

    # Must run after the accounts app's initial migration because
    # Group.members targets accounts.User.
    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('slug', models.SlugField(allow_unicode=True, unique=True)),
                ('description', models.TextField(default='')),
                # reverse accessor: User.group (many-to-many membership)
                ('members', models.ManyToManyField(related_name='group', to='accounts.User')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
2cc0912f0ae8dac58e79ed3b4021efa61986fd8e | d3ee7dce34fcde462804945b5f59221ee1c2e6be | /python_practice/game/game_round1.py | 672d438f7243ef1cb996893f71026da0d6cd7f80 | [] | no_license | linpeijie-python/linpeijiecode | 8c8f9c6abea80af8e5da466de31062f436cc0a30 | f3d45b6588174c1068ab5326fd8daad213e397a1 | refs/heads/master | 2023-01-22T22:45:31.411321 | 2020-11-25T14:24:08 | 2020-11-25T14:24:08 | 284,997,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/11 0:06
# @Author : Mark
# @File : game_round1.py
# @Software: PyCharm | [
"[email protected]"
] | |
3ee6bb27a1eefbac2fdff000c653df6497c93ab8 | fc5c08110687204b5a3eceb9aa1dc572fbd51e9a | /caseworks_project/settings.py | 840e29bd80e37ea937432d31b3d192035e020ca7 | [] | no_license | jedgodsey/Finch-collector-lab-views | 3d68d2174755bd2d78beb20a213af93ed5a66ac3 | 5c77abcb1e42ec6b5591f69d425bf2a229e93f8a | refs/heads/master | 2023-08-01T02:59:32.370047 | 2020-10-30T18:20:37 | 2020-10-30T18:20:37 | 319,246,121 | 0 | 0 | null | 2021-09-22T19:39:13 | 2020-12-07T08:04:23 | Python | UTF-8 | Python | false | false | 3,169 | py | """
Django settings for caseworks_project project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import django_on_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '51t7%h%g7@!h10v*qjr)10de+#=afd$^5d5)z4w_rb!qit*fy@'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty here; django_on_heroku.settings() at the bottom patches this (and
# other deployment settings) when running on Heroku.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'main_app',  # project app, listed first so its templates/static take priority
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'caseworks_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # templates are discovered inside each app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'caseworks_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        # No USER/PASSWORD/HOST given -- presumably relies on local peer/trust
        # authentication; django_on_heroku swaps in DATABASE_URL on Heroku.
        'NAME': 'staplerinjuries',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

# Applies Heroku-specific overrides (database, allowed hosts, static files,
# logging) to the settings defined above; keep this call last.
django_on_heroku.settings(locals())
"[email protected]"
] | |
ca6077ab3eec9f7e4c1ace9940048320626185e1 | 53012f36a939bb199d5443e91280e9c5d01a1e6d | /mapreduce/__init__.py | 42ef6c05515806be50972b63e1da421a25e1fb4d | [] | no_license | pascals-ager/hadoop-streaming | a771ae5dc1caf44c03ffa762450b44ccecce40ec | 7d1b95ae16610e73c3a3e167c642eafb3dfa9b6a | refs/heads/master | 2020-05-26T05:45:01.293620 | 2019-06-11T12:27:21 | 2019-06-11T12:27:21 | 188,125,576 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from .mapper import Mapper
from .reducer import Reducer | [
"[email protected]"
] | |
a92948a8a3f0df9574808653aa318e4ec34289b2 | 2fa2b818807287977b7dec4128e4e8efce704749 | /Modules/pronunciations.py | fb596b187711da1c19134f3fa3f9767cbc4e5255 | [] | no_license | shriyachhabra/LogophileBot | 8698aaacb447d1d70e83ab0b4612ee8e22368c25 | aa9aa89bca9932f53396455a8efe046c37713102 | refs/heads/master | 2022-11-05T04:50:49.992068 | 2018-02-20T19:24:50 | 2018-02-20T19:24:50 | 121,655,742 | 1 | 1 | null | 2022-10-23T06:48:46 | 2018-02-15T16:58:42 | Python | UTF-8 | Python | false | false | 537 | py | import requests
import json
def pronunciation(word):
    """Look up the pronunciation audio URL for *word* via the Oxford
    Dictionaries v1 entries API.

    Args:
        word (str): word to look up (lower-cased before the request).

    Returns:
        str: the audio file URL on success, or "Word does not exist" when the
        word is unknown or the response lacks pronunciation data.
    """
    # NOTE(review): API credentials are hard-coded; move them to
    # configuration/environment variables and rotate these values.
    app_id = 'd520dc72'
    app_key = '9636a335f1d422e70aa5a9b2381f6517'
    language = 'en'
    url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word.lower()
    r = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
    if r.status_code == 404:
        return "Word does not exist"
    try:
        data = r.json()
        # ROBUSTNESS FIX: entries without lexicalEntries/pronunciations (or a
        # non-JSON error body) previously raised KeyError/IndexError/ValueError.
        # Keep the same sentinel string so existing callers still work.
        af = data['results'][0]['lexicalEntries'][0]['pronunciations'][0]['audioFile']
    except (KeyError, IndexError, ValueError):
        return "Word does not exist"
    print(af)
    return af
"[email protected]"
] | |
0b111abb5bcf2f3d825a7c5209b853280ac2c8b1 | 19aa291198622834cc0fc04610d419189a098f24 | /stock/JSONData/gui_test.py | 90f13d4ede0443c727fff6e70fc086050731ea63 | [] | no_license | johnsonhongyi/pyQuant | 3127cc30a7fa07a9ca58a1a067c8ee1d289c29a3 | a1873ff29383c4f3a1cfb7206c2cb72ab0da8b3b | refs/heads/master | 2023-01-23T15:58:59.332695 | 2023-01-18T13:05:13 | 2023-01-18T13:05:13 | 47,158,933 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from tkinter import *
from pandastable import Table, TableModel
# import sys
# sys.path.append('../')
import tdx_data_Day as tdd
# Sample data for the demo table: daily bars for index code 999999,
# last 30 entries (dl=30) from the local TDX export -- TODO confirm
# parameter semantics against tdx_data_Day.get_tdx_Exp_day_to_df.
code = '999999'
df = tdd.get_tdx_Exp_day_to_df(code, type='f', start=None, end=None, dl=30, newdays=None)
class TestApp(Frame):
    """Minimal Tk window hosting a pandastable Table bound to the module-level df."""

    def __init__(self, parent=None):
        self.parent = parent
        Frame.__init__(self)
        self.main = self.master
        self.main.geometry('800x600+200+100')
        self.main.title('Table app')
        container = Frame(self.main)
        container.pack(fill=BOTH, expand=1)
        # build the table widget inside the container and render it
        table = Table(container, dataframe=df,
                      showtoolbar=True, showstatusbar=True)
        self.table = table
        table.show()
# Launch the demo window and enter the Tk event loop (blocks until closed).
app = TestApp()
app.mainloop()
"[email protected]"
] | |
0d81647ed6d049cdbf0f84dd57324110714438a6 | 1ecde4178548f331f15717f245e3f657b58b9993 | /zjl_crawler/scrapySchool_Australia/scrapySchool_England/spiders/UniversityofSouthernQueensland_p.py | 0f804f105ef33000ae0432aea8212170abadedfe | [] | no_license | gasbarroni8/python_spider | 296dcb7c3fd9dd028423fe5ec0a321d994478b15 | 7935fa462926bc8ea9bf9883bd15265dd0d3e6df | refs/heads/master | 2023-03-26T05:22:59.858422 | 2019-04-15T07:17:56 | 2019-04-15T07:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,453 | py | # _*_ coding:utf-8 _*_
__author__ = 'zjl'
__date__ = '2018/7/30 16:52'
import scrapy,json
import re
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor
from scrapySchool_England.getItem import get_item1
from scrapySchool_England.getTuition_fee import getTuition_fee
from scrapySchool_England.items import ScrapyschoolEnglandItem1
from scrapySchool_England.remove_tags import remove_class
from scrapySchool_England.getIELTS import get_ielts, get_toefl
from w3lib.html import remove_tags
from scrapySchool_England.clearSpace import clear_space_str
import requests
from lxml import etree
from scrapySchool_England.translate_date import tracslateDate
from scrapySchool_England.getTuition_fee import getT_fee
class UniversityofSouthernQueenslandSpider(scrapy.Spider):
name = 'UniversityofSouthernQueensland_p'
allowed_domains = ['usq.edu.au/']
start_urls = []
C= [
'https://www.usq.edu.au/study/degrees/master-of-business-administration/finance',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/project-management',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/digital-marketing-analytics',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/digital-marketing-analytics',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/business-leadership',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/business-leadership',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/business-leadership',
'https://www.usq.edu.au/study/degrees/master-of-arts/creative-arts',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/business-leadership',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/finance',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/finance',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/digital-marketing-analytics',
'https://www.usq.edu.au/study/degrees/master-of-education-commonwealth-supported/guidance-counselling',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/digital-marketing-analytics',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/general',
'https://www.usq.edu.au/study/degrees/master-of-learning-and-teaching/primary',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/general',
'https://www.usq.edu.au/study/degrees/master-of-education-commonwealth-supported/early-childhood',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/project-management',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/project-management',
'https://www.usq.edu.au/study/degrees/master-of-project-management',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/big-data-visualisation',
'https://www.usq.edu.au/study/degrees/master-of-learning-and-teaching/secondary',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/big-data-visualisation',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/civil-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/enterprise-leadership',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/electrical-electronic-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/enterprise-leadership',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/environmental-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/crisis-management',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/crisis-management',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/mechanical-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/finance',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/finance',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/power-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/strategic-marketing',
'https://www.usq.edu.au/study/degrees/master-of-spatial-science-technology/geographic-information-systems',
'https://www.usq.edu.au/study/degrees/master-of-business-administration-international/strategic-marketing',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/structural-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/international-business',
'https://www.usq.edu.au/study/degrees/master-of-engineering-science/agricultural-engineering',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/international-business',
'https://www.usq.edu.au/study/degrees/master-of-spatial-science-technology/surveying',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/general',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/general',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/sustainable-business',
'https://www.usq.edu.au/study/degrees/master-of-arts/corporate-communication',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/sustainable-business',
'https://www.usq.edu.au/study/degrees/master-of-arts/humanities-communication',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/professional-communication',
'https://www.usq.edu.au/study/degrees/master-of-computing-technology/networking-system-security',
'https://www.usq.edu.au/study/degrees/master-of-business-and-innovation/professional-communication',
'https://www.usq.edu.au/study/degrees/master-of-computing-technology/software-the-web',
'https://www.usq.edu.au/study/degrees/master-of-professional-accounting',
'https://www.usq.edu.au/study/degrees/master-of-professional-accounting',
'https://www.usq.edu.au/study/degrees/master-of-education-commonwealth-supported/teaching-english-to-speakers-of-other-languages',
'https://www.usq.edu.au/study/degrees/master-of-arts/editing-publishing',
'https://www.usq.edu.au/study/degrees/master-of-education-commonwealth-supported/teaching-english-to-speakers-of-other-languages',
'https://www.usq.edu.au/study/degrees/master-of-computing',
'https://www.usq.edu.au/study/degrees/master-of-science/sport-exercise',
'https://www.usq.edu.au/study/degrees/master-of-science/sport-exercise',
'https://www.usq.edu.au/study/degrees/master-of-business-administration/digital-transformation',
'https://www.usq.edu.au/study/degrees/master-of-science/mathematics-statistics',
'https://www.usq.edu.au/study/degrees/master-of-science/mathematics-statistics',
'https://www.usq.edu.au/study/degrees/master-of-science/applied-data-science',
'https://www.usq.edu.au/study/degrees/master-of-science/applied-data-science',
'https://www.usq.edu.au/study/degrees/master-of-information-systems',
'https://www.usq.edu.au/study/degrees/master-of-information-systems'
]
# print(len(C))
for i in C:
start_urls.append(i)
def parse(self, response):
pass
item = get_item1(ScrapyschoolEnglandItem1)
#1.university
university = 'University of Southern Queensland'
# print(university)
#2.url
url = response.url
# print(url)
#3.degree_name
degree_name = response.xpath('//*[@id="main-wrap"]/section[3]/div/div/div[1]/h1').extract()
degree_name = ''.join(degree_name)
degree_name = remove_tags(degree_name).replace('& ','')
# print(degree_name)
#4.degree_type
degree_type = 2
#5.degree_overview_en
degree_overview_en = response.xpath('//*[@id="overview"]/div/div').extract()
degree_overview_en = ''.join(degree_overview_en)
degree_overview_en = remove_class(degree_overview_en)
# print(degree_overview_en)
#6.career_en
career_en = response.xpath('//*[@id="careerOutcomesCollapse"]/div').extract()
career_en = ''.join(career_en)
career_en = remove_class(career_en)
# print(career_en)
#7.programme_en
if ' &' in degree_name:
degree_name = degree_name.replace(' &','')
if '('in degree_name:
programme_en = re.findall(r'\((.*)\)',degree_name)[0]
else:programme_en = degree_name.replace('Master of ','')
# print(programme_en)
#8.start_date
start_date = response.xpath('//*[@id="summary"]/div[3]/div[4]/ul/li').extract()
start_date = ''.join(start_date)
start_date = remove_tags(start_date)
if 'Semester 1 (February)Semester 2 (July)Semester 3 (November)' in start_date:
start_date = '2,7,11'
elif 'Semester 1 (February)Semester 2 (July)' in start_date:
start_date = '2,7'
elif 'Semester 2 (July)Semester 1 (February)' in start_date:
start_date = '2,7'
elif 'Semester 1 (February)' in start_date:
start_date = '2'
# print(start_date)
#9.duration
duration = response.xpath('//*[@id="summary"]/div[3]/div[6]/ul/li').extract()
duration = ''.join(duration)
duration = remove_tags(duration)
if '1.5' in duration:
duration = 1.5
else: duration =re.findall('\d',duration)[0]
# print(duration)
#10.location
location = response.xpath('//*[@id="summary"]/div[3]/div[7]/ul/li').extract()
location = ','.join(location)
location = remove_tags(location)
# print(location)
#11.modules_en
modules_en_url = response.xpath('//*[@id="program-structure"]//div[@class="icon-message__text"]//a/@href').extract()
modules_en_url = ''.join(modules_en_url)
headers = {
"User-Agent": "Mozilla/5.0. (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"}
data = requests.get(modules_en_url,headers=headers)
response2 = etree.HTML(data.text.replace('<?xml version="1.0" encoding="UTF-8"?>',''))
modules_en = response2.xpath("//h2[contains(text(),'Program structure')]//following-sibling::*")
doc = ""
if len(modules_en) > 0:
for a in modules_en:
doc += (etree.tostring(a, encoding='unicode', pretty_print=False, method='html'))
doc = remove_class(doc)
modules_en = doc
# print(modules_en)
#12.tuition_fee
tuition_fee = response.xpath('//*[@id="fees"]/div/div').extract()
tuition_fee = ''.join(tuition_fee)
tuition_fee = getT_fee(tuition_fee)
# print(tuition_fee)
#13.tuition_fee_pre
tuition_fee_pre = 'AUD'
#14.apply_pre
apply_pre = 'AUD'
#15.deadline
deadline = response.xpath('//*[@id="how-to-apply"]/div/div/ul/li').extract()
deadline = ''.join(deadline)
deadline = remove_tags(deadline)
if 'Semester 3' in deadline:
deadline ='2018-11-5,2019-2-25,2019-7-15'
else:deadline = '2019-2-25,2019-7-15'
# print(deadline)
#16.rntry_requirements_en
rntry_requirements_en = response.xpath('//*[@id="entry-requirements"]/div/div').extract()
rntry_requirements_en = ''.join(rntry_requirements_en)
rntry_requirements_en = remove_class(rntry_requirements_en)
# print(rntry_requirements_en)
#17.work_experience_desc_en
if 'a minimum of two years’ professional work experience, or equivalent.' in rntry_requirements_en:
work_experience_desc_en = 'a minimum of two years’ professional work experience, or equivalent.'
elif 'a minimum of one year professional work experience in business, or equivalent.' in rntry_requirements_en:
work_experience_desc_en = 'a minimum of one year professional work experience in business, or equivalent.'
elif 'USQ’s Graduate Certificate of Business provides a pathway into the Master of Business and Innovation for students who meet the work experience requirements but do not hold a Graduate Certificate at AQF level 8 or equivalent.' in rntry_requirements_en:
work_experience_desc_en = 'USQ’s Graduate Certificate of Business provides a pathway into the Master of Business and Innovation for students who meet the work experience requirements but do not hold a Graduate Certificate at AQF level 8 or equivalent.'
else:work_experience_desc_en = ''
work_experience_desc_en = '<p>'+work_experience_desc_en+'</p>'
#18.average_score
average_score = '75'
#19.ielts 20212223 24.toefl 25262728
ielts = 6.5
ielts_r = 6
ielts_w = 6
ielts_s = 6
ielts_l = 6
toefl = 90
toefl_r = 20
toefl_w = 20
toefl_s = 20
toefl_l = 20
#29.apply_documents_en
apply_documents_en = '本科毕业证 本科学位证 本科在读证明 本科成绩单 护照 语言成绩 个人简历(可选)个人陈述(可选)推荐信(可选)'
#30.apply_desc_en
apply_desc_en = "<p>How to apply 1. Choose a degree Choose the degree you want to apply for.2. Check you meet the entry requirements Check the degree details to find out the entry requirements.Check the standard undergraduate entry requirements by country.All students must also meet USQ's English Language Requirements. 3. Collect supporting documentationAlong with your application you will need to also submit certified copies of:award certificates academic transcripts formal identity papers, such as your passport, national identity card, and student visa.If your documents are not in English, you will need to supply a certified English translation.If you applying for on-campus study, and are under 18 years of age at time of application, we will require additional information to ensure that the requirements of Standard 5 - Younger Students, of the 2018 Education Services for Overseas Students (ESOS) National Code, and the USQ U18 International Student Care Framework are satisfied. Please complete and attach the U18 Welfare and Accommodation form to your USQ application for admission.4. Submit application There is no application or assessment fee to apply to study with USQ.Before you submit your application, make sure you have attached all certified supporting documentation. This will ensure we are able to provide a faster turnaround time for your application.Studying on-campus USQ is obligated to ensure that all students studying on-campus on a student visa are genuine students, who meet the Genuine Temporary Entrant (GTE) criteria outlined in the Department of Home Affairs Simplified Student Visa Framework (SSVF). Some of the factors that are considered under the existing requirement to be a genuine applicant for entry and study as a student include: English language proficiency; financial capacity; age requirements; and intention to comply with visa conditions. Please visit the Department of Home Affairs for further information. 
Our admissions team will consider all your application information and make an assessment as to whether the requirements are met in your particular case. Paper application form Our on-line application system has been designed to work on desktop or mobile devices. If you are unable to apply online, you can download our International Student application form and email to the admissions team. Application form (PDF 321 KB) Professional Development Single Courses Application Form (PDF 71 KB) Credit for previous study If you have studied or worked previously you be eligible for credit. Have a look at our Credit Calculator. You can apply for credit within the online application form. If you’re applying for credit you will need to provide further supporting documentation including: A copy of an approved course synopsis for the year in which the subject was successfully completed If your documents aren't in English, you'll need to supply a certified English translation. When to apply? USQ accepts applications all year round. USQ receives thousands of applications each year so it is important to apply early, to make sure you secure a place and to allow time for you to make your necessary study arrangements (e.g. visa, organise family and work commitments). Semester start dates are: Semester 1, 2018 - 26 February 2018 Semester 2, 2018 - 16 July 2018 Semester 3, 2018 - 19 November 2018 Semester 1, 2019 - 25 February 2019 Semester 2, 2019 - 15 July 2019How long will it take to have my application processed?Whilst USQ endeavors to process your application to study as soon as possible, the below guide provides examples of the typical application processing times, you can help us by ensuring you have supplied full information at time of applicatoin: Undergraduate applications: allow up to two weeks from the time all supporting certified documentation is submitted. where additional assessment requried for course credits and exemptions, this may take longer. 
Postgraduate coursework applications: allow two weeks from the time all supporting certified documentation is submitted. where additional assessment requried for course credits andexemptions, this may take longer. Postgraduate research applications: should allow a minimum of 25 working days for assessment, however this can be impacted by the availability of an appropriate supervisor. </p>"
item['university'] = university
item['url'] = url
item['degree_name'] = degree_name
item['degree_type'] = degree_type
item['degree_overview_en'] = degree_overview_en
item['career_en'] = career_en
item['programme_en'] = programme_en
item['start_date'] = start_date
item['duration'] = duration
item['location'] = location
item['modules_en'] = modules_en
item['tuition_fee'] = tuition_fee
item['tuition_fee_pre'] = tuition_fee_pre
item['apply_pre'] = apply_pre
item['deadline'] = deadline
item['rntry_requirements_en'] = rntry_requirements_en
item['work_experience_desc_en'] = work_experience_desc_en
item['average_score'] = average_score
item['ielts'] = ielts
item['ielts_r'] = ielts_r
item['ielts_w'] = ielts_w
item['ielts_s'] = ielts_s
item['ielts_l'] = ielts_l
item['toefl'] = toefl
item['toefl_r'] = toefl_r
item['toefl_w'] = toefl_w
item['toefl_s'] = toefl_s
item['toefl_l'] = toefl_l
item['apply_documents_en'] = apply_documents_en
item['apply_desc_en'] = apply_desc_en
yield item
| [
"[email protected]"
] | |
793418b32617fa1db2c52a0253125eee438cf45f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02714/s209800382.py | 708c8e61aee946e6022b85194cbf98402b9f0e67 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | N = int(input())
S = input()
R = []
G = []
B = []
for i in range(N):
if S[i] == 'R':
R.append(i+1)
elif S[i] == 'G':
G.append(i+1)
elif S[i] == 'B':
B.append(i+1)
lenb = len(B)
cnt = 0
for r in R:
for g in G:
up = max(r, g)
down = min(r, g)
diff = up - down
chk = 0
if up + diff <= N:
if S[up+diff-1] == 'B':
chk += 1
if down-diff >= 1:
if S[down-diff-1] == 'B':
chk += 1
if diff%2 == 0:
if S[int(up-diff/2-1)] == 'B':
chk += 1
cnt += lenb - chk
print(cnt)
| [
"[email protected]"
] | |
960f876bf5806f9ece30bfc58b45c9de7d2f9df6 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/effects/DrainLife.py | 49d379787f4ef5944afb56aa288361dc05fa29d0 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,075 | py | from panda3d.physics import BaseParticleEmitter, BaseParticleRenderer, ColorInterpolationManager
from panda3d.core import ColorBlendAttrib, ModelNode, Point3, Vec3, Vec4
# File: D (Python 2.4)
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
import random
from PooledEffect import PooledEffect
from EffectController import EffectController
class DrainLife(PooledEffect, EffectController):
    # Scale factor applied to the sprite card's X/Y render scales below.
    cardScale = 64.0
    def __init__(self):
        # Set up the pooled particle effect: load the halo card used as the
        # sprite texture, lazily create the shared render-level dummy node,
        # and build a single one-particle-system effect with a 'Vortex'
        # force group (the group is created empty — no forces are added).
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        model = loader.loadModel('models/effects/candleHalo')
        self.card = model.find('**/effectCandleHalo')
        # particleDummy is shared by all DrainLife instances (class-level
        # attribute, presumably initialised to None by PooledEffect — TODO
        # confirm); created once and configured for unlit, fog-free sprites.
        if not DrainLife.particleDummy:
            DrainLife.particleDummy = render.attachNewNode(ModelNode('DrainLifeParticleDummy'))
            DrainLife.particleDummy.setDepthWrite(0)
            DrainLife.particleDummy.setLightOff()
            DrainLife.particleDummy.setColorScaleOff()
            DrainLife.particleDummy.setFogOff()
        self.f = ParticleEffect.ParticleEffect('DrainLife')
        self.f.reparentTo(self)
        self.p0 = Particles.Particles('particles-1')
        self.p0.setFactory('PointParticleFactory')
        self.p0.setRenderer('SpriteParticleRenderer')
        self.p0.setEmitter('DiscEmitter')
        self.f.addParticles(self.p0)
        f0 = ForceGroup.ForceGroup('Vortex')
        self.f.addForceGroup(f0)
    def createTrack(self, rate = 1):
        # Configure the particle system and build the play/stop intervals.
        # NOTE(review): the 'rate' parameter is never used in this body.
        self.p0.setPoolSize(128)
        self.p0.setBirthRate(0.02)
        self.p0.setLitterSize(1)
        self.p0.setLitterSpread(0)
        self.p0.setSystemLifespan(0.0)
        self.p0.setLocalVelocityFlag(1)
        self.p0.setSystemGrowsOlderFlag(0)
        # Each particle lives exactly 2 seconds (no spread).
        self.p0.factory.setLifespanBase(2.0)
        self.p0.factory.setLifespanSpread(0.0)
        self.p0.factory.setMassBase(1.0)
        self.p0.factory.setMassSpread(0.0)
        self.p0.factory.setTerminalVelocityBase(400.0)
        self.p0.factory.setTerminalVelocitySpread(0.0)
        # Sprite renderer: fade in/out over the particle's life, using the
        # halo card loaded in __init__; X scale grows, Y scale is constant.
        self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
        self.p0.renderer.setUserAlpha(1.0)
        self.p0.renderer.setFromNode(self.card)
        self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p0.renderer.setXScaleFlag(1)
        self.p0.renderer.setYScaleFlag(1)
        self.p0.renderer.setAnimAngleFlag(0)
        self.p0.renderer.setInitialXScale(0.01 * self.cardScale)
        self.p0.renderer.setFinalXScale(0.02 * self.cardScale)
        self.p0.renderer.setInitialYScale(0.0400 * self.cardScale)
        self.p0.renderer.setFinalYScale(0.0400 * self.cardScale)
        self.p0.renderer.setNonanimatedTheta(0.0)
        self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p0.renderer.setAlphaDisable(0)
        self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingColor, ColorBlendAttrib.OOneMinusIncomingAlpha)
        # Colour ramps linearly from opaque red to near-transparent black.
        self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(1.0, 0.0, 0.0, 1.0), Vec4(0.0, 0.0, 0.0, 0.19), 1)
        # Disc emitter radiating outward from the origin with upward offset.
        self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p0.emitter.setAmplitude(1.0)
        self.p0.emitter.setAmplitudeSpread(0.0)
        self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 6.0))
        self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.p0.emitter.setRadius(0.598)
        # startEffect resets and starts emission; endEffect throttles the
        # birth rate (effectively stopping new particles), waits for the
        # remaining particles to die, then recycles this instance.
        self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.02), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy))
        self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Wait(7.0), Func(self.cleanUpEffect))
        self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
    def cleanUpEffect(self):
        # Stop the particle effect and return this instance to the pool.
        self.f.disable()
        self.detachNode()
        self.checkInEffect(self)
    def destroy(self):
        # Tear down both base classes (controller first, then pool entry).
        EffectController.destroy(self)
        PooledEffect.destroy(self)
| [
"[email protected]"
] | |
79352245c28258a3c73ad129aeba2840571d1a91 | 0f3146f6e44e43048dc030a6ad44def9201dbd29 | /alembic/versions/22d61f7599b_add_place_to_game.py | 22f2bab0c433989079875e17861d9334e4ec701f | [] | no_license | socek/basket | 30c7c4be753006a33b997c17cf6348a32b420cd6 | 30ba79a35f63fd1cf4a4cdaf4b3d21b063cfc1b6 | refs/heads/master | 2016-09-10T18:40:40.334233 | 2015-03-25T21:29:00 | 2015-03-25T21:29:24 | 30,159,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | """Add place to game
Revision ID: 22d61f7599b
Revises: 40903a23149
Create Date: 2015-02-09 18:07:27.752483
"""
# Revision identifiers, used by Alembic to order migrations:
# this revision (22d61f7599b) follows 40903a23149; no branch labels
# or cross-revision dependencies.
revision = '22d61f7599b'
down_revision = '40903a23149'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import Column, Integer, ForeignKey
def upgrade():
    # Link each game to an optional place via a nullable FK column
    # games.place_id -> places.id.
    op.add_column(
        'games', Column('place_id', Integer, ForeignKey('places.id')))
def downgrade():
op.drop_column('games', 'place_id')
| [
"[email protected]"
] | |
6d6c98dded9fe0f02ff06c1ab479d8ed526e17a7 | 591c79db90bf1805659986c0195eb7d06db7384a | /run.py | b726a6609b31a3e9a479a7b2934455d4594d5881 | [] | no_license | Arrotech/BookIt | a95c4df3238d38b3c2677f02a3dc0f1b4fac24d8 | bba8da70ce860cae85eeb28769a333858feece52 | refs/heads/gh-pages | 2022-12-12T17:18:15.834847 | 2019-08-22T02:34:51 | 2019-08-22T02:34:51 | 201,605,566 | 0 | 0 | null | 2022-12-08T01:48:15 | 2019-08-10T08:59:14 | Python | UTF-8 | Python | false | false | 526 | py | import os
from app import bookit_app
from flask_jwt_extended import JWTManager
from app.api.v1.models.database import Database
# Build the Flask app using the configuration named by the APP_SETTINGS
# environment variable (None falls through to bookit_app's default handling).
config_name = os.getenv('APP_SETTINGS')
app = bookit_app(config_name)
@app.cli.command()
def create():
    """Create tables."""
    # Invoked as `flask create`; builds the schema via the Database helper.
    Database().create_table()
@app.cli.command()
def admin():
    """Create an admin."""
    # Invoked as `flask admin`; seeds the admin account.
    Database().create_admin()
@app.cli.command()
def destroy():
    """Destroy tables."""
    # Invoked as `flask destroy`; drops the schema (destructive).
    Database().destroy_table()
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # must not be used in production.
    app.run(debug=True)
| [
"[email protected]"
] | |
33969ea54f17fc731748456b6eeebafe130c8366 | 944401a6292baa2d23b9738898e0b0cb199d0795 | /lib/python2.7/site-packages/IPython/core/splitinput.py | a1b18fdde858f8fc51e8d71958e8f3e5336c1ed8 | [
"Python-2.0"
] | permissive | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 4,790 | py | # encoding: utf-8
"""
Simple utility for splitting user input. This is used by both inputsplitter and
prefilter.
Authors:
* Brian Granger
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
import sys
from IPython.utils import py3compat
from IPython.utils.encoding import get_stream_enc
#-----------------------------------------------------------------------------
# Main function
#-----------------------------------------------------------------------------
# RegExp for splitting line contents into pre-char//first word-method//rest.
# For clarity, each group in on one line.
# WARNING: update the regexp if the escapes in interactiveshell are changed, as
# they are hardwired in.
# Although it's not solely driven by the regex, note that:
# ,;/% only trigger if they are the first character on the line
# ! and !! trigger if they are first char(s) *or* follow an indent
# ? triggers as first or last char.
line_split = re.compile("""
^(\s*) # any leading space
([,;/%]|!!?|\?\??)? # escape character or characters
\s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
# to correctly treat things like '?%magic'
(.*?$|$) # rest of line
""", re.VERBOSE)
def split_user_input(line, pattern=None):
    """Split user input into initial whitespace, escape character, function part
    and the rest.
    """
    # Normalise to unicode before any matching (reads stdin's encoding,
    # defaulting to utf-8).
    stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
    line = py3compat.cast_unicode(line, stdin_encoding)

    pattern = line_split if pattern is None else pattern
    matched = pattern.match(line)
    if matched:
        pre, esc, ifun, the_rest = matched.groups()
    else:
        # Fallback when the regex fails: split on the first whitespace run;
        # a line with no whitespace is all "function part".
        try:
            ifun, the_rest = line.split(None, 1)
        except ValueError:
            ifun, the_rest = line, u''
        pre = re.match('^(\s*)(.*)', line).groups()[0]
        esc = ""

    return pre, esc or '', ifun.strip(), the_rest.lstrip()
class LineInfo(object):
    """A single line of input and associated info.

    Includes the following as properties:

    line
      The original, raw line

    continue_prompt
      Is this line a continuation in a sequence of multiline input?

    pre
      Any leading whitespace.

    esc
      The escape character(s) in pre or the empty string if there isn't one.
      Note that '!!' and '??' are possible values for esc. Otherwise it will
      always be a single character.

    ifun
      The 'function part', which is basically the maximal initial sequence
      of valid python identifiers and the '.' character. This is what is
      checked for alias and magic transformations, used for auto-calling,
      etc. In contrast to Python identifiers, it may start with "%" and contain
      "*".

    the_rest
      Everything else on the line.
    """
    def __init__(self, line, continue_prompt=False):
        self.line            = line
        self.continue_prompt = continue_prompt
        # Delegate the actual parsing to the module-level splitter.
        self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)

        self.pre_char       = self.pre.strip()
        if self.pre_char:
            self.pre_whitespace = '' # No whitespace allowed before esc chars
        else:
            self.pre_whitespace = self.pre

    def ofind(self, ip):
        """Do a full, attribute-walking lookup of the ifun in the various
        namespaces for the given IPython InteractiveShell instance.

        Return a dict with keys: {found, obj, ospace, ismagic}

        Note: can cause state changes because of calling getattr, but should
        only be run if autocall is on and if the line hasn't matched any
        other, less dangerous handlers.

        Does cache the results of the call, so can be called multiple times
        without worrying about *further* damaging state.
        """
        return ip._ofind(self.ifun)

    def __str__(self):
        return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
| [
"[email protected]"
] | |
761d79644cbc78289b053f3ef1e65b89e42b8aca | a36f20ea67747e8656e2607bd033e20e1d9d9de6 | /ToRgbComparison.py | 6e69d722766834233b1cac1a607f8f96ddaf54f6 | [] | no_license | mfm24/miscpython | ae47de1bfe849d53243b9f2d47dc0f3bdb7acf10 | 0c02087a0e5864ee1d26f9f36390766cb0408e9f | refs/heads/master | 2020-04-06T07:08:43.414575 | 2017-03-12T00:06:59 | 2017-03-12T00:06:59 | 9,640,566 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | # what's the best way to convert a 2d float value into
# a rgb uint8 image for showing. We have a few options:
import numpy as np
import time
import sys
no_weave = '--no-weave' in sys.argv
def to_rgb1(im):
    # Benchmark variant: allocate the uint8 RGB buffer, then assign the
    # float image into each channel separately (three float->uint8 casts).
    # I think this will be slow
    w, h = im.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 0] = im
    ret[:, :, 1] = im
    ret[:, :, 2] = im
    return ret
def to_rgb1a(im):
    # Benchmark variant using a chained assignment. Intended to be faster
    # than variant 1 by truncating to uint8 only once — though Python's
    # chained assignment actually assigns the float `im` to each of the
    # three targets, so each channel still performs its own cast.
    w, h = im.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im
    return ret
def to_rgb1b(im):
    # Benchmark variant: one float->uint8 cast into channel 0, then cheap
    # uint8->uint8 copies into channels 1 and 2.
    # I would expect this to be identical to 1a
    w, h = im.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 0] = im
    ret[:, :, 1] = ret[:, :, 2] = ret[:, :, 0]
    return ret
def to_rgb2(im):
    # Benchmark variant: single assignment that broadcasts the (w, h, 1)
    # view across all three channels.
    # as 1, but we use broadcasting in one line
    w, h = im.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, :] = im[:, :, np.newaxis]
    return ret
def to_rgb3(im):
    # Benchmark variant: stack first, convert after.
    # we can use dstack and an array copy
    # this has to be slow, we create an array with 3x the data we need
    # and truncate afterwards
    return np.asarray(np.dstack((im, im, im)), dtype=np.uint8)
def to_rgb3a(im):
    # Benchmark variant: convert once, then stack three references to the
    # same uint8 copy.
    # this explicitly converts to np.uint8 once and is short
    return np.dstack([im.astype(np.uint8)] * 3)
def to_rgb3b(im):
    # Benchmark variant: as 3a, plus an extra copy into contiguous
    # C-ordered memory.
    return np.dstack([im.astype(np.uint8)] * 3).copy(order='C')
if not no_weave:
    # scipy.weave compiles the inline C on first call; it is Python-2-only,
    # hence the --no-weave escape hatch above.
    def to_rgb4(im):
        # Benchmark variant: the channel-replication loop written in C.
        # we use weave to do the assignment in C code
        # this only gets compiled on the first call
        import scipy.weave as weave
        w, h = im.shape
        ret = np.empty((w, h, 3), dtype=np.uint8)
        code = """
        int impos=0;
        int retpos=0;
        for(int j=0; j<Nim[1]; j++)
        {
            for (int i=0; i<Nim[0]; i++)
            {
                unsigned char d=im[impos++];
                ret[retpos++] = d;
                ret[retpos++] = d;
                ret[retpos++] = d;
            }
        }
        """
        weave.inline(code, ["im", "ret"])
        return ret
def to_rgb5(im):
    # Benchmark variant: replicate the greyscale image into three channels
    # with np.repeat along a new trailing axis.
    # BUG FIX: the original called im.resize(...), which reshaped the
    # *caller's* array in place — a lasting side effect on the shared test
    # image. A np.newaxis view gives the same (w, h, 1) layout without
    # mutating the input.
    return np.repeat(im[:, :, np.newaxis].astype(np.uint8), 3, 2)
"""identical to 4, removing
def to_rgb5(im):
# we can use the same array 3 times
return np.dstack([np.asarray(im, dtype=np.uint8)] * 3)
def to_rgb6(im):
# is [x]*3 any differnt from (x,)*3
return np.dstack((im.astype(np.uint8),) * 3)
"""
# Benchmark driver (Python 2 syntax: bare ``print`` statements).
# Collect every to_rgb* function defined above; eval is safe here because the
# names come from this module's own globals().
funcs = {x:eval(x) for x in globals() if "to_rgb" in x}
print "testing Numpy v",np.version.version
print "on Python ", sys.version
for size in [64,256,1024,2048]:
    # Random grayscale image with float values in [0, 256).
    s = np.random.uniform(256, size=(size,size))
    # confirm all functions produce the same output:
    ref = to_rgb1(s)
    # Scale the repetition count down for large images; ``or 1`` guards
    # against a zero count (Python 2 integer division).
    times = min(3*10**7/size**2, 10**5) or 1
    print "\n\nFor Size",size,"\n==============="
    for name, func in sorted(funcs.items()):
        out = func(s)
        assert np.array_equal(ref, out)
        # NOTE(review): comparing raw buffer objects checks byte layout, not
        # just values -- flags non-contiguous results.
        if ref.data != out.data:
            print "Array data not in order"
        start = time.time()
        for i in range(times):
            func(s)
        end=time.time()
        print "%s took \t%0.3f ms average for %d times" % (
            name, (1000*(end-start)/times),times)
# see files in this folder for results
| [
"[email protected]"
] | |
41bc2f5ace0fa35654a900c92e31d5c792a9b8bd | 0305b3424ad2e89d64d6629d068caaa298419ea2 | /examples/list_files.py | 8ba0b3129c7528c90ab6fb786bfbaed0772a5f8c | [] | no_license | relman/sandcage-api-python | f8c0938aa8ddb03a10d434b3f82df26dda6ed906 | cd4b3e3ab7fc9a67cef33365e3d2b6eb806dc748 | refs/heads/master | 2021-01-12T17:23:02.319025 | 2016-10-21T13:31:26 | 2016-10-21T13:31:26 | 71,555,322 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src import SandCage
# Query the SandCage API for a directory listing and dump the raw response.
client = SandCage(api_key='<INSERT_API_KEY>')
params = {
    'directory': 'root',  # optional
    'page': 1,  # optional
    'results_per_page': 100  # optional
}
response = client.list_files(params)
print(response.status_code)
print(response.json())
| [
"[email protected]"
] | |
bb69cb024ba98b5bee8a021ba68a257649a41338 | e848d0a1c941a3fb0db9f4f483b6d099895bff2b | /15-1_compute_MFE.py | 12d1ec5c894366c252c467513d659914119639b1 | [] | no_license | hexavier/tRNA_viruses | edfba3da03a8fac0659850d17b25f6f581f9fe12 | 802bf91e5270b72ae12c4d510e2241abac24e3a2 | refs/heads/master | 2021-05-18T13:01:24.968359 | 2020-12-14T11:24:27 | 2020-12-14T11:24:27 | 251,246,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | # -*- coding: utf-8 -*-
import pandas as pd
import RNA
import numpy as np
import re
def get_codons(seq,frame):
    """Split ``seq``, read from offset ``frame``, into complete 3-letter codons.

    A trailing fragment shorter than three characters is discarded.
    """
    reading_frame = seq[frame:]
    # Step three characters at a time; the upper bound len - 2 guarantees
    # every slice is a full codon, so no length filter is needed.
    return [reading_frame[i:i + 3] for i in range(0, len(reading_frame) - 2, 3)]
#%% Load data
# Per-protein metadata (index = protein IDs) and the codon->amino-acid table.
metadata = pd.read_csv("data/refseq_humanvirus_CoCoPUT.tsv",index_col=0, sep="\t")
codontab = pd.read_csv("data/codons_table.tab", sep="\t", index_col=0)
# Fasta sequences parsing
# NOTE(review): plain open/close -- a ``with`` block would be safer, left as is.
text_file = open("data/refseq_humanvirus.fasta", "r")
allfasta = text_file.read().split('>')
text_file.close()
# Map FASTA header line -> concatenated sequence (newlines stripped).
seqs = dict(zip([s.split("\n")[0] for s in allfasta],["".join(s.split("\n")[1:]) for s in allfasta]))
#%% Create output table
# Initialize structures
# One MFE column for the wild type, ten columns for codon-shuffled controls.
mfe_df = pd.DataFrame(columns = ["WT"], index = metadata.index)
mfe_random_df = pd.DataFrame(columns = np.arange(0,10), index = metadata.index)
# Count codons
for p in metadata.index:
    # Find sequence
    # Match by substring: assumes each protein ID appears in exactly one
    # FASTA header -- TODO confirm; only the first hit is used.
    idx = [s for s in seqs.keys() if p in s]
    seq = seqs[idx[0]]
    # Compute MFE of WT
    # RNA.fold returns (secondary structure, minimum free energy).
    (ss, mfe) = RNA.fold(seq)
    mfe_df.loc[p,"WT"] = mfe
    # Randomize sequence preserving codons and protein
    seqcodons = get_codons(seq,0)
    seqaa = np.array([codontab.loc[c,"AA"] if c in codontab.index else "nonAA" for c in seqcodons])
    for n in range(0,10):
        seqrand = np.array(seqcodons)
        # Permute synonymous codons within each amino acid so the encoded
        # protein (and codon usage) is unchanged.
        for aa in list(set(codontab.AA)):
            seqrand[seqaa==aa] = np.random.permutation(seqrand[seqaa==aa])
        (ss, mfe) = RNA.fold("".join(seqrand))
        mfe_random_df.loc[p,n] = mfe
#%% Save output
output_frame = pd.concat([metadata.iloc[:,0:9],mfe_df,mfe_random_df],axis=1)
output_frame.to_csv("results/MFE_humanvirus2.tsv", sep="\t")
| [
"[email protected]"
] | |
c14983e62b1143c21a28afa754a2a817b6d39590 | daf094a1e2fc8731cc4ac36e27237a3f3064ae3c | /src/config.py | 8468eba4d92d2c6f6c239e945d95ce309089e5eb | [] | no_license | haodongyu/AIM2020-Real-World-Image-Super-Resolution-Challenge-track-2_x3-upscale | db23f68e374bdd8e33e82a653ea3cea92052e7e6 | 922e4276ab2184c28e126b544c18d4140b556bcf | refs/heads/master | 2022-12-09T19:38:52.045409 | 2020-08-14T09:15:18 | 2020-08-14T09:15:18 | 287,469,703 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | dir_pools = ['../experiment/AIM_EDSR_x3_TEST_model_best_forensemble', '../experiment/AIM_DDet_x3_TEST2_model12_forensemble', '../experiment/AIM_DDet_x3_TEST2_model18_forensemble',
'../experiment/AIM_WDDet_x3_TEST_model_best_forensemble']
# Every ensemble source directory currently contributes with equal weight
# (the original spelled out each dir_pools entry mapped to 1.0 explicitly).
weights = dict.fromkeys(dir_pools, 1.0)
| [
"[email protected]"
] | |
a8fe86b663e981417bf12193e0e81410ff97bac9 | 5bd0a6890f2313c61a8390e54247e59d27f92bb1 | /hed/validator/onset_validator.py | f44b291baa48f117076e11418dbf159c69e68f23 | [
"MIT"
] | permissive | hed-standard/hed-python | 840d9760f783d0fe059e51ca6c5899f50d21173e | b871cae44bdf0ee68c688562c3b0af50b93343f5 | refs/heads/master | 2023-08-30T23:30:59.506164 | 2023-07-10T14:14:05 | 2023-07-10T14:14:05 | 188,481,236 | 5 | 30 | MIT | 2023-09-13T11:26:23 | 2019-05-24T20:06:26 | Python | UTF-8 | Python | false | false | 4,563 | py | from hed.models.model_constants import DefTagNames
from hed.models.hed_group import HedGroup
from hed.errors.error_reporter import ErrorHandler
from hed.errors.error_types import OnsetErrors
class OnsetValidator:
    """ Validates onset/offset pairs. """

    def __init__(self, def_dict, run_full_onset_checks=True):
        """Create a validator.

        Parameters:
            def_dict: mapping of definition names to definition entries
                (entries expose ``takes_value``) -- presumably a hed DefDict;
                TODO confirm with callers.
            run_full_onset_checks (bool): when False, only per-group structure
                is checked and cross-string onset/offset ordering is skipped.
        """
        self._defs = def_dict
        # lowercase def name -> original-case name, for defs with an open onset.
        self._onsets = {}
        self._run_full_onset_checks = run_full_onset_checks

    def validate_onset_offset(self, hed_string_obj):
        """ Validate onset/offset

        Parameters:
            hed_string_obj (HedString): The hed string to check.

        Returns:
            list: A list of issues found in validating onsets (i.e., out of order onsets, unknown def names).
        """
        onset_issues = []
        for found_onset, found_group in self._find_onset_tags(hed_string_obj):
            # NOTE(review): an empty anchor aborts the whole string (return,
            # not continue) -- confirm this early exit is intentional.
            if not found_onset:
                return []

            def_tags = found_group.find_def_tags()
            if not def_tags:
                onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_NO_DEF_TAG_FOUND, found_onset)
                continue

            if len(def_tags) > 1:
                onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_TOO_MANY_DEFS,
                                                          tag=def_tags[0][0],
                                                          tag_list=[tag[0] for tag in def_tags[1:]])
                continue

            # Get all children but def group and onset/offset, then validate #/type of children.
            def_tag, def_group, _ = def_tags[0]
            if def_group is None:
                def_group = def_tag
            children = [child for child in found_group.children if
                        def_group is not child and found_onset is not child]
            # Onsets may carry at most one extra (group) child; offsets none.
            max_children = 1
            if found_onset.short_base_tag == DefTagNames.OFFSET_ORG_KEY:
                max_children = 0
            if len(children) > max_children:
                onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_WRONG_NUMBER_GROUPS,
                                                          def_tag,
                                                          found_group.children)
                continue

            if children:
                # Make this a loop if max_children can be > 1
                child = children[0]
                if not isinstance(child, HedGroup):
                    onset_issues += ErrorHandler.format_error(OnsetErrors.ONSET_TAG_OUTSIDE_OF_GROUP,
                                                              child,
                                                              def_tag)

            # At this point we have either an onset or offset tag and it's name
            onset_issues += self._handle_onset_or_offset(def_tag, found_onset)

        return onset_issues

    def _find_onset_tags(self, hed_string_obj):
        """Return (anchor_tag, group) pairs for top-level temporal tags."""
        return hed_string_obj.find_top_level_tags(anchor_tags=DefTagNames.TEMPORAL_KEYS)

    def _handle_onset_or_offset(self, def_tag, onset_offset_tag):
        """Check one temporal tag against the def dictionary and onset ordering.

        Returns a (possibly empty) list of formatted issues.
        """
        is_onset = onset_offset_tag.short_base_tag == DefTagNames.ONSET_ORG_KEY
        full_def_name = def_name = def_tag.extension
        # A slash means the def carries a placeholder value: "name/value".
        placeholder = None
        found_slash = def_name.find("/")
        if found_slash != -1:
            placeholder = def_name[found_slash + 1:]
            def_name = def_name[:found_slash]

        def_entry = self._defs.get(def_name)
        if def_entry is None:
            return ErrorHandler.format_error(OnsetErrors.ONSET_DEF_UNMATCHED, tag=def_tag)
        # The def must take a value exactly when a placeholder was supplied.
        if bool(def_entry.takes_value) != bool(placeholder):
            return ErrorHandler.format_error(OnsetErrors.ONSET_PLACEHOLDER_WRONG, tag=def_tag,
                                             has_placeholder=bool(def_entry.takes_value))

        if self._run_full_onset_checks:
            if is_onset:
                # onset can never fail as it implies an offset
                self._onsets[full_def_name.lower()] = full_def_name
            else:
                # Not an onset: either an offset or (by elimination) an inset.
                is_offset = onset_offset_tag.short_base_tag == DefTagNames.OFFSET_ORG_KEY
                if full_def_name.lower() not in self._onsets:
                    if is_offset:
                        return ErrorHandler.format_error(OnsetErrors.OFFSET_BEFORE_ONSET, tag=def_tag)
                    else:
                        return ErrorHandler.format_error(OnsetErrors.INSET_BEFORE_ONSET, tag=def_tag)
                elif is_offset:
                    # Offset closes the open onset for this def name.
                    del self._onsets[full_def_name.lower()]

        return []
| [
"[email protected]"
] | |
d3efe2b01dcb733bf1b4582a419bd37c7cb56f56 | d47ca1953395e0070357992bbc1123f95015dad3 | /Asfaleia/rotor_two.py | efd98a5afbd6096ef7bad168862dba1174f4d839 | [] | no_license | Horef/Asfaleia-or-Modern-Enigma | d8ba62873dbe055aadeb29cf4621d397dff3ffb2 | dc9f5994174873fd6866fb0b8b195af626ef7c38 | refs/heads/master | 2020-06-19T05:27:20.261025 | 2019-07-23T16:31:29 | 2019-07-23T16:31:29 | 196,580,463 | 0 | 0 | null | 2019-07-20T19:39:31 | 2019-07-12T13:02:56 | Python | UTF-8 | Python | false | false | 11,006 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 14:22:17 2019
@author: Sergiy Horef
"""
from rotor import Rotor
class Rotor_Two(Rotor):
    """Second rotor type of the machine.

    Each ``val_*_shuffler`` is a fixed wiring table: 13 disjoint letter
    pairs covering a-z, one table per rotor position (1-26).
    ``shuffle_letters`` applies the table selected by the rotor's current
    position.
    """

    def __init__(self, nr, val):
        # Base class presumably stores the position as ``current_value`` and
        # the chain link as ``next_rotor`` -- TODO confirm in Rotor.__init__.
        super().__init__(nr, val)
        self.val_one_shuffler = [('f','i'), ('z','p'), ('m','b'),
                                 ('u','v'), ('c','h'), ('g','e'),
                                 ('l','t'), ('a','j'), ('n','q'),
                                 ('y','o'), ('x','k'), ('s','d'),
                                 ('r','w')]
        self.val_two_shuffler = [('q','z'), ('r','s'), ('a','v'),
                                 ('o','d'), ('e','p'), ('i','j'),
                                 ('b','f'), ('m','x'), ('g','t'),
                                 ('l','y'), ('k','c'), ('n','h'),
                                 ('w','u')]
        self.val_three_shuffler = [('v','m'), ('t','r'), ('a','x'),
                                   ('l','s'), ('b','p'), ('z','q'),
                                   ('u','i'), ('d','g'), ('h','c'),
                                   ('n','e'), ('y','f'), ('k','j'),
                                   ('o','w')]
        self.val_four_shuffler = [('a','k'), ('z','w'), ('o','l'),
                                  ('h','f'), ('x','q'), ('e','s'),
                                  ('c','t'), ('d','p'), ('v','g'),
                                  ('b','i'), ('n','m'), ('u','y'),
                                  ('r','j')]
        # NOTE(review): identical to val_four_shuffler -- looks like a
        # copy-paste; confirm the duplication is intentional.
        self.val_five_shuffler = [('a','k'), ('z','w'), ('o','l'),
                                  ('h','f'), ('x','q'), ('e','s'),
                                  ('c','t'), ('d','p'), ('v','g'),
                                  ('b','i'), ('n','m'), ('u','y'),
                                  ('r','j')]
        self.val_six_shuffler = [('y','a'), ('e','d'), ('i','k'),
                                 ('m','s'), ('w','q'), ('r','l'),
                                 ('z','x'), ('v','t'), ('f','b'),
                                 ('p','o'), ('n','g'), ('c','j'),
                                 ('h','u')]
        self.val_seven_shuffler = [('b','r'), ('j','e'), ('k','c'),
                                   ('a','h'), ('l','o'), ('x','d'),
                                   ('q','g'), ('f','p'), ('t','m'),
                                   ('v','n'), ('u','y'), ('w','s'),
                                   ('z','i')]
        self.val_eight_shuffler = [('y','s'), ('n','v'), ('w','f'),
                                   ('e','a'), ('b','i'), ('g','z'),
                                   ('u','j'), ('o','m'), ('x','h'),
                                   ('q','r'), ('k','l'), ('p','d'),
                                   ('c','t')]
        self.val_nine_shuffler = [('o','f'), ('z','p'), ('e','n'),
                                  ('q','j'), ('l','y'), ('k','r'),
                                  ('u','g'), ('h','v'), ('t','i'),
                                  ('m','x'), ('s','b'), ('a','w'),
                                  ('c','d')]
        self.val_ten_shuffler = [('k','p'), ('j','o'), ('l','w'),
                                 ('r','i'), ('e','s'), ('f','h'),
                                 ('a','q'), ('b','t'), ('c','m'),
                                 ('d','x'), ('y','g'), ('v','n'),
                                 ('z','u')]
        self.val_eleven_shuffler = [('d','q'), ('e','l'), ('u','x'),
                                    ('v','j'), ('a','m'), ('s','g'),
                                    ('w','f'), ('k','b'), ('i','t'),
                                    ('r','c'), ('o','p'), ('n','z'),
                                    ('y','h')]
        self.val_twelve_shuffler = [('m','n'), ('a','h'), ('e','l'),
                                    ('k','t'), ('z','i'), ('b','g'),
                                    ('j','r'), ('o','f'), ('w','y'),
                                    ('d','p'), ('v','u'), ('c','s'),
                                    ('q','x')]
        self.val_thirteen_shuffler = [('p','n'), ('k','f'), ('l','d'),
                                      ('e','j'), ('w','r'), ('c','q'),
                                      ('a','v'), ('x','s'), ('o','h'),
                                      ('y','t'), ('i','u'), ('b','m'),
                                      ('z','g')]
        self.val_fourteen_shuffler = [('k','g'), ('q','h'), ('s','m'),
                                      ('p','b'), ('j','x'), ('t','d'),
                                      ('y','a'), ('e','i'), ('f','c'),
                                      ('n','v'), ('o','w'), ('r','l'),
                                      ('u','z')]
        self.val_fifteen_shuffler = [('r','n'), ('c','j'), ('z','u'),
                                     ('o','m'), ('f','v'), ('w','a'),
                                     ('e','t'), ('h','l'), ('x','y'),
                                     ('p','b'), ('s','i'), ('k','d'),
                                     ('g','q')]
        self.val_sixteen_shuffler = [('w','h'), ('p','x'), ('f','d'),
                                     ('k','r'), ('m','i'), ('l','s'),
                                     ('g','q'), ('v','a'), ('u','y'),
                                     ('o','z'), ('c','t'), ('e','j'),
                                     ('b','n')]
        self.val_seventeen_shuffler = [('t','h'), ('s','r'), ('k','z'),
                                       ('x','c'), ('d','l'), ('f','n'),
                                       ('e','v'), ('i','u'), ('b','j'),
                                       ('a','g'), ('w','p'), ('o','y'),
                                       ('m','q')]
        self.val_eighteen_shuffler = [('s','i'), ('f','v'), ('y','g'),
                                      ('u','n'), ('e','p'), ('x','c'),
                                      ('j','o'), ('b','h'), ('l','w'),
                                      ('t','d'), ('m','a'), ('q','z'),
                                      ('r','k')]
        self.val_nineteen_shuffler = [('v','j'), ('u','s'), ('c','t'),
                                      ('a','x'), ('f','z'), ('r','l'),
                                      ('h','g'), ('y','w'), ('q','b'),
                                      ('k','i'), ('e','m'), ('o','d'),
                                      ('p','n')]
        self.val_twenty_shuffler = [('u','n'), ('v','z'), ('c','o'),
                                    ('q','a'), ('w','l'), ('y','h'),
                                    ('p','t'), ('f','m'), ('e','k'),
                                    ('d','s'), ('b','g'), ('x','i'),
                                    ('j','r')]
        self.val_twenty_one_shuffler = [('h','x'), ('j','g'), ('n','m'),
                                        ('p','y'), ('f','w'), ('r','c'),
                                        ('i','e'), ('d','k'), ('q','z'),
                                        ('s','a'), ('t','l'), ('v','o'),
                                        ('b','u')]
        self.val_twenty_two_shuffler = [('k','x'), ('c','n'), ('w','v'),
                                        ('m','g'), ('d','z'), ('t','u'),
                                        ('l','f'), ('p','o'), ('r','e'),
                                        ('q','a'), ('b','s'), ('h','i'),
                                        ('j','y')]
        self.val_twenty_three_shuffler = [('q','h'), ('s','k'), ('m','d'),
                                          ('a','b'), ('w','x'), ('p','n'),
                                          ('f','u'), ('l','y'), ('g','e'),
                                          ('j','c'), ('o','r'), ('i','v'),
                                          ('t','z')]
        self.val_twenty_four_shuffler = [('e','h'), ('o','z'),
                                         ('y','v'), ('f','c'),
                                         ('s','n'), ('l','k'),
                                         ('m','x'), ('a','q'),
                                         ('t','r'), ('i','j'),
                                         ('u','w'), ('g','d'),
                                         ('b','p')]
        self.val_twenty_five_shuffler = [('z','v'), ('o','b'),
                                         ('g','f'), ('k','p'),
                                         ('h','y'), ('q','e'),
                                         ('l','j'), ('t','u'),
                                         ('m','c'), ('r','s'),
                                         ('w','x'), ('i','n'),
                                         ('d','a')]
        self.val_twenty_six_shuffler = [('x','t'), ('z','p'),
                                        ('o','r'), ('q','b'),
                                        ('s','n'), ('l','j'),
                                        ('h','u'), ('i','f'),
                                        ('w','e'), ('v','y'),
                                        ('k','a'), ('c','m'),
                                        ('g','d')]
        # Rotor position (1-26) -> the swap table to apply.
        self.shuffler_dict = {
                        1: self.val_one_shuffler,
                        2: self.val_two_shuffler,
                        3: self.val_three_shuffler,
                        4: self.val_four_shuffler,
                        5: self.val_five_shuffler,
                        6: self.val_six_shuffler,
                        7: self.val_seven_shuffler,
                        8: self.val_eight_shuffler,
                        9: self.val_nine_shuffler,
                        10: self.val_ten_shuffler,
                        11: self.val_eleven_shuffler,
                        12: self.val_twelve_shuffler,
                        13: self.val_thirteen_shuffler,
                        14: self.val_fourteen_shuffler,
                        15: self.val_fifteen_shuffler,
                        16: self.val_sixteen_shuffler,
                        17: self.val_seventeen_shuffler,
                        18: self.val_eighteen_shuffler,
                        19: self.val_nineteen_shuffler,
                        20: self.val_twenty_shuffler,
                        21: self.val_twenty_one_shuffler,
                        22: self.val_twenty_two_shuffler,
                        23: self.val_twenty_three_shuffler,
                        24: self.val_twenty_four_shuffler,
                        25: self.val_twenty_five_shuffler,
                        26: self.val_twenty_six_shuffler
                        }

    def shuffle_letters(self, letters):
        """Swap the values in the ``letters`` mapping in place, pair by pair,
        using the table for the rotor's current position."""
        for pair in self.shuffler_dict[self.current_value]:
            letters[pair[0]], letters[pair[1]] = letters[pair[1]], letters[pair[0]]

    def __str__(self):
        # Human-readable description including the current rotor position.
        return 'This is a {self.__class__.__name__} with a value of {self.current_value}'.format(self=self)

    def __repr__(self):
        # NOTE(review): relies on ``self.next_rotor`` being set by the base
        # class -- confirm Rotor defines it.
        return '{self.__class__.__name__}({self.next_rotor})'.format(self=self)
"[email protected]"
] | |
4c03c38e1d85e0f37265f479b943d5d31c025960 | ef0f7dea29f6e55ef38c113477d0354ced39ba77 | /Exercise4.py | 124dbfa4e2c4a8e05840f09c554b400819bbbd83 | [] | no_license | JoannaEbreso/PythonProgress | e7613f5f72b9430fbf1c0aadd50328336d578475 | 0f3d9ddc6595c34eda5668015c1a7ba8e460af8a | refs/heads/master | 2022-11-26T08:16:07.199769 | 2020-06-27T15:14:48 | 2020-06-27T15:14:48 | 271,935,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | x= "interesting"
def Jojo():
x = "Okay"
print("Python is " + x)
Jojo()
print("Python is " + x)
b = "JoannaEbreso"
print (b[-5:-2])
print (b[2:5])
x = "Hello World"
print(len(x))
| [
"[email protected]"
] | |
164529bff82c266cbf49fc91fc09fb7279596099 | 75089747a38a99f39de2fc437c938d0dc17878f9 | /phoneAndEmail.py | a7d3ab72fd5d9b1d99bb0aa6b7c7a1fa7ee2c517 | [] | no_license | ShiJianjun0915/myprojects | c6be70561257897407b9bd603487032526abfa06 | be0610a822d1f608f0b0f9f91e4d2baa3c77b40e | refs/heads/master | 2020-06-13T01:21:10.168367 | 2016-12-27T03:45:56 | 2016-12-27T03:45:56 | 75,469,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | '''phoneAndEmail.py - Finds phone numbers and email address
on the clipboard
'''
import pyperclip, re
# Create phone number regex.
# The code below relies on findall() tuple indices 1 (area code), 3 (first
# three digits), 5 (last four digits) and 8 (extension digits), so the
# grouping here must not change.
phoneRegex = re.compile(r'''(
    (\d{3}|\(\d{3}\))?              # area code
    (\s|-|\.)?                      # separator
    (\d{3})                         # first 3 digits
    (\s|-|\.)                       # separator
    (\d{4})                         # last 4 digits
    (\s*(ext|x|ext\.)\s*(\d{2,5}))? # extension; '.' escaped so it only matches a literal dot
    )''', re.VERBOSE)
#Create email regex
emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
[a-zA-Z0-9.-]+ #domain name
(\.[a-zA-Z]{2,4}) #dot-something
)''', re.VERBOSE)
# Find phone numbers and email addresses in the clipboard text.
text = str(pyperclip.paste())

matches = []
for groups in phoneRegex.findall(text):
    # Rebuild a normalised number (area-prefix-line) plus optional extension.
    phone_num = '-'.join([groups[1], groups[3], groups[5]])
    if groups[8] != '':
        phone_num = phone_num + ' x' + groups[8]
    matches.append(phone_num)
matches.extend(groups[0] for groups in emailRegex.findall(text))

# Copy the results back to the clipboard, or report that nothing was found.
if matches:
    joined = '\n'.join(matches)
    pyperclip.copy(joined)
    print('Copied to clipboard:')
    print(joined)
else:
    print('No phone numbers or email address found.')
"[email protected]"
] | |
8f9941fcdbe83805a264e49150d3c81a137fa39b | 20ac7310b4f7e5fed1351035381d973d593ef635 | /Capstone/score.py | 48b04576f10f699ca9fffd1444a81c20146feb1e | [] | no_license | Nupurgopali/Microsoft-Azure-ML-Repo | 6839d397030527d076d57c7beaf9e24112ce5059 | a5c412ee67518f8e8b6dc0c6e64203cbee0b5f5e | refs/heads/main | 2023-08-03T23:45:39.313358 | 2021-09-07T17:57:33 | 2021-09-07T17:57:33 | 403,601,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import os
import pickle
import numpy as np
import pandas as pd
import joblib
import azureml.automl.core
from azureml.automl.core.shared import logging_utilities, log_server
from azureml.telemetry import INSTRUMENTATION_KEY
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
# Schema samples consumed by the inference_schema decorators on run() below:
# they describe/validate the expected request payload and response shape.
input_sample = pd.DataFrame({"area_mean": pd.Series([0.0], dtype="float64"), "area_se": pd.Series([0.0], dtype="float64"), "area_worst": pd.Series([0.0], dtype="float64"), "compactness_mean": pd.Series([0.0], dtype="float64"), "compactness_se": pd.Series([0.0], dtype="float64"), "compactness_worst": pd.Series([0.0], dtype="float64"), "concave points_mean": pd.Series([0.0], dtype="float64"), "concave points_se": pd.Series([0.0], dtype="float64"), "concave points_worst": pd.Series([0.0], dtype="float64"), "concavity_mean": pd.Series([0.0], dtype="float64"), "concavity_se": pd.Series([0.0], dtype="float64"), "concavity_worst": pd.Series([0.0], dtype="float64"), "fractal_dimension_mean": pd.Series([0.0], dtype="float64"), "fractal_dimension_se": pd.Series([0.0], dtype="float64"), "fractal_dimension_worst": pd.Series([0.0], dtype="float64"), "perimeter_mean": pd.Series([0.0], dtype="float64"), "perimeter_se": pd.Series([0.0], dtype="float64"), "perimeter_worst": pd.Series([0.0], dtype="float64"), "radius_mean": pd.Series([0.0], dtype="float64"), "radius_se": pd.Series([0.0], dtype="float64"), "radius_worst": pd.Series([0.0], dtype="float64"), "smoothness_mean": pd.Series([0.0], dtype="float64"), "smoothness_se": pd.Series([0.0], dtype="float64"), "smoothness_worst": pd.Series([0.0], dtype="float64"), "symmetry_mean": pd.Series([0.0], dtype="float64"), "symmetry_se": pd.Series([0.0], dtype="float64"), "symmetry_worst": pd.Series([0.0], dtype="float64"), "texture_mean": pd.Series([0.0], dtype="float64"), "texture_se": pd.Series([0.0], dtype="float64"), "texture_worst": pd.Series([0.0], dtype="float64")})
output_sample = np.array([0])
# Best-effort telemetry/logging setup: a failure here must not block scoring.
# Fixed: was a bare ``except:``, which also swallowed SystemExit and
# KeyboardInterrupt.
# NOTE(review): if enable_telemetry raises, ``logger`` is never bound and
# init() would later fail with NameError -- consider binding logger first.
try:
    log_server.enable_telemetry(INSTRUMENTATION_KEY)
    log_server.set_verbosity('INFO')
    logger = logging.getLogger('azureml.automl.core.scoring_script')
except Exception:
    pass
def init():
    """Load the deployed model into the global ``model`` once per worker."""
    global model
    # This name is model.id of model that we want to deploy deserialize the model file back
    # into a sklearn model
    # AZUREML_MODEL_DIR is set by the Azure ML runtime; model.pkl sits at its root.
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model.pkl')
    path = os.path.normpath(model_path)
    path_split = path.split(os.sep)
    # NOTE(review): indices 1 and 2 assume a <root>/<name>/<version>/... path
    # layout -- verify against the actual deployment directory structure.
    log_server.update_custom_dimensions({'model_name': path_split[1], 'model_version': path_split[2]})
    try:
        logger.info("Loading model from path.")
        model = joblib.load(model_path)
        logger.info("Loading successful.")
    except Exception as e:
        # Log the traceback through the AutoML utilities, then re-raise so the
        # service reports an unhealthy init.
        logging_utilities.log_traceback(e, logger)
        raise
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score one request.

    ``data`` arrives as a DataFrame shaped like ``input_sample``.  Returns a
    JSON string: {"result": [...]} on success, {"error": "<message>"} on
    failure (errors are reported to the caller rather than raised).
    """
    try:
        result = model.predict(data)
        return json.dumps({"result": result.tolist()})
    except Exception as e:
        result = str(e)
        return json.dumps({"error": result})
| [
"[email protected]"
] | |
154a6f9d6411492e873a3cc1df6a8692bc50eb5d | e953c138d3808d92fcc9848824985be5bc42f034 | /python/memory/object.py | 3bac548d07cf37e38f2bc1406886a6af6f427a63 | [] | no_license | hotoku/samples | 1cf3f7006ae8ba9bae3a52113cdce6d1e1d32c5a | ce0d95d87e08386d9eb83d7983bd2eaff0682793 | refs/heads/main | 2023-08-09T09:05:15.185012 | 2023-08-04T09:29:06 | 2023-08-04T09:29:06 | 222,609,036 | 0 | 0 | null | 2022-03-30T01:44:03 | 2019-11-19T04:35:27 | Jupyter Notebook | UTF-8 | Python | false | false | 943 | py | from dataclasses import dataclass
import sys
import numpy as np
@dataclass
class Week:
    """A calendar week, identified by year and week number."""

    year: int
    week: int
# sys.getsizeof reports only the *shallow* size of each object; the "# =>"
# values below were observed on one CPython/numpy build and are
# platform-dependent.
w1 = Week(2020, 1)
print(sys.getsizeof(w1))  # => 48
a1 = np.arange(10)
print(sys.getsizeof(a1))  # => 184
a2 = np.arange(20)
print(sys.getsizeof(a2))  # => 264
a3 = np.arange(30)
print(sys.getsizeof(a3))  # => 344
# Arrays of Week instances have dtype=object, so they only hold pointers.
aw1 = np.array([Week(2020, i) for i in range(10)])
print(sys.getsizeof(aw1))  # => 184
aw2 = np.array([Week(2020, i) for i in range(20)])
print(sys.getsizeof(aw2))  # => 264
# (translated) np.array only stores pointers here, so the real size is unclear
@dataclass
class Int3:
    """Three-int record used to compare instance sizes against Week."""

    i1: int
    i2: int
    i3: int
# Same shallow size as Week despite an extra field (size observed on one build).
i31 = Int3(300, 300, 300)
print(sys.getsizeof(i31))  # => 48
# (translated) So we still can't tell how much memory Week actually uses...
# Local Variables:
# lsp-pyright-venv-path: "/Users/hotoku/.pyenv/versions/3.9.5/envs/global"
# End:
| [
"[email protected]"
] | |
2f0904fcc369ba8c6ad295abf797c73aaab0022f | 5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04 | /examples/remarketing/add_remarketing_action.py | 8a5c8e9783e7b8cdb2cc6096712711c4039041a0 | [
"Apache-2.0"
] | permissive | pdsing/google-ads-python | 0ce70227cd6bb13a25cd13de0ca05c2636279ecd | ee2c059498d5679a0d1d9011f3795324439fad7c | refs/heads/master | 2023-05-04T18:39:57.412453 | 2021-05-21T16:38:17 | 2021-05-21T16:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,941 | py | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates usage of remarketing actions.
A new remarketing action will be created for the specified customer, and its
associated tag snippets will be retrieved.
"""
import argparse
import sys
from uuid import uuid4
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, page_size):
    """Create a remarketing action, then fetch and display its tag snippets."""
    resource_name = _add_remarketing_action(client, customer_id)
    print(f'Created remarketing action "{resource_name}".')

    remarketing_action = _query_remarketing_action(
        client, customer_id, resource_name, page_size
    )
    _print_remarketing_action_attributes(remarketing_action)
# [START add_remarketing_action]
def _add_remarketing_action(client, customer_id):
    """Create a remarketing action with a unique name; return its resource name.

    On API failure, prints every error detail and exits the process with
    status 1.
    """
    remarketing_action_service = client.get_service("RemarketingActionService")
    remarketing_action_operation = client.get_type("RemarketingActionOperation")

    remarketing_action = remarketing_action_operation.create
    # uuid4 keeps repeated runs from colliding on the action name.
    remarketing_action.name = f"Remarketing action #{uuid4()}"

    try:
        remarketing_action_response = (
            remarketing_action_service.mutate_remarketing_actions(
                customer_id=customer_id,
                operations=[remarketing_action_operation],
            )
        )
    except GoogleAdsException as ex:
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)

    return remarketing_action_response.results[0].resource_name
    # [END add_remarketing_action]
def _query_remarketing_action(client, customer_id, resource_name, page_size):
    """Retrieves the previously created remarketing action with tag snippets.

    Args:
        client: the Google Ads client
        customer_id: the Google Ads customer ID
        resource_name: the resource name of the remarketing action to query
        page_size: the number of rows to return per page

    Returns:
        the found remarketing action
    """
    # [START add_remarketing_action_1]
    query = f"""
        SELECT
          remarketing_action.id,
          remarketing_action.name,
          remarketing_action.tag_snippets
        FROM remarketing_action
        WHERE remarketing_action.resource_name = '{resource_name}'"""
    # [END add_remarketing_action_1]

    googleads_service_client = client.get_service("GoogleAdsService")
    search_request = client.get_type("SearchGoogleAdsRequest")
    search_request.customer_id = customer_id
    search_request.query = query
    search_request.page_size = page_size

    results = googleads_service_client.search(search_request)

    try:
        # NOTE(review): if the query returns no rows, the [0] raises
        # IndexError, which this handler does not catch -- confirm intended.
        return list(results)[0].remarketing_action
    except GoogleAdsException as ex:
        # Print the full error details and abort, mirroring the create path.
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
def _print_remarketing_action_attributes(remarketing_action):
print(
f"Remarketing action has ID {remarketing_action.id} and name "
f'"{remarketing_action.name}". \nIt has the following '
"generated tag snippets:\n"
)
for tag_snippet in remarketing_action.tag_snippets:
tracking_code_type = tag_snippet.type_.name
tracking_code_page_format = tag_snippet.page_format.name
print("=" * 80)
print(
f'Tag snippet with code type "{tracking_code_type}", and code '
f'page format "{tracking_code_page_format}" has the following:\n'
)
print("-" * 80)
print(f"Global site tag: \n\n{tag_snippet.global_site_tag}")
print("-" * 80)
print(f"Event snippet: \n\n{tag_snippet.event_snippet}")
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v7")
parser = argparse.ArgumentParser(
description="Adds a remarketing action for specified customer."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
# The following argument(s) are optional.
parser.add_argument(
"-p",
"--page_size",
type=int,
default=1000,
help="Number of pages to be returned in the response.",
)
args = parser.parse_args()
main(googleads_client, args.customer_id, args.page_size)
| [
"[email protected]"
] | |
6c06d9d4395b12a17700fbdba79c49314433e47d | c183f5c5683ed9b7ce5be3adee3c27a6cc7880e6 | /classifier/classifier.py | 17838776add289a815b0aed8d5a971fd139d6d4a | [] | no_license | trivalchang/utility | 961d6680c5d0df981ff8eb7c550e844481e8e7b3 | 3cf3156d949492d75b2c38b925ae2ab985f20371 | refs/heads/master | 2021-09-07T21:48:32.368018 | 2018-03-01T14:15:08 | 2018-03-01T14:15:08 | 115,503,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py |
from __future__ import print_function
import numpy as np
import cv2
import os
import sys
from sklearn.svm import SVC
import pickle
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + '/../')
from feature.HOG import HOG
from fileOp.h5_dataset import h5_dump_dataset
class Classifier():
    """Thin wrapper around a pickled scikit-learn-style classifier."""

    model = None
    classifier_type = None

    def __init__(self, path, type='SVC'):
        """Load a pickled model from ``path``.

        SECURITY: pickle executes arbitrary code on load -- only load model
        files from trusted sources.
        """
        # Fixed: the original used open(path).read() without closing the
        # file, leaking the handle; ``with`` guarantees it is closed.
        with open(path, "rb") as f:
            self.model = pickle.load(f)
        self.classifier_type = type

    def predict(self, feature):
        """Classify a single feature vector; returns the model's prediction batch."""
        # The underlying model expects a 2-D input (a list of samples).
        return self.model.predict([feature])
"[email protected]"
] | |
dfafd1ae71ca03b108e11ecaad512794deeac438 | 8e5617355d4df1481a2518a32d167a40eaabaa0a | /rains_or_reason/chessBoardCellColor.py | fda3616d3087e3c6d91add84e255cc0da8d00260 | [] | no_license | bryanfree66/codefights_solutions | 32d39604b955daff89e7d99c1021b0f39d5f1b0e | 5b0e57ea5b88c6b6c2e7888e8aec777f56a214be | refs/heads/master | 2020-03-21T08:22:26.322219 | 2018-06-22T20:12:25 | 2018-06-22T20:12:25 | 138,339,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def chessBoardCellColor(cell1, cell2):
cell1_list = list(cell1)
cell2_list = list(cell2)
cell1_list[0] = ord(cell1_list[0])
cell1_list[1] = int(cell1_list[1])
cell2_list[0] = ord(cell2_list[0])
cell2_list[1] = int(cell2_list[1])
if abs(cell2_list[0] - cell1_list[0]) % 2 == 0 and \
abs(cell2_list[1] - cell1_list[1]) % 2 == 0:
return True
elif abs(cell2_list[0] - cell1_list[0]) % 2 != 0 and \
abs(cell2_list[1] - cell1_list[1]) % 2 != 0:
return True
else:
return False
| [
"[email protected]"
] | |
dab916fe4919f6e44f5e2dccfd5b3627e6537b09 | cdc9d6574c16f6be0dc39e4fe1e29610a0e1b3ac | /src/polling_service/urls.py | 1455219884e0a4b5f561d4eced028321e21d8646 | [] | no_license | Kirill67tyar/polling-service | 6672699c9fbcc3d5902f0b15e8a69e0caa9f5a45 | a5746acd8b0cc5342e54c99acb5a9f071f5d7c12 | refs/heads/master | 2023-06-23T03:13:53.160972 | 2021-07-28T15:41:50 | 2021-07-28T15:41:50 | 375,479,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | """polling_service URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import debug_toolbar
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    # REST API plus the browsable-API session auth endpoints.
    path('api/', include('api.urls', namespace='api')),
    path('api-auth/', include('rest_framework.urls')),
    # simplejwt: obtain an access/refresh token pair, and refresh the access token.
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('', include('polls.urls', namespace='polls')),
]

if settings.DEBUG:
    # django-debug-toolbar is only wired up in development.
    urlpatterns.append(path('__debug__/', include(debug_toolbar.urls)))
# [email protected]
# alskdjfhg
# {
# "refresh": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoicmVmcmVzaCIsImV4cCI6MTYyNDMwNzM3OSwianRpIjoiNjMxYzFmOGNkMGNhNDE0ODhhMTllY2U5N2YxYjJhNjQiLCJ1c2VyX2lkIjoxfQ.oSw-LyBfy5canJ5awn-hRcDG5eyz3BpsJRaPRyUwOYs",
# "access": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoiYWNjZXNzIiwiZXhwIjoxNjI0MjIxMjc5LCJqdGkiOiIxNzJjZWIyZWM4NTk0MDUxYTMxYTQzMjgyYmNjNjdjMiIsInVzZXJfaWQiOjF9.NbwkqeZBv8_wwB1g-KzMWI8-rr-Rna629bdaLXEXCoQ"
# }
# important: with this setup, authentication_classes must be disabled in the handlers
# the refresh token lives for one day by default
# the access token lives for 5 minutes by default
# changed in the settings
# configurable in the settings
# Obtain the refresh and access tokens:
# http://127.0.0.1:8000/api/token/ - POST request
# request body:
# {"email": "[email protected]",
# "password": "alskdjfhg"}
# access protected content:
# in Postman choose the authorization type "Bearer Token" and paste the access token into the field
# the access token expires after a few minutes
# refresh the access token once it has expired:
# http://127.0.0.1:8000/api/token/refresh/ - POST request
# request body:
# {"refresh": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoicmVmcmVzaCIsImV4cCI6MTYyNDMwNTkyNiwianRpIjoiYzlmMjA5Y2NmODRlNDdjOGExZDdjMjZlZjI5Y2FkZjciLCJ1c2VyX2lkIjoxfQ.bVv5YYN8ucakAlTk2pIs-Cx_79_uYzZ-OQLZlaZ1FkE"}
# or a different refresh token if you have rotated it.
"[email protected]"
] | |
b9244b65c7a9a429e4cb1dd85862f9e5ec1f16ff | cf0f5661309176d5465e6f2172f3eaad8a6f9fcd | /script/tbgp.py | 28a779cb0de58fbda5411a182a29b1363576adb2 | [] | no_license | afred123/tools | e68f5b9b25be009c88222d102e55f36c84d641bb | 9ab42fe937a076c7df9bd69910122a16a609f263 | refs/heads/master | 2021-05-16T16:34:59.118998 | 2018-03-14T01:51:14 | 2018-03-14T01:51:14 | 120,058,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | import urllib;
import urllib2;
import json
def get(url):
    """GET `url` using HTTP digest auth (admin/admin) and print the body."""
    username="admin"
    password="admin"
    auth=urllib2.HTTPDigestAuthHandler()
    # The realm string must match the one advertised by the server.
    auth.add_password("R Open Networking Platform", url, username, password)
    opener=urllib2.build_opener(auth)
    urllib2.install_opener(opener)
    res_data=urllib2.urlopen(url)
    res=res_data.read()
    # NOTE(review): the response object is never closed here.
    print res
def post(requrl, data):
username="admin"
password="admin"
reqdata = json.dumps(data)
auth=urllib2.HTTPDigestAuthHandler()
auth.add_password("R Open Networking Platform", requrl, username, password)
opener=urllib2.build_opener(auth)
headers = {"Content-type": "application/json","Accept": "application/json"}
req = urllib2.Request(url=requrl,data=reqdata, headers=headers)
urllib2.install_opener(opener)
try:
res_data=urllib2.urlopen(req)
res=res_data.read()
f.write(res +'\n')
except urllib2.URLError as e:
if hasattr(e, 'code'):
print 'Error code:',e.code
elif hasattr(e, 'reason'):
print 'Reason:',e.reason
finally:
if res_data:
res_data.close()
if __name__ == "__main__":
    # get("http://172.18.106.32:9200/_all/log/_search")
    # file_object = open('data.txt')
    # try:
    #     all_the_text = file_object.read()
    # finally:
    #     file_object.close()
    # Fire ~99 identical POSTs at the get-ddos-community RPC endpoint.
    data={}
    for i in range(1,100):
        # NOTE(review): the value bound to `res` is never used afterwards.
        res=post("http://172.18.34.43:8181/restconf/operations/route-optimize:get-ddos-community",data)
| [
"[email protected]"
] | |
94fc4d3dfcfd9eed28cd72599b4126980bae625f | f86c79bfbbe1c13ee5dafe4a7c177b31977f5213 | /app/wgame/apps.py | 3d1a9ddc2bdeeb6fd098ecc945d81cd3867e70a2 | [] | no_license | 1786078681/WindG | 94ffac6d52adcd3a53c8c741fef097f986485d49 | e8e907a4e474d09d0e74eef606f2ce4075800b75 | refs/heads/master | 2021-05-14T19:01:11.219422 | 2018-01-03T07:31:44 | 2018-01-03T07:31:44 | 116,098,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class WgameConfig(AppConfig):
    """Django application configuration for the `wgame` app."""
    name = 'wgame'
| [
"[email protected]"
] | |
414d31ffebf9f07684fa2abe24dac050dc7b38d3 | a7446017c233a0077d2527286c6957e529d4d101 | /controllers/default.py | 5427308ade4e9f9c9055c6e81f6c384a9ad14fe8 | [
"LicenseRef-scancode-public-domain"
] | permissive | damlaozdemir/web2pymusicproject | 6713cecc5f655bf0fc2ccd7a7378c38654e6f927 | 497d0829ea7e96e6c49a5886c10834fac9389277 | refs/heads/master | 2021-01-17T19:13:30.491496 | 2016-07-03T18:15:30 | 2016-07-03T18:15:30 | 62,509,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,101 | py | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
import datetime
songs_data = [ {'id':1, "Singer's Name": "Rihanna", "Album": "Talk That Talk", "singerimg": "rihanna.jpg ","albumimg": "talkthattalk.jpg", "releaseyear": 2011,
'title':["You da One","Where Have You Been","We Found Love", "Talk That Talk","Cockiness (Love It)","Birthday Cake","We All Want Love","Drunk on Love","Roc Me Out","Watch n' Learn","Farewell " ]},
{'id':2, "Singer's Name": "Beyonce", "Album": "Dangerously in Love", "singerimg": "beyonce.jpg ","albumimg": "beyonce.jpg", "releaseyear": 2003,
'title':["Crazy In Love","Naughty Girl","That's How You Like It", "Baby Boy","Hip Hop Star","Be With You","Me, Myself and I","Yes","Signs","Speechless","The Closer I Get To You","Dangerously in Love 2","Beyonce(Interlude)","Gift From Virgo","Daddy"]},
{'id':3, "Singer's Name": "Chris Brown", "Album": "Chris Brown", "singerimg": "chris.jpg ","albumimg": "Chris_brown.jpg", "releaseyear": 2005,
'title':["Intro","Run It","Yo","Young Love","Cockiness (Love It)","Gimme That","Ya Man Ain't Me","Winner","Ain't No Way","What's My Name","Is This Love?","Poppin'-Main","Just Fine","Say Goodbye","Run It!(Remix)","Thank You"]},
{'id':4, "Singer's Name": "Jay Z", "Album": " Reasonable Doubt ", "singerimg": "Jay.jpg ","albumimg": "Reasonable_Doubt.jpg", "releaseyear": 1996,
'title':["Can't Knock the Hustle","Politics as Usual","Brooklyn's Finest", "Dead Presidents II","Feelin' It","D'Evils","22 Two's","Can I Live","Ain't No Nigga","Friend or Foe","Coming of Age","Cashmare Thoughts","Bring It On","Regrets"]},
{'id':5, "Singer's Name": "Cem Karaca", "Album": "Nerde Kalmıştık?", "singerimg": "karaca.jpg ","albumimg": "Nerde_Kalmistik.jpg", "releaseyear": 1992,
'title':["Raptiye Rap Rap","Islak Islak","Sen Duymadın","Bu Biçim", "Sen de Başını Alıp Gitme","Niyazi Köfteler","Karabağ","Herkes Gibisin","Nöbetçinin Türküsü ","Ömrüm","Suskunluk"]}
]
def index():
    """Landing page: hand the view an empty message context."""
    return {"msg": {}}
def about():
    """About page: the view only needs an empty message context."""
    return {"msg": {}}
def music():
    """List every album (all Songsdata rows) for the music page."""
    rows = db(db.Songsdata.id >= 0).select()
    return dict(rows=rows)
def song():
    """Show one album (id in request.args(0)) with its tracks and comments.

    Returns an empty page context when no id is supplied or no album
    matches; the previous version fell through and returned None when
    the argument was missing, which web2py rejects as a controller
    result.
    """
    if request.args(0):
        album = db(db.Songsdata.id == request.args(0)).select().first()
        if album:
            songs = db(db.song.Songsdataid == album.id).select(db.song.songname, db.song.id)
            comments = db(db.comments.usersid == db.users.id).select(
                db.users.username, db.comments.datecomment, db.comments.comments)
            return dict(song=album, songs=songs, comments=comments)
    return dict()
def addcomment():
    """Store a comment for the song in request.args(0) by the logged-in
    user, then redirect back to that song's page; returns an empty
    message context when the user is not signed in."""
    if request.args(0) and session.user:
        user=db(db.users.username==session.user.username).select().first()
        db.comments.insert(songid=request.args(0),usersid=user.id,comments=request.vars.text,datecomment=datetime.datetime.utcnow())
        address='/MusicProject/default/song/'+request.args(0)
        return redirect(address)
    return dict(msg={})
def addalbum():
    """Admin CRUD grid over the Songsdata (album) table."""
    return dict(grid=SQLFORM.grid(db.Songsdata,user_signature=False))
def addsong():
    """Admin CRUD grid over the song (track) table."""
    return dict(grid=SQLFORM.grid(db.song,user_signature=False))
def addsinger():
    """Admin CRUD grid over the singers table."""
    return dict(grid=SQLFORM.grid(db.singers,user_signature=False))
def top():
    """Top-charts page; hands the view an empty message context."""
    return dict(msg={})
def report():
    """Report page; hands the view an empty message context."""
    return dict(msg={})
def bio():
    """Biography page; hands the view an empty message context."""
    return dict(msg={})
def photos():
    """Album-art gallery: expose every Songsdata row to the view."""
    rows = db(db.Songsdata).select()
    return dict(rows=rows)
def events():
    """Events page; hands the view an empty message context."""
    return dict(msg={})
def NewReleases():
    """Albums released in 2015 or later, exposed to the view as `news`."""
    rows = db(db.Songsdata.releaseyear >= 2015 ).select()
    return dict(news=rows)
def play():
    """Return the selected song rows for playback.

    When no song id is given or the visitor is not signed in, hand the
    view an empty row set; the previous version fell through and
    returned None, which web2py rejects as a controller result.
    """
    if request.args(0) and session.user:
        rows = db(db.song.id == request.args(0)).select()
        return dict(rows=rows)
    return dict(rows=[])
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    http://..../[app]/default/user/manage_users (requires membership in the admin group)
    http://..../[app]/default/user/bulk_register
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    return dict(form=auth())
@cache.action()
def download():
    """
    allows downloading of files uploaded into the db (does streaming)
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
def uploads():
    """Alias of download(): stream an uploaded file from the db."""
    return response.download(request, db)
def call():
    """
    exposes web services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
| [
"[email protected]"
] | |
687dc68675faaef8c8287596a6dfd322077860af | 5907197df97016ef4a0910942f6732b1e9919140 | /build/mybot_description_m/catkin_generated/pkg.develspace.context.pc.py | 709ac4644d4c42ea89b8d9951793693d40b1cbe4 | [] | no_license | maxwelldc/catkin_ws | 81b848dc9a4ca38da7b4f2fa1a796489027350e7 | 1af78be65ad649ff03df4daeb6a67fae95aa9c26 | refs/heads/master | 2022-11-29T10:56:05.296889 | 2020-08-06T12:38:56 | 2020-08-06T12:38:56 | 285,525,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mybot_description"
PROJECT_SPACE_DIR = "/home/winkle/sim/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
193bec28711eb5e27fbb2774fb57798dc55dc51c | 57e5c72166c694b4022fe7d804c27fd7a7a47210 | /v1/python/download-latest-event-logs.py | a5b51d60dd689b4e49b1311a653981a03a66c187 | [
"Apache-2.0"
] | permissive | tauchw/api-examples | 771add7e481705e5840b7221eb5a24a1301e37a6 | fd2045d60c3eeca491b5033906fb62cf999531d9 | refs/heads/master | 2023-06-01T13:35:52.693404 | 2021-05-18T19:04:37 | 2021-05-18T19:04:37 | 250,577,441 | 0 | 0 | Apache-2.0 | 2020-03-27T15:55:10 | 2020-03-27T15:55:09 | null | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/python
#
# Copyright 2019 Edgewise Networks
# SPDX-License-Identifier: Apache-2.0
#
import os
import glob
import requests
import yaml

from edgeutils import ApiSession

logs_dir = './logs'

with open('config.yaml') as f:
    config = yaml.safe_load(f)
api = ApiSession(config)

# Get event logs list, sort, and limit to newest 5
event_logs = api.get('audit-event-exports/recent')
event_logs = sorted(event_logs, key=lambda x: x['filename'])[-5:]

# Compare by bare filename: glob() returns paths like 'logs/<name>' and
# str.strip('./') only removes leading '.'/'/' characters, so the old
# comparison never matched and every log was downloaded again.
local_logs = [os.path.basename(x) for x in glob.glob('{}/audit_events.*.json'.format(logs_dir))]
event_logs = [x for x in event_logs if x['filename'] not in local_logs]

# Download new log files to logs_dir
if not os.path.exists(logs_dir):
    os.makedirs(logs_dir)
for event_log in event_logs:
    response = requests.get(event_log['url'])
    with open('{}/{}'.format(logs_dir, event_log['filename']), 'wb') as f:
        f.write(response.content)
    print("Wrote log '{}'".format(event_log['filename']))
| [
"[email protected]"
] | |
cd4054f332a12c5a22567202de541651c1c6fed6 | 093b0d6668230bd7242ac8362e0d797c3d1728ef | /ui_qmlstyles.py | 9f5c47ae2bf7e34b3986bd7b426798f1637d5054 | [] | no_license | lynx-r1/multiqml | 8b289fb917b788b0b89c8a83e9d28762e131a80c | cad89dfaea2c9372520739c07f6b1815e2c3427e | refs/heads/master | 2021-01-01T20:49:04.655067 | 2010-11-16T03:10:03 | 2010-11-16T03:10:03 | 1,084,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,476 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qmlstyles.ui'
#
# Created: Wed Jan 7 03:32:46 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_QmlStylesForm(object):
def setupUi(self, QmlStylesForm):
QmlStylesForm.setObjectName("QmlStylesForm")
QmlStylesForm.resize(896, 514)
self.horizontalLayout_2 = QtGui.QHBoxLayout(QmlStylesForm)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.pbnLoadStyle = QtGui.QPushButton(QmlStylesForm)
self.pbnLoadStyle.setObjectName("pbnLoadStyle")
self.verticalLayout.addWidget(self.pbnLoadStyle)
self.pbnDefaultStyle = QtGui.QPushButton(QmlStylesForm)
self.pbnDefaultStyle.setObjectName("pbnDefaultStyle")
self.verticalLayout.addWidget(self.pbnDefaultStyle)
self.pbnLoadColormapFromBand = QtGui.QPushButton(QmlStylesForm)
self.pbnLoadColormapFromBand.setObjectName("pbnLoadColormapFromBand")
self.verticalLayout.addWidget(self.pbnLoadColormapFromBand)
self.pbnClose = QtGui.QPushButton(QmlStylesForm)
self.pbnClose.setObjectName("pbnClose")
self.verticalLayout.addWidget(self.pbnClose)
self.label = QtGui.QLabel(QmlStylesForm)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.chbxSelectAllLayers = QtGui.QCheckBox(QmlStylesForm)
self.chbxSelectAllLayers.setChecked(True)
self.chbxSelectAllLayers.setObjectName("chbxSelectAllLayers")
self.horizontalLayout.addWidget(self.chbxSelectAllLayers)
self.chbxViewLayers = QtGui.QCheckBox(QmlStylesForm)
self.chbxViewLayers.setObjectName("chbxViewLayers")
self.horizontalLayout.addWidget(self.chbxViewLayers)
self.verticalLayout.addLayout(self.horizontalLayout)
self.lvRasterLayers = QtGui.QListView(QmlStylesForm)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lvRasterLayers.sizePolicy().hasHeightForWidth())
self.lvRasterLayers.setSizePolicy(sizePolicy)
self.lvRasterLayers.setMaximumSize(QtCore.QSize(180, 16777215))
self.lvRasterLayers.setObjectName("lvRasterLayers")
self.verticalLayout.addWidget(self.lvRasterLayers)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.chbxTransparentLayer = QtGui.QCheckBox(QmlStylesForm)
self.chbxTransparentLayer.setObjectName("chbxTransparentLayer")
self.verticalLayout_2.addWidget(self.chbxTransparentLayer)
self.label_2 = QtGui.QLabel(QmlStylesForm)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.lvTransparency = QtGui.QListView(QmlStylesForm)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(100)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lvTransparency.sizePolicy().hasHeightForWidth())
self.lvTransparency.setSizePolicy(sizePolicy)
self.lvTransparency.setMaximumSize(QtCore.QSize(100, 16777215))
self.lvTransparency.setObjectName("lvTransparency")
self.verticalLayout_2.addWidget(self.lvTransparency)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setHorizontalSpacing(1)
self.gridLayout.setObjectName("gridLayout")
self.vslValueTransparency = QtGui.QSlider(QmlStylesForm)
self.vslValueTransparency.setMinimum(0)
self.vslValueTransparency.setMaximum(255)
self.vslValueTransparency.setProperty("value", QtCore.QVariant(255))
self.vslValueTransparency.setOrientation(QtCore.Qt.Vertical)
self.vslValueTransparency.setInvertedAppearance(False)
self.vslValueTransparency.setInvertedControls(False)
self.vslValueTransparency.setTickPosition(QtGui.QSlider.TicksBelow)
self.vslValueTransparency.setTickInterval(5)
self.vslValueTransparency.setObjectName("vslValueTransparency")
self.gridLayout.addWidget(self.vslValueTransparency, 1, 0, 5, 1)
self.label_5 = QtGui.QLabel(QmlStylesForm)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(30, 205, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 2, 1, 1, 1)
self.lbTransparencyValuePercant = QtGui.QLabel(QmlStylesForm)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbTransparencyValuePercant.sizePolicy().hasHeightForWidth())
self.lbTransparencyValuePercant.setSizePolicy(sizePolicy)
self.lbTransparencyValuePercant.setMinimumSize(QtCore.QSize(37, 0))
self.lbTransparencyValuePercant.setMaximumSize(QtCore.QSize(37, 16777215))
self.lbTransparencyValuePercant.setFrameShape(QtGui.QFrame.NoFrame)
self.lbTransparencyValuePercant.setObjectName("lbTransparencyValuePercant")
self.gridLayout.addWidget(self.lbTransparencyValuePercant, 3, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(30, 205, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 4, 1, 1, 1)
self.label_3 = QtGui.QLabel(QmlStylesForm)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 5, 1, 1, 1)
self.sbxValueTransparency = QtGui.QSpinBox(QmlStylesForm)
self.sbxValueTransparency.setMaximum(255)
self.sbxValueTransparency.setProperty("value", QtCore.QVariant(255))
self.sbxValueTransparency.setObjectName("sbxValueTransparency")
self.gridLayout.addWidget(self.sbxValueTransparency, 0, 0, 1, 2)
self.horizontalLayout_2.addLayout(self.gridLayout)
self.canvasFrame = QtGui.QFrame(QmlStylesForm)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvasFrame.sizePolicy().hasHeightForWidth())
self.canvasFrame.setSizePolicy(sizePolicy)
self.canvasFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.canvasFrame.setFrameShadow(QtGui.QFrame.Raised)
self.canvasFrame.setObjectName("canvasFrame")
self.horizontalLayout_2.addWidget(self.canvasFrame)
self.label.setBuddy(self.lvRasterLayers)
self.label_2.setBuddy(self.lvTransparency)
self.label_5.setBuddy(self.vslValueTransparency)
self.lbTransparencyValuePercant.setBuddy(self.vslValueTransparency)
self.label_3.setBuddy(self.vslValueTransparency)
self.retranslateUi(QmlStylesForm)
QtCore.QObject.connect(self.vslValueTransparency, QtCore.SIGNAL("valueChanged(int)"), self.sbxValueTransparency.setValue)
QtCore.QObject.connect(self.sbxValueTransparency, QtCore.SIGNAL("valueChanged(int)"), self.vslValueTransparency.setValue)
QtCore.QMetaObject.connectSlotsByName(QmlStylesForm)
QmlStylesForm.setTabOrder(self.pbnLoadStyle, self.lvRasterLayers)
QmlStylesForm.setTabOrder(self.lvRasterLayers, self.pbnDefaultStyle)
QmlStylesForm.setTabOrder(self.pbnDefaultStyle, self.pbnLoadColormapFromBand)
QmlStylesForm.setTabOrder(self.pbnLoadColormapFromBand, self.pbnClose)
QmlStylesForm.setTabOrder(self.pbnClose, self.lvTransparency)
QmlStylesForm.setTabOrder(self.lvTransparency, self.vslValueTransparency)
def retranslateUi(self, QmlStylesForm):
QmlStylesForm.setWindowTitle(QtGui.QApplication.translate("QmlStylesForm", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.pbnLoadStyle.setText(QtGui.QApplication.translate("QmlStylesForm", "Load Style ...", None, QtGui.QApplication.UnicodeUTF8))
self.pbnDefaultStyle.setText(QtGui.QApplication.translate("QmlStylesForm", "Restory Default Style", None, QtGui.QApplication.UnicodeUTF8))
self.pbnLoadColormapFromBand.setText(QtGui.QApplication.translate("QmlStylesForm", "Load Colormap From Band", None, QtGui.QApplication.UnicodeUTF8))
self.pbnClose.setText(QtGui.QApplication.translate("QmlStylesForm", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("QmlStylesForm", "Raster Layers:", None, QtGui.QApplication.UnicodeUTF8))
self.chbxSelectAllLayers.setText(QtGui.QApplication.translate("QmlStylesForm", "Select all", None, QtGui.QApplication.UnicodeUTF8))
self.chbxViewLayers.setText(QtGui.QApplication.translate("QmlStylesForm", "View", None, QtGui.QApplication.UnicodeUTF8))
self.chbxTransparentLayer.setText(QtGui.QApplication.translate("QmlStylesForm", "Transnparent", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("QmlStylesForm", "Transparent\n"
"pixels:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("QmlStylesForm", "Full", None, QtGui.QApplication.UnicodeUTF8))
self.lbTransparencyValuePercant.setText(QtGui.QApplication.translate("QmlStylesForm", "100%", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("QmlStylesForm", "None", None, QtGui.QApplication.UnicodeUTF8))
| [
"[email protected]"
] | |
1b53a2e22af9c79f7cb5fdc38488abbc5cb31b78 | bfbdea4f266287542b543414311e65cc8a878130 | /TEXT_CLASSIFICATION/Transformer/common/json_dump.py | e65581116655328f16e939ce5b00785215abf13b | [] | no_license | wushishang/complex-order | aad040c62314aafe82f918773a6a4631e403fb2a | 95c0426724681455d84d73398e4aa2b188d93991 | refs/heads/master | 2023-08-19T12:42:55.497278 | 2021-10-30T05:21:44 | 2021-10-30T05:21:44 | 400,369,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,475 | py | import ast
import enum
import logging
import re
import time
import numpy as np
import pandas as pd
import torch
from common.helper import tdiv, print_stats
class JsonDump:
    """Append-only structured logger.

    add() writes one Python-dict literal per line to the target file;
    the read* static methods parse such files back (via ast.literal_eval
    or regexes) into pandas/numpy structures.
    """

    def __init__(self, filename):
        # print_stats comes from common.helper (imported at module level).
        print_stats("creating_json_dump", to=filename)
        self.logger = self.get_logger(filename)
        self.start_time = time.time()

    def get_logger(self, path):
        """Return a non-propagating DEBUG logger that appends to `path`."""
        logger = logging.getLogger(path)
        logger.propagate = False
        logger.setLevel(logging.DEBUG)
        # logging.getLogger() returns one shared instance per name, so a
        # second JsonDump on the same path used to attach a second
        # FileHandler and duplicate every logged line; only attach once.
        if not logger.handlers:
            fh = logging.FileHandler(path, mode='a')
            fh.setLevel(logging.DEBUG)
            formatter = logging.Formatter('%(message)s')
            fh.setFormatter(formatter)
            logger.addHandler(fh)
        return logger

    @staticmethod
    def _format(t):
        """Normalize one (key, value) pair into literal_eval-safe types."""
        k, v = t
        if torch.is_tensor(v):
            v = float(v)
        if type(v) == tuple:
            v = tdiv(v)  # tdiv from common.helper
        if isinstance(v, enum.Enum):
            v = str(v.name)
        if isinstance(v, np.generic):
            v = v.item()
        if isinstance(k, enum.Enum):
            k = str(k.name)
        return k, v

    def add(self, **info):
        """Write the keyword arguments as one normalized dict literal."""
        self.logger.info(dict(map(JsonDump._format, info.items())))

    def no_format_add(self, **info):
        # NOTE(review): this logs the repr of dict_items, which read()
        # cannot literal_eval back - confirm this is intended.
        self.logger.info(info.items())

    def str_add(self, my_str):
        """Write a raw string line verbatim."""
        self.logger.info(my_str)

    @staticmethod
    def read(file, ret_df=True):
        """Parse a dump file; return a DataFrame, or (columns, values)."""
        with open(file, "r") as fp:
            lines = fp.readlines()
        parsed_lines = list(map(ast.literal_eval, lines))
        df = pd.DataFrame(parsed_lines)
        if ret_df:
            return df
        return df.columns.values, df.values

    @staticmethod
    def read_column(file, column, tl):
        """Extract a single column, converting each value with `tl`."""
        with open(file, "r") as fp:
            lines = fp.readlines()
        # Compile once instead of per line.
        pattern = re.compile(f"'{column}': (.*?),")
        return [tl(pattern.findall(line)[0]) for line in lines]

    @staticmethod
    def read_norms(file, itr):
        """Parse per-line gradient-norm arrays into (itr, epoch, norm) rows."""
        with open(file, "r") as fp:
            df = []
            lines = fp.readlines()
            for line in lines:
                norms = np.array(list(map(float, re.compile("gnorm.*\[(.*)\].*]").findall(line)[0].split(", "))))
                epoch = float(re.compile("\('epoch', (.*?)\)").findall(line)[0])
                df.append(np.column_stack([np.full_like(norms, itr), np.full_like(norms, epoch), norms])[:10])
        return np.concatenate(df, 0)
| [
"[email protected]"
] | |
34bb715248b0e43afb20ee6346d489b37bbbfaaa | 5844d42f07303a6e2f164cee7f30514d0b48d9d6 | /APIRestDjango/asgi.py | b9f0bcfd8eb96df0051d08a208f200ab8518fbc9 | [] | no_license | LFrakie/DjangoApiRestDeploy | c74fed856d252484cb2cf029c84db9414fafe319 | bf2da283430a43044c0206afa9a941784b1443a0 | refs/heads/master | 2023-08-29T18:40:52.664396 | 2021-11-01T04:03:17 | 2021-11-01T04:03:17 | 423,314,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for APIRestDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'APIRestDjango.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
985c7aa28847cd4854fce9905bc2bdd32a93a5a0 | 9beef29d5673a5ff723cb82288848ddcc7c87f1b | /project2_to_student/svm.py | ffba2aff0332ee0a13110656f8faef161b5409e1 | [] | no_license | JwDong2019/Pattern_recognition | 88ee9f43c42bd1f0c3cc1a1dbdea2a241c3635a7 | d0723f9d7c074788f5d1f0fe9a13d1b35119f8eb | refs/heads/master | 2020-09-09T05:35:34.933327 | 2019-11-13T03:36:42 | 2019-11-13T03:36:42 | 221,363,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: UTF-8 -*-
import sys
path=r'C:\Users\zhtjw\Desktop\libsvm-3.24\python'
sys.path.append(path)
from svmutil import *
from scipy.io import loadmat
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
train_label = loadmat(r'train_label.mat')
train_label = list(train_label['train_label'])
train_data = loadmat(r'train_data.mat')
train_data = list(train_data['train_data'])
test_data = loadmat(r'test_data.mat')
test_data = list(test_data['test_data'])
test_label = loadmat(r'test_label.mat')
test_label = list(test_label['test_label'])
acc = np.zeros(shape=(6,6))
for i in range(6):
for j in range(6):
g_d = math.pow(2,(2*i-5))
# g_d = i/10+0.01
# c_d = j/10+0.01
c_d = math.pow(2,(2*j-5))
options = '-t 3 -d 3 -c %f -g %f'%(c_d ,g_d)
model = svm_train(train_label,train_data,options)
print("result:")
p_label, p_acc, p_val = svm_predict(test_label, test_data, model)
corr = 0
print(len(p_label))
for k in range(len(p_label)):
#print(p_label[k])
# f = open(r'SVM.txt','a')
# f.write(str(k+1)+' '+str(int(p_label[k]))+'\n')
# f.close()
if p_label[k] == test_label[k]:
corr = corr+1
acc[i,j] = corr/len(p_label)
f, ax = plt.subplots(figsize=(6, 6))
ax = sns.heatmap(acc, annot=True,cmap="YlGnBu",linewidths=.5,annot_kws={'size':10,'weight':'bold','color':'red'})
ax.set_title('Sigmod')
ax.set_xlabel('lb g')
ax.set_xticklabels([]) #设置x轴图例为空值
ax.set_ylabel('lb b')
ax.set_yticklabels([])
plt.show()
| [
"[email protected]"
] | |
1396be293ec9c20991c348c63754479f5a95ee3b | ef2e354ae06e9994b7bc65f9685f8769ec56dc28 | /signin/views.py | 61b5f11c58b4c816f2b49989c0e0c093c439ae29 | [] | no_license | akhilpatil123/FareShare | 45e634b07749f507a40eeb08be710b2090844ab9 | a0d89ba324ef5cf74fe5c54cf641f0d3625bd373 | refs/heads/master | 2020-04-30T00:21:56.041455 | 2019-03-19T11:55:56 | 2019-03-19T11:55:56 | 176,501,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render,redirect
from .models import *
from .forms import *
from django.http import *
from django.contrib.auth.models import User
from django.contrib import auth,messages
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import logout,login
# Create your views here.
def registration(request):
    """Handle the signup form: create a Django User on a valid POST and
    redirect back to /registration/, otherwise (re)render the form."""
    if request.method=='POST':
        form1=userform(request.POST)
        if form1.is_valid():
            username=form1.cleaned_data['username']
            first_name=form1.cleaned_data['first_name']
            last_name=form1.cleaned_data['last_name']
            email=form1.cleaned_data['email']
            password=form1.cleaned_data['password']
            # create_user() hashes the password before saving.
            User.objects.create_user(username=username,first_name=first_name,last_name=last_name,email=email,password=password)
            return HttpResponseRedirect('/registration/')
    else:
        form1=userform()
    return render(request,'registration.html',{'frm':form1})
def login(request):
    """Authenticate the posted credentials and render the welcome page,
    falling back to the login form on failure.

    NOTE: this view shadows django.contrib.auth.login imported above;
    the framework function is reached as auth.login().
    """
    if request.method=="POST":
        username =request.POST['user']
        password=request.POST['pas']
        try:
            user=auth.authenticate(username=username,password=password)
            if user is not None:
                auth.login(request,user)
                return render(request,'welcome.html')
            else:
                messages.error(request,'Username and password did not match')
        except ObjectDoesNotExist:
            # `auth.ObjectNotExist` does not exist, so the old except
            # clause raised AttributeError while handling any error; use
            # the ObjectDoesNotExist already imported at module level.
            print("invalid user")
    return render(request,'login.html')
def logout(request):
    """Log the current user out and show the landing page.

    NOTE(review): this view shadows django.contrib.auth.logout imported
    above; the framework function is reached as auth.logout(), so
    behavior is unaffected.
    """
    #logout(request)
    #return redirect('offerride/index.html')
    auth.logout(request)
    return render(request, 'offerride/index.html')
def admin_page(request):
    """Render the main page; anonymous visitors are sent to the login view."""
    # `is_autheticated()` was a typo and raised AttributeError on every
    # request; on modern Django `is_authenticated` is a property.
    if not request.user.is_authenticated:
        return redirect('signin/login.html')
    return render(request, 'offerride/index.html')
def profile(request):
    """Render the profile page with every registered user."""
    return render(request, 'profile.html', {'profile': User.objects.all()})
def email(request):
    """Send a hard-coded ride-confirmation mail and render the sent page."""
    subject = 'ride confirmation'
    k = 'hello'
    email_from = settings.EMAIL_HOST_USER
    # NOTE(review): the recipient is hard-coded; presumably it should
    # come from the ride/booking being confirmed - verify.
    rel= '[email protected]'
    send_mail(subject, k, email_from, [rel],fail_silently=False)
    return render(request,'send.html')
"[email protected]"
] | |
f112488eca72f60da8d513ba0c7dc7eb948f1314 | 0c1b3d8e1874cfb954143bd118e81df3d2a23b4f | /Course/Course07/Course07/Course07.py | 16d155770af4183876838fc754472d82ffbd0a07 | [] | no_license | GeorgiMinkov/Python-stuffs | 7e25c64463c0e0eb6cdcd135ae1d513f2b029d1f | 3cc32a2571b1e9081b38aff6cdf637fc12ea7e9b | refs/heads/master | 2021-01-18T16:19:21.727596 | 2017-10-11T09:17:18 | 2017-10-11T09:17:18 | 86,736,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | # use psycho indexing from 1
# encoding: utf-8
class Vector(object):
def __init__(self, *values):
self._values = list(values) # ?? ?? ???? ?? ? mutable
def __getitem__(self, key):
return self._values[key - 1]
def __setitem__(self, key, value):
self._values[key - 1] = value
class Matrix(object):
def __init__(self, rows, cols):
# [0] * 3 = [0, 0, 0]
# ????? ???, ?? ??? ??????? ?? ???????? ? ???????????? ????????? ?????? ??????????? self._values = [[0] * cols] * rows
self._values = [[0] * cols for i in xrange(0, rows)]
def __getitem__(self, (row, column)):
return self._values[row - 1][column - 1]
def __setitem__(self, (row, column), value):
self._values[row - 1][column - 1] = value
def f(*args):
print args
class Rapper(object):
def __init__(self, func):
self.func = func
def __call__(self, *args):
return map(self.func, args)
def sqare(x):
return x ** 2
#######################################################################
def wrapWith(f):
def wrapper(g):
def wrapped(*args, **kwargs):
return f(g(*args, **kwargs))
return wrapped
return wrapper
def kasa(f):
print "KASA"
@wrapWith(kasa)
def bira():
print "bira"
return 42
class WrapWith(object):
def __init__(self, f):
self.f = f
def __call__(self, g):
def wrapped(*args, **kwargs):
return self.f(g(*args, **kwargs))
return wrapped
########################inheritance
class D(object):
def foo(self):
print "D's foo"
class C(D):
def foo(self):
print "C's foo"
return super(C, self).foo()
class B(D):
def foo(self):
print "B's foo"
return super(B, self).foo()
class A(B, C):
def foo(self):
print "A's foo"
return super(A, self).foo()
# MRO - Method Resolution Order
if __name__ == "__main__":
#v = Vector(1, 5, 6)
#v[1] = 9
#m = Matrix(3, 3)
#m[1, 2] = 10
#print m[1, 2]
#print m[2, 2]
#print m[3, 2]
#xs = [1, 2, 3]
#f(xs)
#f(*xs)
#m = Rapper(sqare)
#print m(1, 2, 3)
#print bira()
test = A()
test.foo()
print A.__mro__ | [
"[email protected]"
] | |
21b04577adcb437a6b460fb9fd8c9f744d97d813 | 035ed13073a3d018a0cc9b3ba56410e1477a786d | /Edi_Library/connectMongoDB.py | e46638c99c3f6dd3677deca8dc456b8262e53e66 | [] | no_license | yousefsul/EDI_Library | bffe7198774a0555bbc3e94e826df693ef38fd7c | ea197318f16c896eb223116f6048f32c99e97ebc | refs/heads/master | 2023-09-01T11:21:13.029151 | 2021-09-18T13:41:48 | 2021-09-18T13:41:48 | 405,415,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | from pymongo import MongoClient
MONGO_CLIENT = "mongodb://yousef:[email protected]:63327/?authSource=admin&readPreference=primary&appname" \
"=MongoDB%20Compass&ssl=false"
class ConnectMongoDB:
"""
connect to devDB and client database
define the clients_collection,visits_collection,claims_collection as none
"""
def __init__(self):
try:
self.__mongo_client = MongoClient(MONGO_CLIENT)
self.__db = self.__mongo_client.client_2731928905_DB
self.__main_collection = None
self.__index_collection = None
except ConnectionError:
print(ConnectionError, "connection error have been occured")
def connect_to_collection(self , collection_name):
self.__main_collection = self.__db[collection_name]
def insert_to_main_collection(self, result):
try:
self.__main_collection.insert(result)
except Exception as e:
print("An Exception occurred ", e)
def connect_to_index_collection(self, collection_name):
self.__index_collection = self.__db[collection_name]
def insert_to_index_collection(self, result):
try:
self.__index_collection.insert(result)
except Exception as e:
print("An Exception occurred ", e)
| [
"[email protected]"
] | |
f9c18e26046f157c32283ef7744f6fbaa5153e8c | 18845896c764a564de1d719375e10cf31369c831 | /pyworld/toolkit/tools/visutils/format.py | a65cf5d74d97a8bb4d8039b520c12efcd545657e | [] | no_license | BenedictWilkins/pyworld-rl | b4fbc6a1f28a6f8f7d142634c1581f1aaa78065d | ef94d7a79a835212899d030d012024af5e1897bc | refs/heads/master | 2023-01-07T19:12:05.230919 | 2020-09-18T15:44:51 | 2020-09-18T15:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 13:10:10 2020
@author: ben
"""
| [
"[email protected]"
] | |
fce3ef57204d418506111fe770ceea4d801292ef | 28d7e7be8d17b24325349ac8b1a0f746d0e7196a | /myutman/__init__.py | 7a3a64db3efbb4f72adb5f9855f147ddfca67775 | [] | no_license | myutman/distributed-changepoint-detection | 962196ec475c2288e453078b70e494857b4dac9b | 9cc2a5f49d7925de93fa13a70e241fc0e3819e9a | refs/heads/master | 2020-08-11T21:37:32.568537 | 2020-06-17T17:33:48 | 2020-06-17T17:33:48 | 214,631,998 | 0 | 0 | null | 2019-10-19T09:43:36 | 2019-10-12T11:02:07 | null | UTF-8 | Python | false | false | 532 | py | from myutman.fuse.fuse import FuseForWindowAlgo
from myutman.generation.generation import ClientTerminalsReorderSampleGeneration, ChangeWithClientSampleGeneration, \
ChangeWithTerminalSampleGeneration, ChangeSampleGeneration
from myutman.node_distribution.node_distribution import RoundrobinNodeDistribution, DependentNodeDistribution, \
SecondMetaDependentNodeDistribution
from myutman.stand.stand_utils import compare_mdrs, compare_fdrs, compare_latencies
from myutman.streaming_algo.window_algo import WindowStreamingAlgo | [
"[email protected]"
] | |
aa38f0144cc56bbe326c370dfdbcc8705a5e248f | d9031bfff990457e53470d8e578314980024cfe8 | /fluent_python_eg/8、对象引用、可变性和垃圾 回收/为任意对象做深复制和浅复制.py | 344150302a7c06123dc34270c7e6421cabb74729 | [] | no_license | L316645200/_Python | f2d69772b2769b73061d3c5c8e957214a4ad9dfd | ca08b86da4e97df8e0f6e704c287f4fdb22f8675 | refs/heads/master | 2023-06-25T07:34:07.629789 | 2021-06-30T03:28:02 | 2021-06-30T03:28:02 | 249,311,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | # 示例 8-8 校车乘客在途中上车和下车
class Bus:
def __init__(self, passengers=None):
if passengers is None:
self.passengers = []
else:
self.passengers = list(passengers)
def pick(self, name):
self.passengers.append(name)
def drop(self, name):
self.passengers.remove(name)
# 示例 8-9 使用 copy 和 deepcopy 产生的影响
import copy
bus1 = Bus(['Alice', 'Bill', 'Claire', 'David'])
bus2 = copy.copy(bus1)
bus3 = copy.deepcopy(bus1)
print(id(bus1), id(bus2), id(bus3))
bus1.drop('Bill')
print(bus2.passengers)
print(id(bus1.passengers), id(bus2.passengers), id(bus3.passengers))
print(bus3.passengers)
print()
# 示例 8-10 循环引用:b 引用 a,然后追加到 a 中;deepcopy 会 想办法复制 a
a = [10, 20]
b = [a, 30]
a.append(b)
print(a)
c = copy.deepcopy(a)
print(c) | [
"[email protected]"
] | |
ee6512a76805369538943a622b67b5220a30b0f6 | 861d25fe3985340dae621d179f7af368e0dd7f39 | /stockweb/apache/django.wsgi | a14db05fff3bc47c9c5dcf5ca20ffd3b88a1b173 | [] | no_license | zhuwei302/python | 1ebcfac3e4f208ba81af93d13b8857bea9497862 | dfad635149b8f307ae0eb5b2b8c348843de663bb | refs/heads/master | 2021-01-19T18:44:32.843216 | 2018-09-18T15:17:21 | 2018-09-18T15:17:21 | 101,160,521 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 238 | wsgi | #coding=utf-8
import os
import sys
import django.core.handlers.wsgi
os.environ['DJANGO_SETTINGS_MODULE'] = 'stockweb.settings'
app_apth = "E:/py/stockweb"
sys.path.append(app_apth)
application = django.core.handlers.wsgi.WSGIHandler() | [
"[email protected]"
] | |
9f879ed1cd06cd14624c649b924d8b38d6c67866 | b98c1b5aba8432f1ad80107b9780f9a9c58334f1 | /appionlib/apImage/imagestat.py | 2e0839428346c2c8ed61f295c68d5b42a993f734 | [
"Apache-2.0"
] | permissive | vosslab/ctfeval | b58b27be3033cf0cc15e5888be73bee627bc9951 | 6cfc648f91c318c3a46a959e4771c3d16d8e741a | refs/heads/master | 2021-05-27T22:36:45.694258 | 2014-05-09T15:38:23 | 2014-05-09T15:38:23 | 19,549,073 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | #Part of the new pyappion
## pythonlib
import os
import math
## numpy
import pyami.quietscipy
from numpy import ma
## appion
from appionlib import apDisplay
from appionlib.apImage import imagenorm
####
# This is a low-level file with NO database connections
# Please keep it this way
####
#=========================
def meanEdgeValue(imgdata, w=0):
"""
get the average values for the edges of width = w pixels
"""
xmax = imgdata.shape[0]
ymax = imgdata.shape[1]
leftEdgeAvg = (imgdata[0:xmax, 0:w]).mean()
rightEdgeAvg = (imgdata[0:xmax, ymax-w:ymax]).mean()
topEdgeAvg = (imgdata[0:w, 0:ymax]).mean()
bottomEdgeAvg = (imgdata[xmax-w:xmax, 0:ymax]).mean()
edgeAvg = (leftEdgeAvg + rightEdgeAvg + topEdgeAvg + bottomEdgeAvg)/4.0
return edgeAvg
#=========================
def centralMean(imgarray, trim=0.1):
"""
get the average values for the edges of trim = x percent
"""
a = imagenorm.cutEdges(imgarray,trim=trim)
return a.mean()
#=========================
def maskImageStats(mimage):
n=ma.count(mimage)
mimagesq=mimage*mimage
sum1=ma.sum(mimage)
sum2=ma.sum(sum1)
sumsq1=ma.sum(mimagesq)
sumsq2=ma.sum(sumsq1)
avg=sum2/n
if (n > 1):
stdev=math.sqrt((sumsq2-sum2*sum2/n)/(n-1))
else:
stdev=2e20
return n,avg,stdev
#=========================
def getImageInfo(im):
"""
prints out image information good for debugging
"""
avg1 = im.mean()
stdev1 = im.std()
min1 = im.min()
max1 = im.max()
return avg1,stdev1,min1,max1
#=========================
def printImageInfo(im):
"""
prints out image information good for debugging
"""
#print " ... size: ",im.shape
#print " ... sum: ",im.sum()
avg1,stdev1,min1,max1 = getImageInfo(im)
if len(im.shape) == 2:
print "Image: %d x %d - type %s"%(im.shape[0], im.shape[1], im.dtype)
elif len(im.shape) == 1:
print "Image: %d - type %s"%(im.shape[0], im.dtype)
print " ... avg: %.2e +- %.2e"%(avg1, stdev1)
print " ... range: %.2e <> %.2e"%(min1, max1)
return avg1,stdev1,min1,max1
#=========================
def correlationCoefficient(x,y,mask=None):
"""
calcualate the correlation coefficient of two numpys
"""
if x.shape != y.shape:
apDisplay.printError("images are not the same shape in correlation calc")
if mask != None:
if x.shape != mask.shape:
apDisplay.printError("mask is not the same shape as images in correlation calc")
tot = mask.sum()
if tot == 0:
return 0.0
x = imagenorm.normStdevMask(x,mask)
y = imagenorm.normStdevMask(y,mask)
else:
tot = float(x.shape[0]*x.shape[1])
x = imagenorm.normStdev(x)
y = imagenorm.normStdev(y)
z = x*y
if mask != None:
z = z*mask
sm = z.sum()
return sm/tot
#=========================
def rmsd(x,y,mask=None):
return math.sqrt(msd(x,y,mask=mask))
#=========================
def msd(x,y,mask=None):
if mask != None:
tot = float(mask.sum())
if tot == 0:
return 1.0e13
x = imagenorm.normStdevMask(x,mask)
y = imagenorm.normStdevMask(y,mask)
else:
tot = float(x.shape[0]*x.shape[1])
x = imagenorm.normStdev(x)
y = imagenorm.normStdev(y)
z = (x-y)**2
if mask != None:
z = z*mask
sm = z.sum()
return sm/tot
####
# This is a low-level file with NO database connections
# Please keep it this way
####
| [
"[email protected]"
] | |
d846551054439c70fbf5eb93c1b508984967f6e5 | 501759cfea4e650f8f05235ce795b7a33bef2a10 | /monkey_checker/monkey_detect/detect/detect.py | ab08f627c2bfe15e633501aa813529df5973006a | [] | no_license | KsTmhr/Monkey_checker_page | 735462178d85311adb1024e3653f59e2ec367e91 | 4b608e29d5cb5e48f5eda8459e0409cc829fa3a9 | refs/heads/main | 2023-02-04T06:47:33.455647 | 2020-12-19T02:40:48 | 2020-12-19T02:40:48 | 312,583,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,761 | py | import argparse
import os
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from PIL import Image, ImageFilter #テストに使用
from monkey_detect.detect.models.experimental import attempt_load # manage.pyからの相対パスで指定しないと読まない
from monkey_detect.detect.utils.datasets import LoadStreams, LoadImages # 単体でテストするときは"monkey_detect.detect.detect."を丸ごと削る
from monkey_detect.detect.utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from monkey_detect.detect.utils.torch_utils import select_device, load_classifier, time_synchronized
from django.conf import settings
base_dir = str(settings.BASE_DIR) # base_dir + 〜 の形でPathを指定する
def detect(img_size=640):
weights = base_dir + '/monkey_detect/detect/last.pt'
out = base_dir + '/static/js/result' # 画像保存先
"""
im = Image.open(source)
new_im = im.convert('L')
new_im.save(out+"/new_img.jpg")
"""
source = base_dir + "/monkey_detect/detect/monkey.JPG"
imgsz = img_size
view_img = True
print(weights)
# Initialize
set_logging()
device = select_device('')
if os.path.exists(out): # output dir
shutil.rmtree(out) # delete dir
os.makedirs(out) # make new dir
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
modelc.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=True)[0]
# Apply NMS
pred = non_max_suppression(pred, 0.25, 0.45, classes=0, agnostic=True)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
print(det)
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += '%g %ss, ' % (n, names[int(c)]) # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, conf, *xywh)# label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line) + '\n') % line)
label = '%s %.2f' % (names[int(cls)], conf)
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print('%sDone. (%.3fs)' % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if dataset.mode == 'images':
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
print('Results saved to %s' % Path(out))
print('Done. (%.3fs)' % (time.time() - t0))
| [
"[email protected]"
] | |
d6b35765e95b1515aee103f1064fde9b73e86908 | fe02ca79bc36fea997f7fdfeda0f8d52e69414e0 | /python/和为K的子数组.py | 765951a4538c375a505583204a50494481112896 | [] | no_license | Joenhle/leetcode | 4e474a46712d56dc2639115742486cf329489e81 | dfabec60216b87c563e0cf1b6bb3a35fee296ea7 | refs/heads/master | 2023-03-15T19:09:42.691491 | 2023-01-31T12:44:32 | 2023-01-31T12:44:32 | 595,606,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | class Solution:
def subarraySum(self, nums, k):
dp, sums, temp, m = [0 for _ in range(len(nums))], [0 for _ in range(len(nums))], 0, {}
for i, num in enumerate(nums):
temp += num
if temp not in m:
m[temp] = []
m[temp].append(i)
sums[i] = temp
dp[0] = 1 if nums[0] == k else 0
for i in range(1, len(nums)):
dp[i] = dp[i-1]
if sums[i] == k:
dp[i] += 1
if sums[i] - k in m:
for ind in m[sums[i] - k]:
if ind < i:
dp[i] += 1
else:
break
return dp[len(dp)-1]
s = Solution()
print(s.subarraySum([1, -1, 0, 0], 0))
| [
"[email protected]"
] | |
d240889edcff21c7b7dcead79410625a60c0b8da | eb13d18b3912159c557810b3ffe3ebce0a0687de | /w_level_5.py | 41591d4f990136c060e7b2a52850b2c2303550af | [] | no_license | labradorkuro/water_level | 36b584dc8e57335b136d418657eb3ac6d192338c | c00530d0e3b72b858f6033e3f806b2df18dad1f0 | refs/heads/master | 2020-07-05T20:22:14.958001 | 2016-11-17T02:16:57 | 2016-11-17T02:16:57 | 73,981,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,680 | py | #coding: utf-8
#水位計測システムセンサー側プログラム
import RPi.GPIO as GPIO #GPIOライブラリをインポート
import time
import os,socket
import struct
from time import sleep
import urllib,urllib2
import fcntl
import struct
import datetime
import json
import threading
import serial
import subprocess
#ピン番号の割り当て方式を「コネクタのピン番号」に設定
GPIO.setmode(GPIO.BOARD)
#使用するピン番号を代入
LED_PIN = 32
LEVEL_1_PIN = 11 #水位1
LEVEL_2_PIN = 13 #水位2
LEVEL_3_PIN = 15 #水位3
LEVEL_4_PIN = 16 #水位4
LEVEL_5_PIN = 18 #水位5
GPIO.setwarnings(False)
#11番ピンを出力に設定し、初期出力をローレベルにする
GPIO.setup(LED_PIN,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(LEVEL_1_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LEVEL_2_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LEVEL_3_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LEVEL_4_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LEVEL_5_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
INTERVAL = 10 #送信インターバル初期値 単位 sec
SLEEPTIME = 0.1 #永久ループのスリープ時間 単位 sec
TEST_INT = 1 #(テスト用)インターバルを10:1/10[60s] 20:1/20[30s] 30:1/30[20s] 60:1/60[10s]
READ_SLEEP = 1 #センサー値取得スリーブ時間 単位sec
#グローバル変数
g_macAddr = 0 #MACアドレス保存用
g_counter = 0 #単に起動してからの計測回数print用
g_sendInterval = 60 #送信インターバル(秒)
g_cmpTime = 0 #時間経過比較用時刻
level_1 = 0 # 水位
level_2 = 0
level_3 = 0
level_4 = 0
level_5 = 0
level_check = 0
url = 'http://153.126.193.185/hifmis/ajaxlib.php'
#url = ''
#
# MACアドレスの取得
# IN: インターフェース名 ex)"eht0" "wlan0"
#
def getMacAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
#
# 現在日時取得
# OUT: 2016-10-21 18:34:46
#
def getDatetime():
datet = datetime.datetime.today() #現在日付・時刻のdatetime型データの変数を取得
return datet.strftime("%Y-%m-%d %H:%M:%S")
#
# センサー値取得
#
def readSensor():
global level_1
global level_2
global level_3
global level_4
global level_5
level_1 = readSensor_sub(LEVEL_1_PIN) #水位計測
level_2 = readSensor_sub(LEVEL_2_PIN) #水位計測
level_3 = readSensor_sub(LEVEL_3_PIN) #水位計測
level_4 = readSensor_sub(LEVEL_4_PIN) #水位計測
level_5 = readSensor_sub(LEVEL_5_PIN) #水位計測
level_check = level_1 + (level_2 * 2)+ (level_3 * 4) + (level_4 * 8) + (level_5 * 16)
return level_check
#
# 入力ポートチェック
#
def readSensor_sub(pin):
level_val = 0
if( GPIO.input(pin) == GPIO.HIGH):
level_val = 1
return level_val
#
# 水位データの取得(1分間前回と異なる値を取得した場合に変化したとみなす)
# 入力:前回送信値
def checkLevel(pre_send_value,firstboot):
ret = 0
cnt = 1
while True:
time.sleep(READ_SLEEP) # 1秒待ち
# センサー値取得
val = readSensor()
print 'readSensor : %d' % val + '(%d)' %cnt
if firstboot == 1:
return val
if val == pre_send_value:
ret = pre_send_value
break
cnt += 1
if cnt >= 60:
ret = val
break
return ret
#
# 水位計測メイン処理
#
def main():
global g_macAddr
global g_sendInterval
global g_cmpTime
global level_1
global level_2
global level_3
global level_4
global level_5
g_cmpTime = time.time()
g_macAddr = getMacAddr("wlan0")
print g_macAddr
firstboot = 1
prev_level = -1
#無限ループ
while True:
# 水位計測
level_check = checkLevel(prev_level,firstboot)
#10秒毎に温度湿度を計測して送信する
if prev_level != level_check or g_cmpTime+g_sendInterval < time.time():
g_cmpTime = time.time()
print level_check
prev_level = level_check
#HTTP送信
if url != '':
params = urllib.urlencode({'func':"regRecord", 'mac_address':g_macAddr, 'level1':level_1, 'level2':level_2, 'level3':level_3, 'level4':level_4, 'level5':level_5})
try:
res = urllib2.urlopen(url, params)
print getDatetime(),
print "SEND DATA:%s" % params
g_cmpTime = time.time()
print '-----------------Alpha01-02'
res_data =res.read()
print res_data, #,で改行されない
json_res = json.loads(res_data)
print "status=%s" % json_res['status'] + " int=%s" % json_res['int']
if json_res['int'] > 0:
g_sendInterval = (json_res['int']/1000)/TEST_INT #msec ⇒ sec
if json_res['status'] == 'OK':
GPIO.output(LED_PIN,GPIO.HIGH)
print '\r'
except urllib2.URLError, e:
g_sendInterval = 10 #返り値のintervalが来ないので10秒としておく
print e
GPIO.output(LED_PIN,GPIO.LOW)
firstboot = 0
#GPIOを開放
print "GPIOを開放"
GPIO.cleanup()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a32bc4f0ca851d53e4bdbeca967195ddb1231d2d | bb64aaeab1b8fdf186dc2641e1b684ac76c8cafb | /chap01/sum2.py | 8d40479fc83db388692fbfb9f67f0ae8bf2cbc75 | [] | no_license | niwza/PythonSummerfield | 3601b9fecd98ff09035f0ff38b5bc91bb954f77f | d8baaf81151e23f5e3d99736388539dc25ecdee8 | refs/heads/master | 2021-09-01T11:00:50.897315 | 2017-12-26T16:58:24 | 2017-12-26T16:58:24 | 114,018,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | print("Type integers, each followed by Enter; or ^D or ^Z to finish")
total = 0
count = 0
while True:
try:
line = input()
if line:
number = int(line)
total += number
count += 1
except ValueError as err:
print(err)
continue
except EOFError:
break
if count:
print("count =", count, "total =", total, "mean =", total / count)
| [
"[email protected]"
] | |
874e073007da9fd2f373c4939d0f6082d4775641 | 6f7df24b2e563b43c6f78d9b7b987d8a3abb980e | /aoc/aoc2020/days/day8/part_1.py | 67667ca938bf0318d89f1535611f0754bb658a83 | [] | no_license | bendikjohansen/adventofcode | 1ca2d9a674c84faafffd2e087592eb4829c3fd25 | 8381c2e44937124779f10a0c8791761e2db36944 | refs/heads/main | 2023-02-05T17:45:34.973773 | 2020-12-28T20:10:22 | 2020-12-28T20:10:22 | 325,106,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | from aoc.utils import get_input
from typing import Tuple, List
Instriction = Tuple[str, int]
Instructions = List[Tuple[str, int]]
def parse_instructions(all_instructions: str) -> Instriction:
return [(ins.split()[0], int(ins.split()[1]))
for ins in all_instructions.splitlines()]
def find_loop(instructions: Instructions, index: int = 0,
indices: List[int] = []) -> int:
if index in indices:
return 0
updated_indices = indices + [index]
operation, arg = instructions[index]
if operation == 'acc':
return find_loop(instructions, index + 1, updated_indices) + arg
if operation == 'jmp':
return find_loop(instructions, index + arg, updated_indices)
return find_loop(instructions, index + 1, updated_indices)
def solve_part_one(all_instructions: str) -> int:
instructions = parse_instructions(all_instructions)
return find_loop(instructions, 0)
if __name__ == "__main__":
all_instructions = get_input(2020, 8)
result = solve_part_one(all_instructions)
print(result)
| [
"[email protected]"
] | |
abbefb1bcdec9318f2329543523118ea6b73f16b | a217654ec309d404e91ce93881e40e4f2bf26f8a | /FitHub/profile/admin.py | 9692bef62f8a1dec41a2521bcc5d5a03def244a8 | [
"MIT"
] | permissive | gyhyihe/FitHub | 29c31ad6566e6ac178f1a664a9500a5fc117ba6e | c2db1126ac2ebcd18025f5690d0ad1a962fd9313 | refs/heads/master | 2022-12-10T03:49:40.081165 | 2019-09-30T12:50:47 | 2019-09-30T12:50:47 | 204,245,313 | 1 | 0 | MIT | 2022-12-08T06:39:09 | 2019-08-25T04:21:24 | Python | UTF-8 | Python | false | false | 557 | py | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from login.forms import CustomUserCreationForm, CustomUserChangeForm
from login.models import Member, Staff, Class, MemberLevel, User
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
admin.site.register(Staff)
admin.site.register(Class)
admin.site.register(MemberLevel)
admin.site.register(Member)
admin.site.register(User, CustomUserAdmin)
| [
"[email protected]"
] | |
94580e9450ca609e7a58c8fb5de7bf6cc3331829 | e18ef621eb92bb7af2e683fbc0b8c8b638b1ec46 | /ACTIVITIES/python/queuepractice.py | 0b07d07e0bc74171a6d5257f5417f84813778b3c | [] | no_license | BPCao/Assignments | 496b238ce8c2db101beef80d8538027fca26683e | 6144f7a35fdec4b945f69e282b7164cac9f7ddfe | refs/heads/master | 2020-04-22T12:32:08.729961 | 2019-05-02T13:07:19 | 2019-05-02T13:07:19 | 170,374,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | class Queue:
def __init__(self):
self.lst = []
self.new_lst = []
def enqueue(self, num):
self.lst.insert(0, num)
def dequeue(self):
try:
self.new_lst.append(self.lst[0])
del self.lst[0]
except IndexError:
print("Value not present, please pick a valid index.")
| [
"[email protected]"
] | |
27770099ae6a957db2efb71480f83b8c09157c50 | fb8b1d35ec1ec14f1360ce6e59e78db06e4a3f79 | /raincastcli | 11b718587dde2bb2658cf7bcebaf90aa90ff476c | [] | no_license | slashdevsda/raincaster | 11896ae653df78d6bb30b77556aa419c7f289e86 | b6c07798b770aed7a37def8271428963baddde63 | refs/heads/master | 2023-06-03T20:47:20.000943 | 2019-08-30T13:49:31 | 2019-08-30T13:49:31 | 205,389,479 | 0 | 0 | null | 2023-05-22T22:30:31 | 2019-08-30T13:37:58 | Python | UTF-8 | Python | false | false | 58 | #! /bin/env python
from raincast.main import main
main()
| [
"[email protected]"
] | ||
381326b6b749f1c8ba16eccb813cc408826781d2 | 66d776736d6aa1712ce0b0a1fc4c40ec95f09fff | /5/simulation/bisection/main.py | 41d6d487611b6294404b74f562f52bfb4d36bafb | [] | no_license | onokatio/nitkc | fd9920efa189155bd869c468e87624ea114211d9 | 7e2c368c99cf7abaa489cb78b6fd00ad15bb2133 | refs/heads/master | 2022-07-20T04:51:11.111922 | 2021-10-11T17:05:02 | 2021-10-11T17:05:02 | 204,952,362 | 0 | 1 | null | 2022-01-23T17:19:20 | 2019-08-28T14:32:30 | C | UTF-8 | Python | false | false | 442 | py | #!/bin/python3
#print("number:")
#n = input()
#print("a:")
#a = input()
#print("b:")
#b = input()
a = 0.0
b = 2.0
#print("count:")
#count = input()
def f(x):
return x*x-2
x1 = int(a)
x2 = int(b)
y = 10
#for i in range(int(count)):
while abs(y) > 10**-15:
x = (x1+x2)/2
y = f(x)
print(f"f({x}) = {y}")
if(y < 0): # x is too low
x1 = x
x2 = x2
if(y > 0): # x is too high
x1 = x1
x2 = x
| [
"[email protected]"
] | |
ec4e228b4463fd73d57f087772bb99fbb63625ac | 98b43a9d58732f8040df448d4a1197955559cbf0 | /genutils/CheckScafLibFilePresence.py | 6aec614973b046578975911284252962fac6759b | [] | no_license | flosopher/floscripts | c4ee793b541c55cf3b485573fb497ecb751c7141 | ca36f8091210f7f75db718e813a6e99ff99fe04d | refs/heads/master | 2021-01-18T00:21:10.137105 | 2015-08-17T10:51:39 | 2015-08-17T10:51:39 | 16,000,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | #!/usr/bin/python
#script to check whether a file with a given ending is present
#for all scaffolds in an input list
import sys
import os
ScafFileList = []
ListfileList = []
file_ending = ""
checkpoint = 0
#convenience for matching/scaffold related stuff
ScaffoldDir = "/lab/shared/scaffolds/"
def get_scaffold_dir( pdb_code ):
subcode = pdb_code[1:3]
return ScaffoldDir + subcode + "/" + pdb_code + "/"
CommandArgs = sys.argv[1:]
for arg in CommandArgs:
if arg == '-l':
arg_ind = CommandArgs.index(arg)
for i in range( len( CommandArgs ) - arg_ind - 1 ):
if( (CommandArgs[arg_ind + i + 1])[0:1] == '-' ):
break
ListfileList.append (CommandArgs[arg_ind + i +1 ] )
if arg == '-s':
arg_ind = CommandArgs.index(arg)
for i in range( len( CommandArgs ) - arg_ind -1 ):
if( (CommandArgs[arg_ind + i + 1])[0:1] == '-' ):
break
ScafFileList.append(CommandArgs[arg_ind + i + 1] )
if arg == '-file_ending':
file_ending = CommandArgs[ CommandArgs.index(arg) + 1 ]
if file_ending == "":
print "Error, need to specify desired file ending with -file_ending"
sys.exit()
#now process the file lists
for list in ListfileList:
#print "processing list %s" % list
listfile = open(list,'r')
files = listfile.readlines()
listfile.close
for file in files:
ScafFileList.append( file.replace("\n","") )
curdir = os.getcwd()
scafs_not_present = []
for scaf in ScafFileList:
desired_file = get_scaffold_dir( scaf ) + scaf + file_ending
#print "looking for %s" % desired_file
if not os.path.exists( desired_file ):
#print "doesn't exist"
scafs_not_present.append( scaf )
num_missing = len( scafs_not_present )
if( num_missing > 0 ):
outstring = ""
for scaf in scafs_not_present:
outstring = outstring + scaf + "\n"
print "The following %s scaffolds do not have the desired file: \n %s" %(num_missing,outstring)
| [
"[email protected]"
] | |
53fffc220a54e9dfadbd4347d14b2b0f558b5f0d | 781fecde191ba0b8872bc2b44d4bddd6d3309657 | /fileopening.py | 69282bd6fa68fea7e61f00dad16144d8da801145 | [] | no_license | Swrajitpaul/python | 7730a05c25bd582580139b80506b4dba9766ab3d | 4f84f97af526cff5b9b4bf65ae41dd88607b1409 | refs/heads/master | 2023-01-23T17:30:00.270432 | 2023-01-18T23:42:13 | 2023-01-18T23:42:13 | 138,118,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 25 18:56:27 2018
@author: swrajit
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 9 13:36:46 2016
@author: WELG
"""
data = []
file_name = input("Provide a name of a file of data ")
try:
fh = open(file_name, 'r')
except IOError:
print('cannot open', file_name)
else:
for new in fh:
if new != '\n':
addIt = new[:-1].split(',') #remove trailing \n
data.append(addIt)
finally:
fh.close() # close file even if fail
| [
"[email protected]"
] | |
d7a99d9cad3b72b31f9c9e1d01901a2678245a52 | 7423108625745f781169146f9ec9b6a09b588ecb | /FacialLandmarkDetection/read_text.py | d11fed67bd7aef623afb52033532bf95cbf9693e | [] | no_license | suguruma/FacialAnalysisFor3D | 21c96ff00930bbd93649e6add0ef7d3f65cc60d1 | e041d7f5cffae67575442a6d252ee04bcabd1b78 | refs/heads/master | 2021-09-14T01:01:53.691969 | 2018-05-06T22:55:34 | 2018-05-06T22:55:34 | 111,112,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,445 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 12:08:23 2017
@author: Terada
"""
import numpy as np
import glob
import pandas as pd
## 顔器官ID
landmark_dict = {'0':'left_eye_outer_corner_x',
'1':'left_eye_outer_corner_y',
'2':'left_eye_inner_corner_x',
'3':'left_eye_inner_corner_y',
'4':'right_eye_inner_corner_x',
'5':'right_eye_inner_corner_y',
'6': 'right_eye_outer_corner_x',
'7': 'right_eye_outer_corner_y',
'8': 'left_nose_top_x',
'9': 'left_nose_top_y',
'10': 'left_nose_bottom_x',
'11': 'left_nose_bottom_y',
'12': 'right_nose_top_x',
'13': 'right_nose_top_y',
'14': 'right_nose_bottom_x',
'15': 'right_nose_bottom_y',
'16': 'nose_root_x',
'17': 'nose_root_y',
'18': 'mouth_center_top_lip_x',
'19': 'mouth_center_top_lip_y',
'20': 'mouth_left_corner_x',
'21': 'mouth_left_corner_y',
'22': 'mouth_center_bottom_lip_x',
'23': 'mouth_center_bottom_lip_y',
'24': 'mouth_right_corner_x',
'25': 'mouth_right_corner_y',
'26': 'mouth_center_lip_x',
'27': 'mouth_center_lip_y'}
## 2D点クラス
class Point2D():
def __init__(self, _x, _y):
self.x = _x;
self.y = _y;
## 特徴点群
class LandmarkData():
def __init__(self):
self.idx = []
self.Point = []
def add_parts_index(self, _parts_idx):
self.idx.append(_parts_idx)
def add_Point(self, _Point):
self.Point.append(_Point)
def extract_filename(filepath, ext = None):
_name = filepath.split('\\')[len(filepath.split('\\'))-1]
if ext == None:
filename = _name
else:
filename = _name.split(ext)[0]
return filename
'''
テキストファイル読み込み
'''
def read_landmark(_data_path, _format = 1):
files = None
if type(_data_path) == str:
files = glob.glob(_data_path)
elif type(_data_path) == list:
files = []
for path in _data_path:
files.append(glob.glob(path)[0])
filename_dict = {}
landmarks_list = []
for i, filename in enumerate(files):
## make file ID : ファイルID生成
fname = extract_filename(filename, None) #.txt
filename_dict[i] = fname
## for Landmark
fp = open(filename)
data = fp.read()
fp.close()
if _format == 1:
landmarks = LandmarkData()
flag_of_start = False
lines = data.split('\n')
for line in lines:
if "ID" in line:
flag_of_start = True
elif flag_of_start and len(line) > 0:
#[No][ID][X][Y]
vals = line.split(',')
landmarks.add_parts_index(1) # start zero(0)
landmarks.add_Point( Point2D( int(vals[2]), int(vals[3]) ))
landmarks_list.append(landmarks)
else:
landmarks = LandmarkData()
flag_of_start = False
lines = data.split('\n')
for line in lines:
if "Format" in line:
flag_of_start = True
elif '#' in line and flag_of_start:
#[Area_Number][Index_Numer_in_Area][Index_Numer][X][Y]
vals = line.split(' ')
landmarks.add_parts_index(int(vals[4]) + 1) # start zero(0)
landmarks.add_Point( Point2D( int(vals[5]), int(vals[6]) ))
landmarks_list.append(landmarks)
return filename_dict, landmarks_list
'''
lmlist[ff].Point[lmlist[oo].idx[yy]].(x/y)
[ff] -> ファイルIDを指定する
[oo] -> 顔器官IDを指定する
(x/y) -> x か yで座標値を取得する
'''
def make_landmark_df(_lm_list, _fname_dict, _landmark_dict):
lmptlist = []
for i in range(len(_lm_list)):
lmpt = np.arange(0)
for j in range(len(_lm_list[i].idx)):
lmpt = np.append(lmpt, _lm_list[i].Point[j].x)
lmpt = np.append(lmpt, _lm_list[i].Point[j].y)
lmptlist.append(lmpt)
## ファイル名を列indexとして設定する
fn_label = [_fname_dict[i] for i in range(len(_fname_dict))]
## 器官名を行indexとして設定する
lm_label = [_landmark_dict[str(i)] for i in range(len(_landmark_dict))]
df = pd.DataFrame(np.array(lmptlist), index = fn_label, columns = lm_label)
return df
'''
出力
'''
def write_landmark_df(_df_list, _save_fname):
_df_list.to_csv(_save_fname)
'''
-1から1の値に変換
'''
def landmark_change_scale(_df_lm, _img_size):
np_lmptlist = np.array(_df_lm).astype(np.float32)
np_lmptlist = (np_lmptlist - _img_size/2) / (_img_size/2)
return np_lmptlist
'''
メイン
'''
def main(filename_path, io_fname, img_size, READ_CSV = False):
fname_dict = None
### ラベル入力 ###
if(READ_CSV):
### ラベルデータ読み込み
lm_df = pd.read_csv(io_fname, index_col=0)
else:
### ファイル名、特徴点の読み込み (リスト化)
fname_dict, lm_list = read_landmark(filename_path)
print("Read Landmark Texts : {0}".format(len(fname_dict)))
### ラベル整形
lm_df = make_landmark_df(lm_list, fname_dict, landmark_dict)
### ラベル出力 (CSV)
save_fname = io_fname
write_landmark_df(lm_df, save_fname)
print("Write Landmark Dataset : OK")
### ラベル変換
y = landmark_change_scale(lm_df, img_size.max())
return y, fname_dict
def convertNewFormat(fname_dict, lm_list):
import csv
for i in range(len(lm_list)):
with open(".//convert//" + fname_dict[i], 'w', newline='') as f:
writer = csv.writer(f, lineterminator='\n')
lm_label = ["No.", "ID", "POS(X)", "POS(Y)"]
writer.writerow(lm_label)
for j in range(len(lm_list[i].idx)):
row = [j + 1, 0, lm_list[i].Point[j].x, lm_list[i].Point[j].y]
writer.writerow(row)
if __name__ == "__main__":
### ラベル入力 ###
READ_CSV = False
input_fname = 'data//label.csv'
img_size = np.array([400, 360]) #[h, w]
if(READ_CSV):
### ラベルデータ読み込み
lm_df = pd.read_csv(input_fname, index_col=0)
print("Read CSV: {0}".format(input_fname))
else:
### ファイル名、特徴点の読み込み (リスト化)
filename_path = 'data//txt3dTo2d//before//*.txt'
fname_dict, lm_list = read_landmark(filename_path)
print("Read Files: {0}".format(filename_path))
convertNewFormat(fname_dict, lm_list)
### ラベル整形
lm_df = make_landmark_df(lm_list, fname_dict, landmark_dict)
### ラベル出力 (CSV)
#save_fname = input_fname
#write_landmark_df(lm_df, save_fname)
### ラベル変換
y = landmark_change_scale(lm_df, img_size.max())
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(y.shape, y.min(), y.max()))
| [
"[email protected]"
] | |
71182020350d13f9a755b08cb68244aef83530ac | b4d31cdb46aaf8f3ba2fc742188cde5a13149f16 | /1/CN_Proj1_9731096/Q2_socket_client.py | f6f72032519ef42ea7d510228bda0c7afc2beb8a | [] | no_license | amir78729/Computer-Network-Course-Project | f22f3aa4dd57d68dba854d2888067dcc24d8ea42 | 82ce9406ac518a4847b7f513b6d635fb34863ef6 | refs/heads/main | 2023-04-29T19:20:33.369149 | 2021-05-25T12:49:24 | 2021-05-25T12:49:24 | 350,129,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import socket
msgFromClient = "Hello Server:)"
bytesToSend = str.encode(msgFromClient)
server_IP = '208.67.222.222'
server_IP = '127.0.0.1' # uncomment for testing
server_port = 53 # the port for DNS
bufferSize = 512
# Creating a UDP socket at client side
UDP_client_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
print('UDP socket has been created!')
# Send to server using created UDP socket
UDP_client_socket.sendto(bytesToSend, (server_IP, server_port))
print('The message has been sent to Server!\nPORT: {}\nADDRESS: {}'.format(server_port, server_IP))
msgFromServer = UDP_client_socket.recvfrom(bufferSize)
msg = "Message from Server: {}".format(msgFromServer[0])
print(msg)
DNS_record = socket.gethostbyname("google.com")
print(DNS_record)
| [
"[email protected]"
] | |
3bfe44b8258bd095b4b540aa4e146533bbf1aa7a | 741e0321faf0d63786cf2ba6a619b61b0f2e390a | /app/core/migrations/0001_initial.py | 87d60db29157eba80149fbca1279e0b5cb408f7b | [
"MIT"
] | permissive | WebRo/recipe-app-api | 6a6cd94cd40a914adb88fff659f0e6d28441b87c | a344901689538733eabae3e4b6f467a7f3436534 | refs/heads/main | 2023-03-01T14:28:03.964989 | 2021-02-07T20:42:55 | 2021-02-07T20:42:55 | 335,720,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # Generated by Django 3.1.6 on 2021-02-07 00:21
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(max_length=255, unique=True)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
c9c53618f922ae4d277fd12a755c32f68228bbf8 | ddedc686452b1300d04e4e38a851207a1e393295 | /kodiswift/constants.py | 66688017eb8b52aa91b21ed6a0d4d6e08657e135 | [] | no_license | yugimaster/plugin.video.highporn | 8aeefa8fa598a803305e04731901e56b2ae852ed | d7d17195e1b714ae9620178197589f900e8169ee | refs/heads/master | 2021-09-10T17:30:42.661767 | 2018-03-26T08:41:09 | 2018-03-26T08:41:09 | 124,479,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | # -*- coding: utf-8 -*-
"""
kodiswift.constants
--------------------
This module contains some helpful constants which ease interaction
with Kodi.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
"""
from __future__ import absolute_import
from kodiswift import xbmcplugin
__all__ = ['SortMethod']
class SortMethod(object):
"""Static class to hold all of the available sort methods. The prefix
of 'SORT_METHOD_' is stripped.
e.g. SORT_METHOD_TITLE becomes SortMethod.TITLE
"""
ALBUM = xbmcplugin.SORT_METHOD_ALBUM
ALBUM_IGNORE_THE = xbmcplugin.SORT_METHOD_ALBUM_IGNORE_THE
ARTIST = xbmcplugin.SORT_METHOD_ARTIST
ARTIST_IGNORE_THE = xbmcplugin.SORT_METHOD_ARTIST_IGNORE_THE
BITRATE = xbmcplugin.SORT_METHOD_BITRATE
CHANNEL = xbmcplugin.SORT_METHOD_CHANNEL
COUNTRY = xbmcplugin.SORT_METHOD_COUNTRY
DATE = xbmcplugin.SORT_METHOD_DATE
DATEADDED = xbmcplugin.SORT_METHOD_DATEADDED
DATE_TAKEN = xbmcplugin.SORT_METHOD_DATE_TAKEN
DRIVE_TYPE = xbmcplugin.SORT_METHOD_DRIVE_TYPE
DURATION = xbmcplugin.SORT_METHOD_DURATION
EPISODE = xbmcplugin.SORT_METHOD_EPISODE
FILE = xbmcplugin.SORT_METHOD_FILE
FULLPATH = xbmcplugin.SORT_METHOD_FULLPATH
GENRE = xbmcplugin.SORT_METHOD_GENRE
LABEL = xbmcplugin.SORT_METHOD_LABEL
LABEL_IGNORE_FOLDERS = xbmcplugin.SORT_METHOD_LABEL_IGNORE_FOLDERS
LABEL_IGNORE_THE = xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE
LASTPLAYED = xbmcplugin.SORT_METHOD_LASTPLAYED
LISTENERS = xbmcplugin.SORT_METHOD_LISTENERS
MPAA_RATING = xbmcplugin.SORT_METHOD_MPAA_RATING
NONE = xbmcplugin.SORT_METHOD_NONE
PLAYCOUNT = xbmcplugin.SORT_METHOD_PLAYCOUNT
PLAYLIST_ORDER = xbmcplugin.SORT_METHOD_PLAYLIST_ORDER
PRODUCTIONCODE = xbmcplugin.SORT_METHOD_PRODUCTIONCODE
PROGRAM_COUNT = xbmcplugin.SORT_METHOD_PROGRAM_COUNT
SIZE = xbmcplugin.SORT_METHOD_SIZE
SONG_RATING = xbmcplugin.SORT_METHOD_SONG_RATING
STUDIO = xbmcplugin.SORT_METHOD_STUDIO
STUDIO_IGNORE_THE = xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE
TITLE = xbmcplugin.SORT_METHOD_TITLE
TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_TITLE_IGNORE_THE
TRACKNUM = xbmcplugin.SORT_METHOD_TRACKNUM
UNSORTED = xbmcplugin.SORT_METHOD_UNSORTED
VIDEO_RATING = xbmcplugin.SORT_METHOD_VIDEO_RATING
VIDEO_RUNTIME = xbmcplugin.SORT_METHOD_VIDEO_RUNTIME
VIDEO_SORT_TITLE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE
VIDEO_SORT_TITLE_IGNORE_THE = xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE
VIDEO_TITLE = xbmcplugin.SORT_METHOD_VIDEO_TITLE
# VIDEO_USER_RATING = xbmcplugin.SORT_METHOD_VIDEO_USER_RATING
VIDEO_YEAR = xbmcplugin.SORT_METHOD_VIDEO_YEAR
@classmethod
def from_string(cls, sort_method):
"""Returns the sort method specified. sort_method is case insensitive.
Will raise an AttributeError if the provided sort_method does not
exist.
>>> SortMethod.from_string('title')
"""
return getattr(cls, sort_method.upper())
| [
"[email protected]"
] | |
2783a7a5fc798d4c1ba29f8d78f26538e7ab02a7 | 0a4e0e27c6bd19b35198a1e9068dbb5616e59d1e | /smspva_wrapper/ext/containers/number_object.py | 6871a60c4e700735c9177a44afd8ccebe4c48929 | [
"WTFPL"
] | permissive | babiato/smspva-wrapper | cd2aadaeaca5eb037902514d5a27b6c2f1b15861 | 5f52dc639240ca09a9f4275f153520f40c382b58 | refs/heads/master | 2022-09-08T23:48:35.765069 | 2020-05-31T00:15:54 | 2020-05-31T00:15:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | class NumberContainer:
"""Immutable high level container for get_number query"""
def __init__(self, data: dict, country: str, service: str):
self._data = data
self._country = country
self._service = service
@property
def data(self) -> dict:
return self._data
@property
def response(self) -> int:
return int(self._data.get('response'))
@property
def country_code(self) -> str:
return self._data.get('CountryCode')
@property
def number(self) -> str:
return self._data.get('number')
@property
def full_number(self) -> str:
return f"{self.country_code}{self.number}"
@property
def id(self) -> str:
return self._data.get('id')
@property
def country(self) -> str:
return self._country
@property
def service(self) -> str:
return self._service
| [
"[email protected]"
] | |
e84c28dca4dfbec02885f1b46195c418520fb320 | e2998df3cc014b7bec44a6a7b1e0083ae2526e8b | /code/sensor.py | c784a8cea4056ab1148e31ea739c08475b770287 | [] | no_license | syli9526/GlobalLounge-with-RaspberryPi | 40818d154e181980e1f9f96c16e6a047e7563ef0 | d62740847a41d12180de0d28fb0e5fe8a7023371 | refs/heads/master | 2020-05-05T13:22:56.461593 | 2019-06-13T15:36:26 | 2019-06-13T15:36:26 | 180,074,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | import RPi.GPIO as gpio
import atexit
import time
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
# init sonic sencor
a_out = 23
b_out = 24
gpio.setup(a_out,gpio.IN)
gpio.setup(b_out,gpio.IN)
# init motor
motor_pin = 18
gpio.setup(motor_pin, gpio.OUT)
pwm_motor = gpio.PWM(motor_pin, 50)
pwm_motor.start(0)
# init led
led = {'red': 21, 'green': 20, 'blue': 16}
gpio.setup(led['red'], gpio.OUT)
gpio.setup(led['green'], gpio.OUT)
gpio.setup(led['blue'], gpio.OUT)
pwm_red = gpio.PWM(led['red'], 1000)
pwm_green = gpio.PWM(led['green'], 1000)
pwm_blue = gpio.PWM(led['blue'], 1000)
led_pwm ={'red': pwm_red, 'green': pwm_green, 'blue': pwm_blue}
pwm_red.start(100)
pwm_green.start(0)
pwm_blue.start(0)
# init buzzer
buz = 27
gpio.setup(buz, gpio.OUT, initial = gpio.HIGH)
def buzzer():
gpio.output(buz,gpio.LOW)
time.sleep(0.1)
gpio.output(buz,gpio.HIGH)
def updateled(color, duty):
led_pwm[color].ChangeDutyCycle(float(duty))
def open():
pwm_motor.ChangeDutyCycle(6)
time.sleep(0.8)
pwm_motor.ChangeDutyCycle(0)
def close():
time.sleep(0.8)
pwm_motor.ChangeDutyCycle(2)
time.sleep(0.8)
pwm_motor.ChangeDutyCycle(0)
def first_infrared(s):
start_time = s
end_time = s
updateled('red', 0)
updateled('green', 100)
while end_time - start_time <= 10:
input_state = gpio.input(a_out)
if input_state == False:
open()
return second_infrared(time.time())
end_time = time.time()
updateled('red', 100)
updateled('blue', 0)
return False
def second_infrared(s):
start_time = s
end_time = s
while end_time - start_time <= 10:
input_state = gpio.input(b_out)
if input_state == False:
time.sleep(1)
close()
updateled('red', 100)
updateled('blue', 0)
return True
end_time = time.time()
updateled('red', 100)
updateled('green', 0)
return False
@atexit.register
def cleanUp():
gpio.cleanup()
# test
#first_infrared(time.time())
| [
"[email protected]"
] | |
44cd870582d791a6a48df0b3e8a3d83296006869 | 0bdcbc60761342f48441c691b09007b90275a140 | /mysite/urls.py~ | a7e580abc7d9ad332c3833b629d2d9fbd3052554 | [] | no_license | alviandk/project | 3fb9932a7cb6ee0f3e4ea11991fe959fc8273f1e | 1350580d2ce324b2d2c0e291a4ed406d17aa1950 | refs/heads/master | 2021-01-19T21:29:41.917436 | 2014-11-13T08:34:29 | 2014-11-13T08:34:29 | 22,808,164 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^polls/', include('polls.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| [
"[email protected]"
] | ||
23e7cbc6d439c28fc293b29294fcba10d6698948 | b1f611617e64d5ef8def4a4570f5ace537adfb99 | /fotoapp/migrations/0001_initial.py | cb7443ca79e6c23eb7ae41eb43f6b234e629d217 | [] | no_license | yeison000/LauraFotografia | 5e5c80ffe9c54067f9e9621546d31874bdf55e61 | 06a2d3598c7e339b42dadefebd8bf365557d02a2 | refs/heads/master | 2022-08-28T22:18:02.218634 | 2020-05-27T11:35:52 | 2020-05-27T11:35:52 | 267,299,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,346 | py | # Generated by Django 2.2.12 on 2020-05-20 07:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Albume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('albume_fields', models.ImageField(upload_to='static/imgenesportada')),
('nombre', models.CharField(max_length=70, null=True)),
('fecha', models.DateField()),
('comentario', models.CharField(max_length=500, null=True)),
('albumDestacado', models.BooleanField(default=False)),
],
options={
'verbose_name': '1) Album',
},
),
migrations.CreateModel(
name='Contacto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=500, null=True)),
('email', models.EmailField(max_length=254, null=True)),
('telefono', models.IntegerField(null=True)),
('respuesta1', models.CharField(max_length=500, null=True)),
('respuesta2', models.CharField(max_length=500, null=True)),
('respuesta3', models.CharField(max_length=500, null=True)),
('respuesta4', models.CharField(max_length=500, null=True)),
('respuesta5', models.CharField(max_length=500, null=True)),
],
options={
'verbose_name': '5) Contacto',
},
),
migrations.CreateModel(
name='Pack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(blank=True, max_length=500, null=True)),
('pack_fields', models.ImageField(blank=True, upload_to='static/packs')),
('descripcion', models.TextField(blank=True, max_length=900, null=True)),
],
options={
'verbose_name': '4) Pack',
},
),
migrations.CreateModel(
name='Portfolio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=70, null=True)),
('image', models.ImageField(upload_to='static/imagenPorfolio')),
],
options={
'verbose_name': '0) Portfolio',
},
),
migrations.CreateModel(
name='PortfolioSoloImg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=70, null=True)),
('image', models.ImageField(upload_to='static/imagenPorfolio')),
],
),
migrations.CreateModel(
name='SoloImg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='static/imagenesalbum')),
],
options={
'verbose_name': '3) Beauty & Retouch',
},
),
migrations.CreateModel(
name='ImgAlbume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='static/imagenesalbum')),
('fkAlbume', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='fotoapp.Albume')),
],
options={
'verbose_name': '2) Img de los Album',
},
),
migrations.AddField(
model_name='albume',
name='fkPortFolio',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='fotoapp.Portfolio'),
),
]
| [
"[email protected]"
] | |
ab7e77a9f9d8560917c2ce65308bf21936720b22 | 3c9b69b6e69f3c64b504e8107e9142ccde96adf9 | /ModularU/tests.py | f91d347cfaf318c780470243b08045813d1c4025 | [] | no_license | vangelis-michael/ds_Distributions | 3e201e4b4c0a62a8a42d8e7ff09eeedb36b71a99 | d0b04e711d7d1013e6909097233f256a4a7bb7d2 | refs/heads/master | 2022-04-26T10:42:45.558736 | 2020-04-29T03:23:24 | 2020-04-29T03:23:24 | 259,811,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | import unittest
from distributions import Gaussian
from distributions import Binomial
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
self.gaussian.read_data_file('numbers.txt')
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_readdata(self):
self.assertEqual(self.gaussian.data, [1, 3, 99, 100, 120, 32, 330, 23, 76, 44, 31], 'data not read in correctly')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947, 'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(), sum(self.gaussian.data) / float(len(self.gaussian.data)),\
'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 1634.25, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 1607.68, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gausssian_sum = gaussian_two + gaussian_one
self.assertEqual(gausssian_sum.mean, 55)
self.assertEqual(gausssian_sum.stdev, 5)
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded) | [
"[email protected]"
] | |
84b2fbb588d80a12b81c536ac4f27dba6aa59a1d | 3c1185645e41b14e6dfcb90ae028a9ace45951da | /figures/main/fig1_figManifold.py | 2dd28c5b8d9a4ae7f93a9e0791590a3bd1b395f0 | [] | no_license | aleifer/PredictionCode | 1bd4e9a99d3da8c215a13bd78079d16cb3120b80 | a255eb1f6dfe4ded27529b4b0284dd64bbf15ea7 | refs/heads/master | 2020-06-20T21:45:23.241393 | 2019-05-23T19:41:07 | 2019-05-23T19:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,888 | py |
# -*- coding: utf-8 -*-
"""
Created on Thu May 17 13:15:14 2018
Figure 2 - Behavior is represented in the brain
@author: monika
"""
import numpy as np
import matplotlib as mpl
from sklearn.feature_selection import mutual_info_classif
#
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import prediction.dataHandler as dh
# deliberate import all!
from prediction.stylesheet import *
# suddenly this isn't imported from stylesheet anymore...
mpl.rcParams["axes.labelsize"] = 14
mpl.rcParams["xtick.labelsize"] = 14
mpl.rcParams["ytick.labelsize"] = 14
mpl.rcParams["font.size"] = 12
fs = mpl.rcParams["font.size"]
################################################
#
# grab all the data we will need
#
################################################
data = {}
for typ in ['AML18', 'AML32']:
for condition in ['immobilized']:# ['moving', 'immobilized', 'chip']:
folder = '../../{}_{}/'.format(typ, condition)
dataLog = '../../{0}_{1}/{0}_{1}_datasets.txt'.format(typ, condition)
outLoc = "../../Analysis/{}_{}_results.hdf5".format(typ, condition)
outLocData = "../../Analysis/{}_{}.hdf5".format(typ, condition)
try:
# load multiple datasets
dataSets = dh.loadDictFromHDF(outLocData)
keyList = np.sort(dataSets.keys())
results = dh.loadDictFromHDF(outLoc)
# store in dictionary by typ and condition
key = '{}_{}'.format(typ, condition)
data[key] = {}
data[key]['dsets'] = keyList
data[key]['input'] = dataSets
data[key]['analysis'] = results
except IOError:
print typ, condition , 'not found.'
pass
print 'Done reading data.'
################################################
#
# create figure 1: This is twice the normal size
#
################################################
# we will select a 'special' dataset here, which will have all the individual plots
fig = plt.figure('FigManifolds', figsize=(7.5, 3.5))
# this gridspec makes one example plot of a heatmap with its PCA
#gs1 = gridspec.GridSpec(4, 3, width_ratios = [1,1,1], height_ratios=[0.1, 1,1,2])
#gsHeatmap = gridspec.GridSpecFromSubplotSpec(4,5, subplot_spec=gs1[0:4,:], width_ratios=[1.25, 0.1, 0.5,0.5,0.5], height_ratios = [0.1,10,10,10], wspace=0.3, hspace=0.25)
gs1 = gridspec.GridSpec(1,2)
gs1.update(left=0.1, right=0.98, bottom = 0.07, top=0.99, hspace=0.45, wspace=0.25)
################################################
#
# letters
#
################################################
# add a,b,c letters, 9 pt final size = 18pt in this case
letters = ['A', 'B']
y0 = 0.99
locations = [(0,y0), (0.5,y0), (0.62,y0)]
for letter, loc in zip(letters, locations):
plt.figtext(loc[0], loc[1], letter, weight='semibold', size=18,\
horizontalalignment='left',verticalalignment='top',)
################################################
#
#first row -- Signal analysis
#
################################################
# suddenly this isn't imported from stylesheet anymore...
mpl.rcParams["axes.labelsize"] = 14
mpl.rcParams["xtick.labelsize"] = 14
mpl.rcParams["ytick.labelsize"] = 14
mpl.rcParams["font.size"] = 12
fs = mpl.rcParams["font.size"]
#################################################
##
## plot beautiful manifold!
##
#################################################
dset = data['AML32_immobilized']['analysis']
for di, key in enumerate(dset.keys()[:1]):
print key
# pull out all the components
x, y, z = dset[key]['PCA']['fullData'][:3]
# normalize components
x/=np.max(x)
y/=np.max(y)
z/=np.max(z)
# smooth
# make smoooth
smooth = 12
from scipy.ndimage.filters import gaussian_filter1d
x = gaussian_filter1d(x, smooth)
y = gaussian_filter1d(y, smooth)
z = gaussian_filter1d(z, smooth)
# plot in 3d
ax = plt.subplot(gs1[di], projection='3d', clip_on = False, zorder=-100, aspect='equal')
ax.plot(x, y, z, color=N1, zorder=-10)
ax.scatter(x[::12],y[::12],z[::12], color=B1, s=2)
ax.set_xlabel(r'PC$_1$', labelpad = 5)
ax.set_ylabel(r'PC$_2$', labelpad = 5)
ax.set_zlabel(r'PC$_3$', labelpad = 5)
#ax.view_init(elev=40, azim=150)
ax.view_init(elev=-15, azim=-70)
moveAxes(ax, 'scale', 0.025)
moveAxes(ax, 'left', 0.05)
moveAxes(ax, 'down', 0.1)
ax = plt.subplot(gs1[di+1], projection='3d', clip_on = False, zorder=-100, aspect='equal')
ax.plot(x, y, z, color=N1, zorder=-10)
ax.scatter(x[::12],y[::12],z[::12], color=B1, s=2)
moveAxes(ax, 'scale', 0.025)
moveAxes(ax, 'left', 0.05)
moveAxes(ax, 'down', -0.025)
ax.set_xlabel(r'PC$_1$', labelpad = 5)
ax.set_ylabel(r'PC$_2$', labelpad = 5)
ax.set_zlabel(r'PC$_3$', labelpad = 5)
#l = np.zeros(3)#+axmin
#ax.view_init(elev=-40, azim=90)
plt.show() | [
"[email protected]"
] | |
fb3e652a8bf4fe2f6eed5bb7cdc7e078750da1c1 | e26c56d8a17a46fde6fea99ef307ba276a5ef9ab | /tools/webserver.py | 25bec807bac57e92454be246e037898dfbcab0b3 | [
"Apache-2.0"
] | permissive | Andy1314Chen/DIFM-Paddle | 37e5341259c3d49e7419890fcc6d83bcfdbb14b4 | 3d5debe9a17854e543cb29b9ab2a309453f4ac9d | refs/heads/main | 2023-07-03T11:58:19.431311 | 2021-08-14T14:01:43 | 2021-08-14T14:01:43 | 394,324,872 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,794 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import argparse
import paddle
import os
from paddle_serving_client import Client
from paddle_serving_app.local_predict import LocalPredictor
if sys.argv[1] == 'gpu':
from paddle_serving_server_gpu.web_service import WebService
elif sys.argv[1] == 'cpu':
from paddle_serving_server.web_service import WebService
class RecService(WebService):
def preprocess(self, feed=[], fetch=[]):
feed_dict = {}
feed = feed[0]
for key in feed.keys():
feed_dict[key] = np.array(feed[key])
return feed_dict, fetch, True
def postprocess(self, feed=[], fetch=[], fetch_map=None):
print(fetch)
print(fetch_map)
fetch_map = {x: fetch_map[x].tolist() for x in fetch_map.keys()}
return fetch_map
rec_service = RecService(name="rec")
#rec_service.setup_profile(30)
rec_service.load_model_config("serving_server")
rec_service.prepare_server(workdir="workdir", port=int(sys.argv[2]))
if sys.argv[1] == 'gpu':
rec_service.set_gpus("0")
rec_service.run_debugger_service(gpu=True)
elif sys.argv[1] == 'cpu':
rec_service.run_debugger_service()
rec_service.run_web_service()
| [
"[email protected]"
] | |
8103ce0c3a02a55f3e3d23c5cc17118e06757afa | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_263/ch83_2019_06_03_04_35_15_514602.py | fc2a375a3fc8fc8edf263466897d8015eca200d7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def medias_por_inicial(entrada):
alunos = entrada
saida = {}
n = 1
for k,v in alunos:
if k[0] not in saida:
saida[k[0]] = v
else:
saida[k[0]] = ((saida[k[0]]*n) + v)/(n+1)
n += 1
return saida
| [
"[email protected]"
] | |
e3852c7495b07bcb8e6181fa94249b1176822a98 | 2594650405c1424bec1ab14c1ce994906d6cc961 | /ProjectEuler/p001_multiply_3_and_5.py | 9bb990a9e00e727ee83b5f53c216a04cfe435f19 | [] | no_license | aa2276016/Learning_Python | 10dd46eeb77d5ec05b4e607c523e9e5597a2e7ee | f0e3b4876ea078a45493eb268992cec62ccd29d1 | refs/heads/master | 2021-10-19T08:34:15.694353 | 2018-03-17T02:38:49 | 2018-03-17T02:38:49 | 125,590,648 | 0 | 0 | null | 2018-03-17T02:41:26 | 2018-03-17T02:41:26 | null | UTF-8 | Python | false | false | 1,833 | py | # P001 Multiply 3 and 5
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
# The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
def pe_001(n1, n2, limit):
result = 0
for i in range(1, limit):
if i % n1 == 0 or i % n2 == 0:
result += i
return result
# This problem had a difficulty rating of 5%.
# Additional information
# This method is simple enough but may overflow when the range is large, i.e. 10000000
# A better way is to calculate [the sum of 3 divisble + sum of 5 divisible - sum of 15 divisible]
def pe_001b(n1, n2, limit):
# write a function to get the end number and the total number of divisible and then calculate the sum by using formula of Arithmetic progression
def sum_divisible(divisor):
num_end = (limit - 1) - (limit - 1) % divisor
total_n = num_end // divisor
return (divisor + num_end) * total_n // 2
return sum_divisible(n1) + sum_divisible(n2) - sum_divisible(n1*n2)
if __name__ == '__main__':
assert pe_001(3, 5, 10) == 23, 'below 10'
print('answer is:')
print(pe_001(3, 5, 1000)) # passed
print()
print('new method:')
assert pe_001b(3, 5, 10) == 23, 'below 10'
print('answer is:')
print(pe_001b(3, 5, 1000)) # passed
# test time consumption by using timeit
if __name__ == '__main__':
import timeit
print('\npe001, original method:')
print(timeit.repeat('pe_001(3, 5, 1000)', setup='from __main__ import pe_001', repeat=3, number=10000))
print('\npe001b, new method:')
print(timeit.repeat('pe_001b(3, 5, 1000)', setup='from __main__ import pe_001b', repeat=3, number=10000))
# new method is much faser
print(pe_001b(3, 5, 1000000000)) # test for large number, still works
| [
"[email protected]"
] | |
58a45363d82384330aff0c3ec93214b297b6794f | cf8be80fe9d7acfae03d86430d1c8ff8d22a8655 | /ribosome/test/config.py | 53acd0ccf42a1abc38ccbf82c7b077f54f457fa6 | [
"MIT"
] | permissive | tek/ribosome-py | 4da2faf3f7c2d646c5a90bf73e81ec12bd360d38 | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | refs/heads/master | 2022-12-21T22:46:49.075358 | 2020-08-31T16:22:51 | 2020-08-31T16:22:51 | 66,086,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | from __future__ import annotations
from typing import Any, Callable, Optional
from ribosome.config.component import Component
from amino import Map, Lists, Dat, Path, List, Nil, Maybe
from amino.test import temp_dir
from ribosome.config.config import Config
from ribosome.compute.program import Program
from ribosome.rpc.api import RpcProgram, rpc
from ribosome.nvim.io.compute import NvimIO
from ribosome.compute.interpret import ProgIOInterpreter
from ribosome.nvim.io.api import N
from ribosome.test.request import Handler, no_handler
default_config_name = 'spec'
no_pre = lambda: N.unit
class TestConfig(Dat['TestConfig']):
    """Bundle of everything needed to run a plugin test: the plugin's `Config`
    plus environment settings (log locations, enabled components, vim
    variables, rpc request handlers, tmux socket, autostart flag).
    """
    @staticmethod
    def cons(
            config: Config,
            pre: Callable[[], NvimIO[None]]=None,
            log_dir: Path=None,
            log_file: Path=None,
            components: List[str]=Nil,
            io_interpreter: ProgIOInterpreter=None,
            logger: Program[None]=None,
            vars: Map[str, Any]=Map(),
            request_handler: Handler=no_handler,
            function_handler: Handler=no_handler,
            command_handler: Handler=no_handler,
            autostart: bool=True,
            config_path: str=None,
            tmux_socket: str=None,
    ) -> 'TestConfig':
        """Smart constructor: fills in defaults (temp log dir, log file named
        after the plugin, no-op `pre` hook) and wraps the optional strings
        `config_path`/`tmux_socket` in `Maybe`.
        """
        # default the log dir to a temp dir and the log file to <log_dir>/<plugin name>
        ld = log_dir or temp_dir('log')
        lf = log_file or ld / config.basic.name
        return TestConfig(
            config,
            pre or no_pre,
            ld,
            lf,
            components,
            io_interpreter,
            logger,
            vars,
            request_handler,
            function_handler,
            command_handler,
            autostart,
            Maybe.optional(config_path),
            Maybe.optional(tmux_socket),
        )
    def __init__(
            self,
            config: Config,
            pre: Callable[[], NvimIO[None]],
            log_dir: Path,
            log_file: Path,
            components: List[str],
            io_interpreter: Optional[ProgIOInterpreter],
            logger: Optional[Program[None]],
            vars: Map[str, Any],
            request_handler: Handler,
            function_handler: Handler,
            command_handler: Handler,
            autostart: bool,
            config_path: Maybe[str],
            tmux_socket: Maybe[str],
    ) -> None:
        # plain field assignments; construction with defaults goes through `cons`
        self.config = config
        self.pre = pre
        self.log_dir = log_dir
        self.log_file = log_file
        self.components = components
        self.io_interpreter = io_interpreter
        self.logger = logger
        self.vars = vars
        self.request_handler = request_handler
        self.function_handler = function_handler
        self.command_handler = command_handler
        self.autostart = autostart
        self.config_path = config_path
        self.tmux_socket = tmux_socket
    def with_vars(self, **kw: Any) -> 'TestConfig':
        """Return a copy of this config with `kw` merged into the vim variables map."""
        return self.copy(vars=self.vars ** Map(kw))
def spec_config(*programs: RpcProgram) -> Config:
    """Build a minimal plugin Config named 'spec' whose single 'main'
    component carries the given rpc programs.
    """
    main_component: Component = Component.cons(
        'main',
        rpc=Lists.wrap(programs),
    )
    return Config.cons(
        name=default_config_name,
        prefix=default_config_name,
        components=Map(main=main_component),
        internal_component=False,
    )
def single_prog_config(prog: Program, **kw: Any) -> Config:
    """Wrap a single program as a write-rpc and build a spec Config from it."""
    rpc_program = rpc.write(prog).conf(**kw)
    return spec_config(rpc_program)
__all__ = ('single_prog_config', 'spec_config')
| [
"[email protected]"
] | |
52106b532e462e7226a7a7861e4d795b8c6d8e70 | d90b7ba2f8027b4fe86c80f7d7ddb4f3338f48c7 | /simplepytorch/api.py | a5d14539005e9794c28f0cfa53bec668ab6cea09 | [
"CC-BY-4.0",
"MIT"
] | permissive | adgaudio/simplepytorch | 7e7269ba527710d27bed4fd9a9c9a2faec210e43 | 5804bd8edd660fcfbe04a0af3ddea8e20d2c1f95 | refs/heads/master | 2023-05-14T09:06:37.137486 | 2023-05-05T12:42:44 | 2023-05-05T12:42:44 | 241,730,763 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | """
DEPRECATED. Use simplepytorch.trainlib instead.
Code available to model configs, assembled all in one place
"""
import torch.utils.data as TD
import torch.multiprocessing
from . import metrics
from . import datasets
from .cache import Cache
from .cmdline import ModelConfigABC, CmdlineOptions, load_model_config
from .logging_tools import LogRotate, CsvLogger, PickleLogger, HDFLogger, DoNothingLogger, MultiplexedLogger, DataLogger
from .early_stopping import EarlyStopping
from .feedforward import (
FeedForwardModelConfig, FeedForwardBinaryClassifier,
train, train_one_epoch, val_perf_binary_classifier)
def create_data_loader(config, idxs, dataset, shuffle=True,
                       drop_last=False,
                       num_workers=torch.multiprocessing.cpu_count()-1):
    """Build a DataLoader over the subset of `dataset` selected by `idxs`.

    :param config: object exposing `batch_size` (a ModelConfig here)
    :param idxs: index array/tensor into `dataset` (assumed integer indices
        supporting `.max()` -- e.g. a numpy array or torch tensor)
    :param dataset: the full torch Dataset to draw from
    :param shuffle: iterate the selected subset in random order if True
    :param drop_last: drop the final partial batch
    :param num_workers: worker processes for the loader
    """
    assert idxs.max() < len(dataset)  # sanity check: indices must be in range
    subset = TD.Subset(dataset, idxs)
    # Sample over the subset itself.  The previous implementation combined
    # SubsetRandomSampler(idxs) with Subset(dataset, idxs): the sampler's
    # original-space indices were then re-mapped by the Subset, yielding the
    # wrong elements (or an IndexError) whenever shuffle=True.
    if shuffle:
        sampler = TD.RandomSampler(subset)
    else:
        sampler = TD.SequentialSampler(subset)
    return TD.DataLoader(
        subset,
        batch_size=config.batch_size,
        sampler=sampler,
        drop_last=drop_last,
        pin_memory=True, num_workers=num_workers
    )
| [
"[email protected]"
] | |
19a899ebcf968564c35c02e6998ba8e37fbb82f4 | 98b4aeadab444eaf6f0d5b469c199e6d24a52f7f | /step03/2438.py | 378a78a725686c42b067df3904a24406eb06f014 | [] | no_license | kwr0113/BOJ_Python | 7a9dc050bb3bb42ae2b03671c5d6fa76cc0d6d99 | 27bafdaafc44115f55f0b058829cb36b8c79469a | refs/heads/master | 2023-06-10T23:22:20.639613 | 2021-06-25T07:25:53 | 2021-06-25T07:25:53 | 328,057,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | # 2438.py
# read the triangle height, then print one row per line: row k gets k stars
height = int(input())
for row in range(height):
    print("*" * (row + 1))
| [
"[email protected]"
] | |
578504e5bd25ea5f5602510759d6ebff651bfce6 | b096157f61f6e3944db4b79783f072851cf19f54 | /text_wrap.py | f3883b45f4b574a7441d4e43a0a28349afee3df0 | [] | no_license | billyrayvalentine/python-misc | 2a475265766a1e3a1f4982bd871dd5cf5809e8d3 | dc4418d9e06724f63a1a04315d71003bf44448d4 | refs/heads/master | 2022-12-02T21:45:53.511935 | 2022-05-03T15:42:15 | 2022-05-03T15:42:15 | 48,059,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | #!/usr/bin/env python3
# Wrap stdin to 32 chars but preserve line break for paragraphs
import sys
import configparser
import os
import argparse
import textwrap
def process_line(line):
    """Wrap one input line to 32 columns; a bare newline becomes a paragraph break."""
    if line != "\n":
        return textwrap.fill(line, width=32, fix_sentence_endings=True)
    # blank input line: emit a double newline to keep the paragraph separation
    return "\n\n"
if __name__ == "__main__":
    # command line interface: only a (currently unused) debug-level option
    arg_parser = argparse.ArgumentParser(description="texttoslide")
    arg_parser.add_argument(
        "-d",
        "--debug",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
        help="debug level",
        default="INFO",
    )
    cli_args = arg_parser.parse_args()
    # wrap every stdin line and stream the results straight to stdout
    for wrapped in (process_line(raw) for raw in sys.stdin.readlines()):
        sys.stdout.writelines(wrapped)
| [
"[email protected]"
] | |
78d65402507319fb63629a44d60abce6b988509e | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/lib/galaxy/visualization/registry.py | cb2b2affc82fa1ae38b196d09c71e867edc19b3d | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 37,808 | py | """
Lower level of visualization framework which does three main things:
- associate visualizations with objects
- create urls to visualizations based on some target object(s)
- unpack a query string into the desired objects needed for rendering
"""
import os
import shutil
import glob
from galaxy import util
import galaxy.model
from galaxy.web import url_for
from galaxy.web.base import pluginframework
import logging
log = logging.getLogger( __name__ )
__TODO__ = """
BUGS:
anon users clicking a viz link gets 'must be' msg in galaxy_main (w/ masthead)
should not show visualizations (no icon)?
newick files aren't being sniffed prop? - datatype is txt
have parsers create objects instead of dicts
allow data_sources with no model_class but have tests (isAdmin, etc.)
maybe that's an instance of User model_class?
some confused vocabulary in docs, var names
tests:
anding, grouping, not
has_dataprovider
user is admin
data_sources:
lists of
add description element to visualization.
user_pref for ordering/ex/inclusion of particular visualizations
"""
# ------------------------------------------------------------------- the registry
class VisualizationsRegistry( pluginframework.PageServingPluginManager ):
    """
    Registry of filesystem-discovered visualization plugins.

    Main responsibilities are:
        - discovering visualization plugins in the filesystem
        - testing if an object has a visualization that can be applied to it
        - generating a link to controllers.visualization.render with
            the appropriate params
        - validating and parsing params into resources (based on a context)
            used in the visualization template
    """
    # built-in visualizations that have their own render method in ctrls/visualization;
    # these are handled somewhat differently - they're passed onto their resp. methods in ctrl.visualization
    #TODO: change/remove if/when they can be updated to use this system
    BUILT_IN_VISUALIZATIONS = [
        'trackster',
        'circster',
        'sweepster',
        'phyloviz'
    ]

    def __str__( self ):
        # printable identity for logging: just the class name
        return self.__class__.__name__

    def __init__( self, app, **kwargs ):
        """
        Set up the XML config parser, scan for plugins (done by the superclass
        __init__, which calls `load_plugin`), and create the resource parser.
        """
        # must exist before super().__init__: the superclass plugin scan
        # calls our load_plugin, which uses self.config_parser
        self.config_parser = VisualizationsConfigParser()
        super( VisualizationsRegistry, self ).__init__( app, 'visualizations', **kwargs )
        # what to use to parse query strings into resources/vars for the template
        self.resource_parser = ResourceParser()
        log.debug( '%s loaded', str( self ) )

    def is_plugin( self, plugin_path ):
        """
        Determines whether the given filesystem path contains a plugin.

        In this base class, all sub-directories are considered plugins.

        :type   plugin_path:    string
        :param  plugin_path:    relative or absolute filesystem path to the
            potential plugin
        :rtype:                 bool
        :returns:               True if the path contains a plugin
        """
        # plugin_path must be a directory, have a config dir, and a config file matching the plugin dir name
        if not os.path.isdir( plugin_path ):
            # super won't work here - different criteria
            return False
        if not 'config' in os.listdir( plugin_path ):
            return False
        expected_config_filename = '%s.xml' %( os.path.split( plugin_path )[1] )
        if not os.path.isfile( os.path.join( plugin_path, 'config', expected_config_filename ) ):
            return False
        return True

    def load_plugin( self, plugin_path ):
        """
        Create the visualization plugin object, parse its configuration file,
        and return it.

        Plugin bunches are decorated with:
        * config_file : the path to this visualization's config file
        * config : the parsed configuration for this visualization

        Returns None (skipping the plugin) if the config file fails to parse.

        :type   plugin_path:    string
        :param  plugin_path:    relative or absolute filesystem path to the plugin
        :rtype:                 ``util.bunch.Bunch``
        :returns:               the loaded plugin object
        """
        #TODO: possibly move this after the config parsing to allow config to override?
        plugin = super( VisualizationsRegistry, self ).load_plugin( plugin_path )
        # config file is required, otherwise skip this visualization
        plugin[ 'config_file' ] = os.path.join( plugin_path, 'config', ( plugin.name + '.xml' ) )
        config = self.config_parser.parse_file( plugin.config_file )
        if not config:
            return None
        plugin[ 'config' ] = config
        return plugin

    # -- getting resources for visualization templates from link query strings --
    # -- building links to visualizations from objects --
    def get_visualizations( self, trans, target_object ):
        """
        Get the names of visualizations usable on the `target_object` and
        the urls to call in order to render the visualizations.

        :returns: list of link dictionaries (see `get_visualization`), one per
            applicable plugin
        """
        #TODO:?? a list of objects? YAGNI?
        applicable_visualizations = []
        for vis_name in self.plugins:
            url_data = self.get_visualization( trans, vis_name, target_object )
            if url_data:
                applicable_visualizations.append( url_data )
        return applicable_visualizations

    def get_visualization( self, trans, visualization_name, target_object ):
        """
        Return data to build a url to the visualization with the given
        `visualization_name` if it's applicable to `target_object` or
        `None` if it's not.

        The returned dict has 'href', 'html', and 'target' keys suitable for
        building an anchor/menu entry client-side.
        """
        visualization = self.plugins.get( visualization_name, None )
        if not visualization:
            return None
        data_sources = visualization.config[ 'data_sources' ]
        for data_source in data_sources:
            # currently a model class is required
            model_class = data_source[ 'model_class' ]
            if not isinstance( target_object, model_class ):
                continue
            # tests are optional - default is the above class test
            tests = data_source[ 'tests' ]
            if tests and not self.is_object_applicable( trans, target_object, tests ):
                continue
            param_data = data_source[ 'to_params' ]
            url = self.get_visualization_url( trans, target_object, visualization_name, param_data )
            link_text = visualization.config.get( 'link_text', None )
            if not link_text:
                # default to visualization name, titlecase, and replace underscores
                link_text = visualization_name.title().replace( '_', ' ' )
            render_location = visualization.config.get( 'render_location' )
            # remap some of these vars for direct use in ui.js, PopupMenu (e.g. text->html)
            return {
                'href'  : url,
                'html'  : link_text,
                'target': render_location
            }
        return None

    def is_object_applicable( self, trans, target_object, data_source_tests ):
        """
        Run a visualization's data_source tests to find out if
        it be applied to the target_object.

        Tests are OR'd: the first passing test makes the object applicable.
        """
        #log.debug( 'is_object_applicable( self, trans, %s, %s )', target_object, data_source_tests )
        for test in data_source_tests:
            test_type   = test[ 'type' ]
            result_type = test[ 'result_type' ]
            test_result = test[ 'result' ]
            test_fn     = test[ 'fn' ]
            #log.debug( '%s %s: %s, %s, %s, %s', str( target_object ), 'is_object_applicable',
            #           test_type, result_type, test_result, test_fn )
            if test_type == 'isinstance':
                # parse test_result based on result_type (curr: only datatype has to do this)
                if result_type == 'datatype':
                    # convert datatypes to their actual classes (for use with isinstance)
                    test_result = trans.app.datatypes_registry.get_datatype_class_by_name( test_result )
                    if not test_result:
                        # warn if can't find class, but continue (with other tests)
                        log.warn( 'visualizations_registry cannot find class (%s) for applicability test', test_result )
                        continue
            #NOTE: tests are OR'd, if any test passes - the visualization can be applied
            if test_fn( target_object, test_result ):
                #log.debug( 'test passed' )
                return True
        return False

    def get_visualization_url( self, trans, target_object, visualization_name, param_data ):
        """
        Generates a url for the visualization with `visualization_name`
        for use with the given `target_object` with a query string built
        from the configuration data in `param_data`.
        """
        #precondition: the target_object should be usable by the visualization (accrd. to data_sources)
        # convert params using vis.data_source.to_params
        params = self.get_url_params( trans, target_object, param_data )
        # we want existing visualizations to work as normal but still be part of the registry (without mod'ing)
        #   so generate their urls differently
        url = None
        if visualization_name in self.BUILT_IN_VISUALIZATIONS:
            url = url_for( controller='visualization', action=visualization_name, **params )
        else:
            url = url_for( controller='visualization', action='render',
                           visualization_name=visualization_name, **params )
        #TODO:?? not sure if embedded would fit/used here? or added in client...
        return url

    def get_url_params( self, trans, target_object, param_data ):
        """
        Convert the applicable objects and assoc. data into a param dict
        for a url query string to add to the url that loads the visualization.

        Any id-like values ('id', '_id' suffixed keys) are encoded before return.
        """
        params = {}
        for to_param_name, to_param_data in param_data.items():
            #TODO??: look into params as well? what is required, etc.
            target_attr = to_param_data.get( 'param_attr', None )
            assign = to_param_data.get( 'assign', None )
            # one or the other is needed
            # assign takes precedence (goes last, overwrites)?
            #NOTE this is only one level
            if target_attr and hasattr( target_object, target_attr ):
                params[ to_param_name ] = getattr( target_object, target_attr )
            if assign:
                params[ to_param_name ] = assign
        #NOTE!: don't expose raw ids: encode id, _id
        if params:
            params = trans.security.encode_dict_ids( params )
        return params

    # -- getting resources for visualization templates from link query strings --
    def get_resource_params_and_modifiers( self, visualization_name ):
        """
        Get params and modifiers for the given visualization as a 2-tuple.

        Both `params` and `param_modifiers` default to an empty dictionary.
        """
        visualization = self.plugins.get( visualization_name )
        expected_params = visualization.config.get( 'params', {} )
        param_modifiers = visualization.config.get( 'param_modifiers', {} )
        return ( expected_params, param_modifiers )

    def query_dict_to_resources( self, trans, controller, visualization_name, query_dict ):
        """
        Use a resource parser, controller, and a visualization's param configuration
        to convert a query string into the resources and variables a visualization
        template needs to start up.
        """
        param_confs, param_modifiers = self.get_resource_params_and_modifiers( visualization_name )
        resources = self.resource_parser.parse_parameter_dictionary(
            trans, controller, param_confs, query_dict, param_modifiers )
        return resources
# ------------------------------------------------------------------- parsing the config file
class ParsingException( ValueError ):
    """
    Raised when the visualizations framework configuration XML contains
    invalid or missing elements and cannot be parsed.
    """
class VisualizationsConfigParser( object ):
    """
    Class that parses a visualizations configuration XML file.

    Each visualization will get the following info:
        - how to load a visualization:
            -- how to find the proper template
            -- how to convert query string into DB models
        - when/how to generate a link to the visualization
            -- what provides the data
            -- what information needs to be added to the query string
    """
    # only these values are accepted for a <render_location> element
    VALID_RENDER_LOCATIONS = [ 'galaxy_main', '_top', '_blank' ]

    def __init__( self, debug=False ):
        # debug flag is stored but not otherwise used here
        self.debug = debug

        # what parsers should be used for sub-components
        self.data_source_parser = DataSourceParser()
        self.param_parser = ParamParser()
        self.param_modifier_parser = ParamModifierParser()

    def parse_file( self, xml_filepath ):
        """
        Parse the given XML file for visualizations data.

        :returns: the parsed visualization config dictionary
            (see `parse_visualization`) or None if parsing failed
        """
        try:
            xml_tree = galaxy.util.parse_xml( xml_filepath )
            visualization = self.parse_visualization( xml_tree.getroot() )
            return visualization
        # skip vis' with parsing errors - don't shutdown the startup
        except ParsingException, parse_exc:
            log.exception( 'Skipped visualization config "%s" due to parsing errors', xml_filepath )
            return None

    def parse_visualization( self, xml_tree ):
        """
        Parse the template, name, and any data_sources and params from the
        given `xml_tree` for a visualization.

        :raises ParsingException: if there are no valid data_sources or
            no template filename
        """
        returned = {}

        # data_sources are the kinds of objects/data associated with the visualization
        #   e.g. views on HDAs can use this to find out what visualizations are applicable to them
        data_sources = []
        data_sources_confs = xml_tree.find( 'data_sources' )
        for data_source_conf in data_sources_confs.findall( 'data_source' ):
            data_source = self.data_source_parser.parse( data_source_conf )
            if data_source:
                data_sources.append( data_source )
        # at least one valid data_source is required
        if not data_sources:
            raise ParsingException( 'No valid data_sources for visualization' )
        returned[ 'data_sources' ] = data_sources

        # parameters spell out how to convert query string params into resources and data
        #   that will be parsed, fetched, etc. and passed to the template
        # list or dict? ordered or not?
        params = {}
        param_confs = xml_tree.find( 'params' )
        for param_conf in param_confs.findall( 'param' ):
            param = self.param_parser.parse( param_conf )
            if param:
                params[ param_conf.text ]= param
        # params are not required
        if params:
            returned[ 'params' ] = params

        # param modifiers provide extra information for other params (e.g. hda_ldda='hda' -> dataset_id is an hda id)
        # store these modifiers in a 2-level dictionary { target_param: { param_modifier_key: { param_mod_data }
        # ugh - wish we didn't need these
        param_modifiers = {}
        for param_modifier_conf in param_confs.findall( 'param_modifier' ):
            param_modifier = self.param_modifier_parser.parse( param_modifier_conf )
            # param modifiers map accrd. to the params they modify (for faster lookup)
            target_param = param_modifier_conf.get( 'modifies' )
            param_modifier_key = param_modifier_conf.text
            if param_modifier and target_param in params:
                # multiple params can modify a single, other param,
                #   so store in a sub-dict, initializing if this is the first
                if target_param not in param_modifiers:
                    param_modifiers[ target_param ] = {}
                param_modifiers[ target_param ][ param_modifier_key ] = param_modifier
        # not required
        if param_modifiers:
            returned[ 'param_modifiers' ] = param_modifiers

        # the template to use in rendering the visualization (required)
        template = xml_tree.find( 'template' )
        if template == None or not template.text:
            raise ParsingException( 'template filename required' )
        returned[ 'template' ] = template.text

        # link_text: the string to use for the text of any links/anchors to this visualization
        link_text = xml_tree.find( 'link_text' )
        if link_text != None and link_text.text:
            # NOTE(review): this stores the Element itself, not link_text.text -
            #   the registry's get_visualization passes it straight through as anchor
            #   html; looks unintended - confirm whether .text was meant here
            returned[ 'link_text' ] = link_text

        # render_location: where in the browser to open the rendered visualization
        #   defaults to: galaxy_main
        render_location = xml_tree.find( 'render_location' )
        if( ( render_location != None and render_location.text )
        and ( render_location.text in self.VALID_RENDER_LOCATIONS ) ):
            returned[ 'render_location' ] = render_location.text
        else:
            returned[ 'render_location' ] = 'galaxy_main'

        # consider unifying the above into it's own element and parsing method
        return returned
# ------------------------------------------------------------------- parsing a query string into resources
class DataSourceParser( object ):
    """
    Component class of VisualizationsConfigParser that parses data_source elements
    within visualization elements.

    data_sources are (in the extreme) any object that can be used to produce
    data for the visualization to consume (e.g. HDAs, LDDAs, Jobs, Users, etc.).
    There can be more than one data_source associated with a visualization.
    """
    # these are the allowed classes to associate visualizations with (as strings)
    # any model_class element not in this list will throw a ParsingException
    ALLOWED_MODEL_CLASSES = [
        'HistoryDatasetAssociation',
        'LibraryDatasetDatasetAssociation'
    ]
    # separator used in compound test_attr chains (e.g. 'dataset.datatype')
    ATTRIBUTE_SPLIT_CHAR = '.'
    # these are the allowed object attributes to use in data source tests
    # any attribute element not in this list will throw a ParsingException
    ALLOWED_DATA_SOURCE_ATTRIBUTES = [
        'datatype'
    ]

    def parse( self, xml_tree ):
        """
        Return a visualization data_source dictionary parsed from the given
        XML element.

        The dictionary contains 'model_class', optional 'tests', and 'to_params'.
        """
        returned = {}
        # model_class (required, only one) - look up and convert model_class to actual galaxy model class
        model_class = self.parse_model_class( xml_tree.find( 'model_class' ) )
        if not model_class:
            raise ParsingException( 'data_source needs a model class' )
        returned[ 'model_class' ] = model_class

        # tests (optional, 0 or more) - data for boolean test: 'is the visualization usable by this object?'
        tests = self.parse_tests( xml_tree.findall( 'test' ) )
        # when no tests are given, default to isinstance( object, model_class )
        if tests:
            returned[ 'tests' ] = tests

        # to_params (optional, 0 or more) - tells the registry to set certain params based on the model_class, tests
        returned[ 'to_params' ] = {}
        to_params = self.parse_to_params( xml_tree.findall( 'to_param' ) )
        if to_params:
            returned[ 'to_params' ] = to_params
        return returned

    def parse_model_class( self, xml_tree ):
        """
        Convert xml model_class element to a galaxy model class
        (or None if model class is not found).

        This element is required and only the first element is used.
        The model_class string must be in ALLOWED_MODEL_CLASSES.
        """
        if xml_tree is None or not xml_tree.text:
            raise ParsingException( 'data_source entry requires a model_class' )

        if xml_tree.text not in self.ALLOWED_MODEL_CLASSES:
            log.debug( 'available data_source model_classes: %s' %( str( self.ALLOWED_MODEL_CLASSES ) ) )
            raise ParsingException( 'Invalid data_source model_class: %s' %( xml_tree.text ) )

        # look up the model from the model module returning an empty data_source if not found
        model_class = getattr( galaxy.model, xml_tree.text, None )
        return model_class

    def _build_getattr_lambda( self, attr_name_list ):
        """
        Recursively builds a compound lambda function of getattr's
        from the attribute names given in `attr_name_list`.

        e.g. [ 'dataset', 'datatype' ] produces (lambda o: o.dataset.datatype)
        """
        if len( attr_name_list ) == 0:
            # identity - if list is empty, return object itself
            return lambda o: o

        next_attr_name = attr_name_list[-1]
        if len( attr_name_list ) == 1:
            # recursive base case
            return lambda o: getattr( o, next_attr_name )

        # recursive case: resolve the leading attribute chain on the object first,
        #   then fetch the final attribute from that result
        # (BUG FIX: the inner lambda was previously never applied to the object,
        #   so getattr was called on the function object itself)
        return lambda o: getattr( self._build_getattr_lambda( attr_name_list[:-1] )( o ), next_attr_name )

    def parse_tests( self, xml_tree_list ):
        """
        Returns a list of test dictionaries that the registry can use
        against a given object to determine if the visualization can be
        used with the object.
        """
        # tests should NOT include expensive operations: reading file data, running jobs, etc.
        # do as much here as possible to reduce the overhead of seeing if a visualization is applicable
        # currently tests are or'd only (could be and'd or made into compound boolean tests)
        tests = []
        if not xml_tree_list:
            return tests

        for test_elem in xml_tree_list:
            test_type = test_elem.get( 'type' )
            test_result = test_elem.text
            if not test_type or not test_result:
                log.warn( 'Skipping test. Needs both type attribute and text node to be parsed: '
                        + '%s, %s' %( test_type, test_elem.text ) )
                continue

            # test_attr can be a dot separated chain of object attributes (e.g. dataset.datatype) - convert to list
            #TODO: too dangerous - constrain these to some allowed list
            #TODO: does this err if no test_attr - it should...
            test_attr = test_elem.get( 'test_attr' )
            test_attr = test_attr.split( self.ATTRIBUTE_SPLIT_CHAR ) if isinstance( test_attr, str ) else []
            # build a lambda function that gets the desired attribute to test
            getter = self._build_getattr_lambda( test_attr )
            # result type should tell the registry how to convert the result before the test
            test_result_type = test_elem.get( 'result_type' ) or 'string'

            # NOTE: `getter` is bound as a default argument below so each test
            #   closes over its own getter; a plain closure would late-bind and
            #   leave every stored test_fn using the loop's final getter
            # test functions should be sent an object to test, and the parsed result expected from the test
            # is test_attr attribute an instance of result
            if test_type == 'isinstance':
                #TODO: wish we could take this further but it would mean passing in the datatypes_registry
                test_fn = lambda o, result, getter=getter: isinstance( getter( o ), result )

            # does the object itself have a datatype attr and does that datatype have the given dataprovider
            elif test_type == 'has_dataprovider':
                test_fn = lambda o, result, getter=getter: ( hasattr( getter( o ), 'has_dataprovider' )
                                                         and getter( o ).has_dataprovider( result ) )

            # default to simple (string) equivalence (coercing the test_attr to a string)
            else:
                test_fn = lambda o, result, getter=getter: str( getter( o ) ) == result

            tests.append({
                'type'          : test_type,
                'result'        : test_result,
                'result_type'   : test_result_type,
                'fn'            : test_fn
            })
        return tests

    def parse_to_params( self, xml_tree_list ):
        """
        Given a list of `to_param` elements, returns a dictionary that allows
        the registry to convert the data_source into one or more appropriate
        params for the visualization.
        """
        to_param_dict = {}
        if not xml_tree_list:
            return to_param_dict

        for element in xml_tree_list:
            # param_name required
            param_name = element.text
            if not param_name:
                raise ParsingException( 'to_param requires text (the param name)' )

            param = {}
            # assign is a shortcut param_attr that assigns a value to a param (as text)
            assign = element.get( 'assign' )
            if assign != None:
                param[ 'assign' ] = assign

            # param_attr is the attribute of the object (that the visualization will be applied to)
            #   that should be converted into a query param (e.g. param_attr="id" -> dataset_id)
            #TODO:?? use the build attr getter here?
            # simple (1 lvl) attrs for now
            param_attr = element.get( 'param_attr' )
            if param_attr != None:
                param[ 'param_attr' ] = param_attr

            # element must have either param_attr or assign? what about no params (the object itself)
            if not param_attr and not assign:
                raise ParsingException( 'to_param requires either assign or param_attr attributes: %s', param_name )

            #TODO: consider making the to_param name an attribute (param="hda_ldda") and the text what would
            #   be used for the conversion - this would allow CDATA values to be passed
            #<to_param param="json" type="assign"><![CDATA[{ "one": 1, "two": 2 }]]></to_param>

            if param:
                to_param_dict[ param_name ] = param

        return to_param_dict
class ParamParser( object ):
    """
    Component class of VisualizationsConfigParser that parses param elements
    within visualization elements.

    A param describes one query string entry that will be parsed (according
    to its `type`, etc.) and handed to the visualization template by
    controllers.visualization.render.
    """
    # type used when a param element declares none
    DEFAULT_PARAM_TYPE = 'str'

    def parse( self, xml_tree ):
        """
        Parse a single visualization parameter from the given `xml_tree`
        element into a configuration dictionary.
        """
        # the element text is the param's key in the query string - required
        if not xml_tree.text:
            raise ParsingException( 'Param entry requires text' )

        parsed = { 'type': self.parse_param_type( xml_tree ) }

        # is the parameter required in the template and, if not,
        #   what default value should stand in for it?
        is_required = ( xml_tree.get( 'required' ) == "true" )
        parsed[ 'required' ] = is_required
        if not is_required:
            # default defaults to None; type-based conversion is deferred
            parsed[ 'default' ] = xml_tree.get( 'default' ) if ( 'default' in xml_tree.attrib ) else None

        # optional comma-separated list of allowed values
        #   (interpretation is deferred till parsing and based on param type)
        #TODO: currently unused
        constraint_list = xml_tree.get( 'constrain_to' )
        if constraint_list:
            parsed[ 'constrain_to' ] = constraint_list.split( ',' )

        # is the param a comma-separated-value list?
        parsed[ 'csv' ] = ( xml_tree.get( 'csv' ) == "true" )

        # optionally remap the query string key to a different template var name
        template_var_name = xml_tree.get( 'var_name_in_template' )
        if template_var_name:
            parsed[ 'var_name_in_template' ] = template_var_name

        return parsed

    def parse_param_type( self, xml_tree ):
        """
        Return the param's declared type, defaulting to DEFAULT_PARAM_TYPE.
        """
        #TODO: set parsers and validaters, convert here
        return xml_tree.get( 'type' ) or self.DEFAULT_PARAM_TYPE
class ParamModifierParser( ParamParser ):
    """
    Component class of VisualizationsConfigParser that parses param_modifier
    elements within visualization elements.

    A param_modifier is a query string entry that isn't standalone but alters
    how a separate (normal) param is parsed/converted (e.g. 'hda_ldda' can be
    'hda' or 'ldda' and controls whether a visualization's 'dataset_id' param
    refers to an HDA or an LDDA).
    """
    def parse( self, element ):
        """Parse like a normal param, but require the 'modifies' target attribute."""
        # a modifier is meaningless without the param it modifies
        if not element.get( 'modifies' ):
            raise ParsingException( 'param_modifier entry requires a target param key (attribute "modifies")' )
        return super( ParamModifierParser, self ).parse( element )
class ResourceParser( object ):
"""
Given a parameter dictionary (often a converted query string) and a
configuration dictionary (curr. only VisualizationsRegistry uses this),
convert the entries in the parameter dictionary into resources (Galaxy
models, primitive types, lists of either, etc.) and return
in a new dictionary.
The keys used to store the new values can optionally be re-mapped to
    new keys (e.g. dataset_id="NNN" -> hda=<HistoryDatasetAssociation>).
"""
#TODO: kinda torn as to whether this belongs here or in controllers.visualization
# taking the (questionable) design path of passing a controller in
# (which is the responsible party for getting model, etc. resources )
# consider making this a base controller? use get_object for the model resources
# don't like passing in the app, tho
    def parse_parameter_dictionary( self, trans, controller, param_config_dict, query_params, param_modifiers=None ):
        """
        Parse all expected params from the query dictionary `query_params`.

        If param is required and not present, raises a `KeyError`.

        :param trans:             the current transaction
        :param controller:        controller used when parsing params into model resources
        :param param_config_dict: map of param name -> param config (from the vis' XML)
        :param query_params:      the raw query string dictionary to parse
        :param param_modifiers:   optional 2-level map of params that modify other params
        :returns: map of template variable name -> parsed resource
        """
        # parse the modifiers first since they modify the params coming next
        #TODO: this is all really for hda_ldda - which we could replace with model polymorphism
        params_that_modify_other_params = self.parse_parameter_modifiers(
            trans, controller, param_modifiers, query_params )

        resources = {}
        for param_name, param_config in param_config_dict.items():
            # optionally rename the variable returned, defaulting to the original name
            var_name_in_template = param_config.get( 'var_name_in_template', param_name )

            # if the param is present, get it's value, any param modifiers for that param, and parse it into a resource
            # use try catch here and not caller to fall back on the default value or re-raise if required
            resource = None
            query_val = query_params.get( param_name, None )
            if query_val is not None:
                try:
                    target_param_modifiers = params_that_modify_other_params.get( param_name, None )
                    resource = self.parse_parameter( trans, controller, param_config,
                        query_val, param_modifiers=target_param_modifiers )

                except Exception, exception:
                    # failed parses are logged and treated like a missing param below
                    log.warn( 'Exception parsing visualization param from query: '
                            + '%s, %s, (%s) %s' %( param_name, query_val, str( type( exception ) ), str( exception ) ))
                    resource = None

            # here - we've either had no value in the query_params or there was a failure to parse
            #   so: error if required, otherwise get a default (which itself defaults to None)
            if resource == None:
                if param_config[ 'required' ]:
                    raise KeyError( 'required param %s not found in URL' %( param_name ) )
                resource = self.parse_parameter_default( trans, param_config )

            resources[ var_name_in_template ] = resource

        return resources
    #TODO: I would LOVE to rip modifiers out completely
    def parse_parameter_modifiers( self, trans, controller, param_modifiers, query_params ):
        """
        Parse and return parameters that are meant to modify other parameters,
        be grouped with them, or are needed to successfully parse other parameters.

        :returns: 2-level map { target_param_name -> { modifier_name -> parsed value } }
        """
        # only one level of modification - down that road lies madness
        # parse the modifiers out of query_params first since they modify the other params coming next
        parsed_modifiers = {}
        if not param_modifiers:
            return parsed_modifiers
        #precondition: expects a two level dictionary
        # { target_param_name -> { param_modifier_name -> { param_modifier_data }}}
        for target_param_name, modifier_dict in param_modifiers.items():
            parsed_modifiers[ target_param_name ] = target_modifiers = {}

            for modifier_name, modifier_config in modifier_dict.items():
                query_val = query_params.get( modifier_name, None )
                if query_val is not None:
                    modifier = self.parse_parameter( trans, controller, modifier_config, query_val )
                    target_modifiers[ modifier_name ] = modifier
                else:
                    # fall back to the modifier's configured default
                    #TODO: required attr?
                    target_modifiers[ modifier_name ] = self.parse_parameter_default( trans, modifier_config )

        return parsed_modifiers
def parse_parameter_default( self, trans, param_config ):
"""
Parse any default values for the given param, defaulting the default
to `None`.
"""
# currently, *default* default is None, so this is quaranteed to be part of the dictionary
default = param_config[ 'default' ]
# if default is None, do not attempt to parse it
if default == None:
return default
# otherwise, parse (currently param_config['default'] is a string just like query param and needs to be parsed)
# this saves us the trouble of parsing the default when the config file is read
# (and adding this code to the xml parser)
return self.parse_parameter( trans, param_config, default )
def parse_parameter( self, trans, controller, expected_param_data, query_param,
recurse=True, param_modifiers=None ):
"""
Use data in `expected_param_data` to parse `query_param` from a string into
a resource usable directly by a template.
'Primitive' types (string, int, etc.) are parsed here and more complex
resources (such as ORM models) are parsed via the `controller` passed
in.
"""
param_type = expected_param_data.get( 'type' )
constrain_to = expected_param_data.get( 'constrain_to' )
csv = expected_param_data.get( 'csv' )
parsed_param = None
# handle recursion for csv values
if csv and recurse:
parsed_param = []
query_param_list = galaxy.util.listify( query_param )
for query_param in query_param_list:
parsed_param.append( self._parse_param( trans, expected_param_data, query_param, recurse=False ) )
return parsed_param
primitive_parsers = {
'str' : lambda param: galaxy.util.sanitize_html.sanitize_html( param, 'utf-8' ),
'bool' : lambda param: galaxy.util.string_as_bool( param ),
'int' : lambda param: int( param ),
'float' : lambda param: float( param ),
#'date' : lambda param: ,
'json' : ( lambda param: galaxy.util.json.from_json_string(
galaxy.util.sanitize_html.sanitize_html( param ) ) ),
}
parser = primitive_parsers.get( param_type, None )
if parser:
#TODO: what about param modifiers on primitives?
parsed_param = parser( query_param )
#TODO: constrain_to
# this gets complicated - for strings - relatively simple but still requires splitting and using in
# for more complicated cases (ints, json) this gets weird quick
#TODO:?? remove?
# db models
#TODO: subclass here?
elif param_type == 'visualization':
encoded_visualization_id = query_param
#TODO:?? some fallback if there's no get_X in controller that's passed?
parsed_param = controller.get_visualization( trans, encoded_visualization_id,
check_ownership=False, check_accessible=True )
elif param_type == 'dataset':
encoded_dataset_id = query_param
# really an hda...
parsed_param = controller.get_dataset( trans, encoded_dataset_id,
check_ownership=False, check_accessible=True )
elif param_type == 'hda_or_ldda':
encoded_dataset_id = query_param
# needs info from another param...
hda_ldda = param_modifiers.get( 'hda_ldda' )
parsed_param = controller.get_hda_or_ldda( trans, hda_ldda, encoded_dataset_id )
#TODO: ideally this would check v. a list of valid dbkeys
elif param_type == 'dbkey':
dbkey = query_param
parsed_param = galaxy.util.sanitize_html.sanitize_html( dbkey, 'utf-8' )
#print ( '%s, %s -> %s, %s' %( param_type, query_param, str( type( parsed_param ) ), parsed_param ) )
return parsed_param
| [
"[email protected]"
] | |
add5382badb7f3163f8f0819693168c9c645993b | 8289fb4b9f2242a2f0a1b92b391058f421788682 | /Algorithms/Scheduling/main.py | f6a4949562bc672a3a2cbc07407d994c96961bd5 | [] | no_license | BawornsakS/XPrize | 5fedf7c7f0838b1da5c0749ef846ed7b2135356d | e6e900c6c63f0cef5cb3717a5b6724ec9ae00b69 | refs/heads/master | 2021-07-20T06:28:09.666385 | 2020-09-04T06:39:13 | 2020-09-04T06:39:13 | 211,299,370 | 3 | 3 | null | 2019-11-13T08:07:18 | 2019-09-27T10:56:38 | Makefile | UTF-8 | Python | false | false | 411 | py | from scheduling import Sch
thread1 = Sch('FCFS')
thread1.add('hand', 12)
thread1.add('ppp', 4)
thread1.time_pass(5)
thread1.add('head', 22)
thread1.add('leg', 15)
thread2 = Sch('RR', preemp=True,RRtime=6)
thread2.add('A',12,priority=2,preemp=True)
# thread2.time_pass(5)
thread2.add('C',4)
thread2.add('B',7,priority=7)
thread2.time_pass(17)
print(thread2.out())
thread1.time_pass(40)
print(thread1.out())
| [
"[email protected]"
] | |
350c6d6c1240a67e6c9621602893338471857e01 | 4b36339ecf9b3a1ff0a4ca2fd660da6306bd4d05 | /app/tools/actions.py | ceea7638ebb760bf71369e2f453742e78ac03e67 | [] | no_license | jamebluntcc/wheatgmap | 8942e6c603c4807a39ac8218c6cda4b34b4f0df5 | 5daafa6b97bb4ee6e73f84a2d0f305484f4c4322 | refs/heads/master | 2022-06-23T21:33:07.028988 | 2020-02-15T12:42:47 | 2020-02-15T12:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,396 | py | import os
from app.db import DB
import pandas as pd
import numpy as np
import copy
import time
import math
import subprocess
from sklearn.decomposition import PCA
from settings import basedir
from app.utils import processor, printPretty
from app.app import celery
from settings import Config
UPLOAD_FOLDER = os.path.join(basedir, 'app', 'static', 'download')
vcf_seq_script = os.path.join(Config.SCRIPT_PATH, Config.VCF_SEQ)
vcf_ann_script = os.path.join(Config.SCRIPT_PATH, Config.VCF_ANN)
vcf_pca_script = os.path.join(Config.SCRIPT_PATH, Config.VCF_PCA)
def wildcard_gene(genename):
return '%'.join([genename[:10], genename[11:]])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ('xlsx', 'csv', 'txt')
def batch_query_gene(genes, max_input=1000):
'''
get a gene string by search locus database
'''
if ',' in genes:
gene_list = []
genes = [each.split(',') for each in genes.split()]
for gene_part in genes:
gene_list += gene_part
else:
gene_list = genes.split()
if len(gene_list) > max_input:
return []
if len(gene_list) == 1:
_search = "('{0}')".format(gene_list[0])
else:
_search = tuple([str(each) for each in gene_list])
db = DB()
cmd = """select l.*, f.Description, f.Pfam_Description,
f.Interpro_Description, f.GO_Description from locus l left join func f
on l.GENE_ID=f.GENE_ID where l.GENE_ID in {0};
""".format(_search)
result = db.execute(cmd)
if result:
result = [(each[1],) + each[5:] for each in result]
df1 = pd.DataFrame(result)
df2 = pd.DataFrame(gene_list)
df3 = pd.merge(df2, df1, how='left').fillna("")
return [list(df3.iloc[i,:]) for i in range(len(df3))]
return []
def fetch_blast_result(genename):
MAX_ROW_LEN = 125
db = DB()
command = "select GENE_ID,VAL from {table} where GENE_ID like '{gene}%'"
pep_results = db.execute(command.format(table='pep_tb', gene=wildcard_gene(genename)))
cds_results = db.execute(command.format(table='cds_tb', gene=wildcard_gene(genename)))
if len(pep_results) == 0 and len(cds_results) == 0:
return {}
pro_seq = {k:v for k,v in pep_results}
cds_seq = {k:v for k,v in cds_results}
# print it pretty
for k,v in pro_seq.items():
if len(v) > MAX_ROW_LEN:
i = 0
over_len = math.ceil(len(v) / MAX_ROW_LEN) * MAX_ROW_LEN
tmp_str = ""
while i < over_len:
tmp_str += v[i:i+MAX_ROW_LEN] + '\n'
i += MAX_ROW_LEN
pro_seq[k] = tmp_str
for k,v in cds_seq.items():
if len(v) > MAX_ROW_LEN:
i = 0
over_len = math.ceil(len(v) / MAX_ROW_LEN) * MAX_ROW_LEN
tmp_str = ""
while i < over_len:
tmp_str += v[i:i+MAX_ROW_LEN] + '\n'
i += MAX_ROW_LEN
cds_seq[k] = tmp_str
return {'pro_seq': pro_seq,
'cds_seq': cds_seq}
def get_locus_result(genename, blast_results):
cds_seq_dict = blast_results.get('cds_seq', 'NA')
pro_seq_dict = blast_results.get('pro_seq', 'NA')
db = DB()
locus_result = {}
cmd = """select l.*, f.Description, f.Pfam_Description,
f.Interpro_Description, f.GO_Description from locus l left join func f
on l.GENE_ID=f.GENE_ID where l.GENE_ID='{0}';
""".format(genename)
result = db.execute(cmd, get_all=False)
if result:
locus_result['orthologous_gene'] = {}
ortho_header = ['Arabidopsis_thaliana', 'Hordeum_vulgare', 'Oryza_sativa', 'Triticum_aestivum', 'Zea_mays']
locus_result['orthologous_gene']['header'] = ortho_header
locus_result['orthologous_gene']['body'] = []
cmd = "select l.GENE_ID, o.* from locus l left join ortho o on l.GENE_ID=o.GENE_ID where l.GENE_ID='{0}';".format(genename)
ortho_result = db.execute(cmd, get_all=False)
if ortho_result:
ortho_result_list = ortho_result[3:]
ortho_result_list = [printPretty(each) for each in ortho_result_list if each is not None]
locus_result['orthologous_gene']['body'] = ortho_result_list
gene_id, chr, pos_start, pos_end = result[1:5]
description, pfam_desc, interpro_desc, go_desc = result[5:]
locus_result['gene_identification'] = {'Gene Product Name': description,
'Locus Name': genename}
locus_result['gene_attributes'] = {'Chromosome': chr,
"Gene Postion":'{start} - {end}'.format(start=pos_start, end=pos_end)}
header = ['Description', 'Pfam_Description', 'Interpro_Description', 'GO_Description']
locus_result['gene_annotation'] = {}
locus_result['gene_annotation']['header'] = header
locus_result['gene_annotation']['body'] = [description, pfam_desc, interpro_desc, go_desc]
# match 01G and 02G TraesCS1A02G000100
#result = db.execute("select * from tissue_expression where Gene_id='{0}'".format(genename))
result = db.execute("select * from tissue_expression where Gene_id like '{0}'".format(wildcard_gene(genename)))
if result:
row = [float(each) for each in result[0][2:]]
else:
row = []
locus_result['tissue_expression'] = row
locus_result['gene_cds_seq'] = cds_seq_dict
locus_result['gene_pro_seq'] = pro_seq_dict
return locus_result
def fetch_sequence(table, chr, start_pos, end_pos):
cmd = "python {script} \
--refer /home/data/wheat/reference/Chinese_Spring_v1.0.fasta \
--in_vcf {data_path}/{table}.vcf.gz \
--sample_name {sample} \
--chrom {chr} \
--start_pos {start_pos} \
--end_pos {end_pos}".format(script=vcf_seq_script, data_path=Config.VCF_SAMPLE_PATH, table=table, sample=table.split(".")[1], chr=chr, start_pos=start_pos, end_pos=end_pos)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
p.wait()
result = p.stdout.readlines()
result = ''.join([str(each, encoding="utf-8").replace('\n', '<br/>') for each in result])
return result
def _pandas_read(filename, header):
suffix = filename.rsplit('.', 1)[1].lower()
if suffix == 'csv':
df = pd.read_csv(filename, header=header)
elif suffix == 'xlsx':
df = pd.read_excel(filename, header=header)
elif suffix == 'txt':
df = pd.read_table(filename, header=header)
return df
def run_pca(filename, sample_group):
pca = PCA(n_components=2)
df = _pandas_read(os.path.join(UPLOAD_FOLDER, 'pca', filename), header=0)
mat = [list(each) for each in pca.fit_transform(np.log2(df.iloc[:,1:]+1).T)]
if sample_group:
sample_df = _pandas_read(os.path.join(UPLOAD_FOLDER, 'group', sample_group), header=None)
group = list(sample_df.iloc[:,0])
sample = list(sample_df.iloc[:,1])
else:
group = list(df.columns[1:])
sample = copy.copy(group)
for i in range(len(mat)):
mat[i].append(group[i])
mat[i].append(sample[i])
return mat
def run_vcf(filepath):
tmp_pca_path = os.path.join(UPLOAD_FOLDER, 'pca', 'tmp.pca.txt')
command = "sh {0} {1} {2}".format(vcf_pca_script, filepath, tmp_pca_path)
p = subprocess.Popen(command, shell=True)
p.wait()
df = pd.read_table(tmp_pca_path)
# subprocess.call("rm -rf {0} {1}".format(filepath, tmp_pca_path), shell=True)
mat = []
for i in range(len(df)):
mat.append(list(df.iloc[i,1:]))
group = list(df['ID'])
sample = copy.copy(group)
for i in range(len(mat)):
mat[i].append(group[i])
mat[i].append(sample[i])
return mat
def run_annotation(vcf_file, annotation_database):
annotation_prefix = '-'.join([str(int(time.time())), vcf_file.split('.vcf.gz')[0]])
cmd ="{script} {vcf_file} {annotation_database} {prefix}".format(
script=vcf_ann_script,
vcf_file=os.path.join(UPLOAD_FOLDER, 'vcf_ann', vcf_file),
annotation_database=annotation_database,
prefix=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix)
)
processor.shRun(cmd)
processor.Run("zip {zipfile} {files}".format(
zipfile=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix + '.zip'),
files=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix) + '.ann.vcf.*'))
return annotation_prefix + '.zip'
@celery.task
def async_run_annotation(vcf_file, annotation_database):
annotation_prefix = '-'.join([str(int(time.time())), vcf_file.split('.vcf.gz')[0]])
cmd ="{script} {vcf_file} {annotation_database} {prefix}".format(
script=vcf_ann_script,
vcf_file=os.path.join(UPLOAD_FOLDER, 'vcf_ann', vcf_file),
annotation_database=annotation_database,
prefix=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix)
)
processor.shRun(cmd)
processor.Run("zip {zipfile} {files}".format(
zipfile=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix + '.zip'),
files=os.path.join(UPLOAD_FOLDER, 'vcf_ann', annotation_prefix) + '.ann.vcf.*'))
result = annotation_prefix + '.zip'
return {'task': 'vcf_ann', 'result': result}
| [
"[email protected]"
] | |
77e7f43c0dc2c6909ee7e29d66c8d98921b98c47 | 953d1bb0676e485b10a0b3c554927c964ff44ceb | /pug/invest/views.py | 1edb2a33b8d5e0fbd5cdff55a8d21e2c09fdf532 | [
"MIT"
] | permissive | lowks/pug | a24ace313eb650fbb8103bab8d0169f9e7fc8ce4 | f1f7172959e7bd8a072b62c1fe0da1a9b4330e74 | refs/heads/master | 2021-01-18T10:01:54.767102 | 2015-03-07T02:42:50 | 2015-03-07T02:42:50 | 31,819,209 | 0 | 0 | null | 2015-03-07T16:49:57 | 2015-03-07T16:49:57 | null | UTF-8 | Python | false | false | 2,522 | py | # Create your views here.
import datetime
from django.views.generic import TemplateView
import sim
from pug.miner.views import d3_plot_context
from models import Day, get_dataframes, get_panel
from pug.nlp import util
import pandas as pd
class HomeView(TemplateView):
"""Query the miner.AggregateResults table to retrieve values for plotting in a bar chart"""
template_name = 'invest/home.html'
class PlotSymbolView(TemplateView):
"""Query the miner.AggregateResults table to retrieve values for plotting in a bar chart"""
template_name = 'miner/line_plot.d3.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PlotSymbolView, self).get_context_data(**kwargs)
symbols = [sym.upper() for sym in util.normalize_names(self.kwargs['symbols'])]
panel = get_panel(symbols=symbols).transpose(2,1,0)
# TODO: test!
# qs = []
# for i, symbol in enumerate(panel.items):
# series = panel[symbol]['Adj Close']
# for date, value in zip(series.index, series.values):
# qs += [Day(date=datetime.date(row[0].year, row[0].month, row[0].day), close=row[i+1], symbol=symbol)]
# Day.objects.bulk_create(qs)
context['df'] = pd.DataFrame(panel['Adj Close']).sort_index()
return d3_plot_context(context,
table=context['df'], title='Price History', xlabel='Date', ylabel='Adjusted Close')
class PlotPredictionView(TemplateView):
"""Query the miner.AggregateResults table to retrieve values for plotting in a bar chart"""
template_name = 'miner/line_plot.d3.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(PlotSymbolView, self).get_context_data(**kwargs)
symbols = sim.normalize_symbols(self.kwargs['symbols'])
df = sim.price_dataframe(symbols=symbols,
start=datetime.datetime(2010, 1, 1),
end=datetime.datetime(2010, 12, 31),
price_type='close')
# TODO: test!
qs = []
for i, symbol in enumerate(df.columns):
for row in df.records():
qs += [Day(date=datetime.date(row[0].year, row[0].month, row[0].day), close=row[i+1], symbol=symbol)]
Day.objects.bulk_create(qs)
context['df'] = df
return d3_plot_context(context,
table=df, title='Price History', xlabel='Date', ylabel='Adjusted Close') | [
"[email protected]"
] | |
a2ef81945096ac6a0c78b545b72474d26f445c3e | 25b0e82ec0ba2b667e6ae429e59e19333a641723 | /OS/cpuscheduling/venv/Scripts/easy_install-3.8-script.py | bf36c1465e29520fee6dcbefe595f071c93ab295 | [] | no_license | donkhan/msc | cf897a6dbfd72845074d13842351e49ebcf04557 | 73bc12fd3ad86e6915f51adc08af836dfdc52747 | refs/heads/master | 2021-07-10T06:43:52.687825 | 2020-11-09T06:54:14 | 2020-11-09T06:54:14 | 211,588,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | #!C:\Users\ADMIN\PycharmProjects\CPUScheduling\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"donkhan"
] | donkhan |
98679fa8bd9132a51581ffddde725287a09bf69c | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/webrtc/modules/pacing/DEPS | 42f3dfcb1453903fd6eae6943848d50d72907545 | [
"BSD-3-Clause",
"LicenseRef-scancode-google-patent-license-webrtc",
"LicenseRef-scancode-google-patent-license-webm",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 183 | include_rules = [
"+system_wrappers",
# Avoid directly using field_trial. Instead use FieldTrialsView.
"-system_wrappers/include/field_trial.h",
"+logging/rtc_event_log"
]
| [
"[email protected]"
] | ||
412b64cc9051320ec26ca9343c9b1aacc847759c | 19125627064473ca120076d08d0eb715744075cc | /preprocessing/stat_corpus.py | 63de3a9b266ab94e484bf7162a69ae2d01683a6c | [
"Apache-2.0"
] | permissive | arian-askari/dossier_coliee | 170061d570f010f2ea8a3e5f89d0b18a5f69c178 | d34189177b37eb45451ad520f94a8bbd82b5776a | refs/heads/main | 2023-04-08T02:47:43.658515 | 2021-04-24T09:31:34 | 2021-04-24T09:31:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,996 | py | import os
import argparse
import itertools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import json
import re
import pickle
def lines_to_paragraphs(lines: list):
    '''
    Group the raw text lines of one case document into paragraphs keyed by
    their marker line ('Summary:' or a bare paragraph number such as '[12]').

    Lines are accumulated until a marker line is seen; the accumulated text is
    then stored under the previous marker (text before the first marker goes
    under 'intro'). Each value is a list because the same paragraph can occur
    several times in a document (e.g. once per language).

    :param lines: non-empty, stripped lines of one document
    :return: dict mapping 'intro' / marker line -> list of paragraph strings
             (empty dict if no paragraph structure could be detected)
    '''
    paragraphs = {}
    paragraph = ''
    key = 'intro'
    for line in lines:
        is_marker = line == 'Summary:' or line.strip('[').strip(']').strip().isnumeric()
        if not is_marker:
            paragraph = paragraph + ' ' + line
        else:
            # flush the text accumulated so far under the previous marker
            paragraphs.setdefault(key, []).append(paragraph)
            key = line
            paragraph = ''
    # fix: the text after the last marker used to be silently dropped;
    # only flush when markers were found, so the fallback below still triggers
    if paragraphs and paragraph:
        paragraphs.setdefault(key, []).append(paragraph)
    # fallback (case 002_200): the paragraph numbers share a line with the text,
    # so the loop above found no stand-alone marker line
    if not paragraphs:
        # fix: restart accumulation - the loop above left the full text here
        paragraph = ''
        for line in lines:
            stripped = line.strip('[').strip()
            if not stripped or not stripped[0].isdigit():
                paragraph = paragraph + ' ' + line
            else:
                # fix: store the intro only at the first numbered line instead
                # of overwriting it with accumulated text at every marker
                if 'intro' not in paragraphs:
                    paragraphs.update({'intro': [paragraph]})
                paragraphs.update({line.split(']')[0] + ']': [']'.join(line.split(']')[1:])]})
    return paragraphs
def only_english(paragraphs: dict):
    '''
    Keep only the English version of every paragraph that exists in two
    languages. The introduction states which language comes first in the
    document, which decides whether the first or the second entry of each
    two-element value list is kept. Mutates and returns `paragraphs`.
    '''
    french_first = '[English language version follows French language version]'
    english_first = '[French language version follows English language version]'
    intro_text = paragraphs.get('intro')[0]
    if english_first in intro_text:
        keep_index = 0
    elif french_first in intro_text:
        keep_index = 1
    else:
        # no bilingual marker in the intro: leave everything untouched
        return paragraphs
    for marker, versions in paragraphs.items():
        if len(versions) > 1:
            paragraphs.update({marker: [versions[keep_index]]})
    return paragraphs
def only_string_in_dict(paragraphs: dict):
    '''Replace every list value by its first element so each paragraph maps
    to a plain string. Mutates and returns `paragraphs`.'''
    for marker in list(paragraphs):
        paragraphs[marker] = paragraphs[marker][0]
    return paragraphs
def count_words(text: str):
    '''Return the number of word tokens (\\w+ runs) in `text`,
    or None when `text` is empty or None.'''
    return len(re.findall(r'\w+', text)) if text else None
def count_doc(paragraphs: dict):
    '''
    Count the words of one parsed document.

    :param paragraphs: {'intro': str, 'Summary:': str, '<marker>': str, ...}
    :return: tuple of
             - intro word count (None if there is no intro),
             - summary word count (None if absent or it 'contains no summary'),
             - list of word counts of the numbered paragraphs
    '''
    intro_count = count_words(paragraphs.get('intro'))
    summary_text = paragraphs.get('Summary:')
    if summary_text and 'contains no summary' not in summary_text:
        summary_count = count_words(summary_text)
    else:
        summary_count = None
    paragraph_counts = [count_words(text)
                        for marker, text in paragraphs.items()
                        if marker != 'intro' and marker != 'Summary:']
    return intro_count, summary_count, paragraph_counts
def remove_duplicates_in_corpus(corpus_dir):
    '''
    Delete byte-identical duplicate files from the corpus directory.

    Files are grouped by their full text in a single pass (replacing the old
    O(n^2) pairwise comparison); within each group the first file is kept and
    the rest are deleted from disk. A mapping {kept_file: [deleted duplicates]}
    is pickled to 'corpus_removed_duplicates.pkl' so relevance labels pointing
    at a deleted file can later be redirected to its surviving copy.

    NOTE(review): writes the pickle into the module-level ``output_dir``
    global - confirm it is defined before calling this.

    :param corpus_dir: directory containing the corpus text files
    '''
    corpus_lines = read_folder(corpus_dir)
    # one-pass grouping by content instead of comparing every pair of files
    files_by_content = {}
    for file, lines in corpus_lines.items():
        files_by_content.setdefault(tuple(lines), []).append(file)
    duplicates = [group for group in files_by_content.values() if len(group) > 1]
    print('This is how many duplicate pairs(and maybe more than just 2 pairs) we have: {}'.format(len(duplicates)))
    print('This is how long on average the duplicate pairs are: {}'.format(np.mean([len(x) for x in duplicates])))
    # delete every duplicate except the first file of each group and remember
    # which surviving file replaces the deleted ones
    duplicates_removed = {}
    for duplicate_list in duplicates:
        for file in duplicate_list[1:]:
            os.remove(os.path.join(corpus_dir, file))
        duplicates_removed.update({duplicate_list[0]: duplicate_list[1:]})
    with open(os.path.join(output_dir, 'corpus_removed_duplicates.pkl'), 'wb') as f:
        pickle.dump(duplicates_removed, f)
    print('Removed {} duplicate files in total!'.format(
        len(list(itertools.chain.from_iterable(duplicates_removed.values())))))
def preprocess_label_file(output_dir, label_file):
    '''
    Rewrite the relevance labels after duplicate removal.

    Loads the JSON label file, prefixes every candidate id with its query id
    ('<query>_<candidate>'), and replaces every label that points at a removed
    duplicate with the file that was kept in its place (mapping read from
    'corpus_removed_duplicates.pkl'). The result is pickled to
    'labels_duplicates_removed.pkl' and returned.

    :param output_dir: directory holding the duplicate mapping / output pickle
    :param label_file: path to the original JSON relevance labels
    :return: {query_id: [candidate file ids, duplicates replaced]}
    '''
    with open(os.path.join(output_dir, 'corpus_removed_duplicates.pkl'), 'rb') as f:
        duplicates_removed = pickle.load(f)
    with open(label_file, 'rb') as f:
        labels = json.load(f)
    # reformat: prefix every candidate id with its query id
    labels_format = {key: ['{}_{}'.format(key, value) for value in values]
                     for key, values in labels.items()}
    # invert the mapping: duplicate -> file that was kept in its place
    duplicate_mapping = {duplicate: keeper
                         for keeper, removed in duplicates_removed.items()
                         for duplicate in removed}
    labels_replaced = {}
    replaced_count = 0
    for key, values in labels_format.items():
        new_values = []
        for value in values:
            replacement = duplicate_mapping.get(value)
            if replacement:
                new_values.append(replacement)
                print('replaced {} for {} with {}'.format(value, key, replacement))
                replaced_count += 1
            else:
                new_values.append(value)
        labels_replaced[key] = new_values
    print('replaced in total {} labels with their duplicate replacements'.format(replaced_count))
    with open(os.path.join(output_dir, 'labels_duplicates_removed.pkl'), 'wb') as f:
        pickle.dump(labels_replaced, f)
    return labels_replaced
def read_in_para_lengths(corpus_dir: str, output_dir: str):
    '''
    Read every file of the corpus, split it into intro, summary and numbered
    paragraphs (English version only) and count the word lengths. Files whose
    paragraph structure cannot be parsed are reported and collected in
    `failed_files`. Results are only returned, not persisted.

    :param corpus_dir: directory of the corpus containing the text files
    :param output_dir: kept for interface compatibility (currently unused,
        persisting the results to pickle files is disabled)
    :return: ({file_id: {'intro': n, 'summary': n, 'lengths_paragraphs': [n, ...]}},
              {file_id: parsed paragraphs},
              [file names that failed to parse])
    '''
    lengths = {}
    dict_paragraphs = {}
    failed_files = []
    for root, dirs, files in os.walk(corpus_dir):
        for file in files:
            with open(os.path.join(corpus_dir, file), 'r') as f:
                lines = f.readlines()
            # drop blank lines; fix: compare string content with '!=' instead of
            # the identity test 'is not', which only worked by accident through
            # CPython's interning of '' and single-character strings
            lines = [line.strip() for line in lines if line.strip('\n') != ' ' and line.strip() != '']
            paragraphs = lines_to_paragraphs(lines)
            if paragraphs:
                paragraphs = only_english(paragraphs)
                paragraphs = only_string_in_dict(paragraphs)
                # word counts for the intro, the summary and each paragraph
                no_intro, no_summ, lengths_para = count_doc(paragraphs)
                lengths.update({file.split('.')[0]: {'intro': no_intro, 'summary': no_summ,
                                                     'lengths_paragraphs': lengths_para}})
                dict_paragraphs.update({file.split('.')[0]: paragraphs})
            else:
                print('reading in of file {} doesnt work'.format(file))
                failed_files.append(file)
    return lengths, dict_paragraphs, failed_files
def failed_files_in_labels(labels_replaced, failed_files):
    '''
    Drop relevance labels that point at files which could not be read in
    (e.g. 350_170.txt, which is empty) and pickle the cleaned labels to
    'labels_duplicates_removed_failed_files.pkl'.

    NOTE(review): writes the pickle into the module-level ``output_dir``
    global - confirm it is defined before calling this.

    :param labels_replaced: {query_id: [candidate file ids]} (mutated in place)
    :param failed_files: file ids that failed to parse
    :return: the cleaned label dict
    '''
    for key, values in labels_replaced.items():
        # fix: the old code removed items from the very list it was iterating
        # over, which skipped the element right after every removal - build a
        # new list instead
        val_new = [value for value in values if value not in failed_files]
        if len(val_new) != len(values):
            for value in values:
                if value in failed_files:
                    print(
                        'Attention we couldnt read in the relevant file {}, therefore we now remove it from the labels'.format(
                            value))
            labels_replaced.update({key: val_new})
            print('updated dictionary to new pair {} for key {}'.format(val_new, key))
    with open(os.path.join(output_dir, 'labels_duplicates_removed_failed_files.pkl'), 'wb') as f:
        pickle.dump(labels_replaced, f)
    return labels_replaced
def count_duplicates_in_text(flipped: dict, threshold=100):
    """
    Find text passages that appear in more than `threshold` different documents.

    :param flipped: {text passage: [documents containing it]}
    :param threshold: minimum number of containing documents (strictly greater)
    :return: (list of document counts, list of the corresponding passages),
             both in the insertion order of `flipped`
    """
    frequent_counts = []
    frequent_texts = []
    for passage, docs in flipped.items():
        if len(docs) > threshold:
            frequent_counts.append(len(docs))
            frequent_texts.append(passage)
    return frequent_counts, frequent_texts
def read_folder(folder_dir: str):
    '''
    Read every file in `folder_dir` and return {file name: list of lines}.

    NOTE(review): paths are joined with `folder_dir`, not the walked `root`,
    so this assumes a flat folder - nested subdirectories would break.
    '''
    contents = {}
    for root, dirs, files in os.walk(folder_dir):
        for file in files:
            with open(os.path.join(folder_dir, file), 'r') as handle:
                contents[file] = handle.readlines()
    return contents
def analyze_text_passages(dict_paragraphs: dict, threshold=100):
    """
    Report intros, summaries and paragraphs that occur in more than
    `threshold` different documents; such passages are treated as
    non-informative boilerplate and are candidates for removal.

    :param dict_paragraphs: {file_id: {'intro'/'Summary:'/marker: text}}
    :param threshold: minimum number of containing documents
    :return: (frequent intros, frequent summaries, frequent paragraphs)
    """
    # intros: map each distinct intro text to the documents containing it
    flipped = {}
    for doc_id, paras in dict_paragraphs.items():
        intro = paras.get('intro')
        if intro:
            flipped.setdefault(intro, []).append(doc_id)
    print('number of unique intros {}'.format(len(flipped)))
    no_often, intro_often = count_duplicates_in_text(flipped, threshold)
    print('This is how many intros are non-informative and will get removed: {}'.format(sum(no_often)))
    print('In total {} introductions make up for {} non-informative summaries in documents'.format(len(intro_often),
                                                                                                   sum(no_often)))
    # summaries: same inversion for the 'Summary:' section
    flipped = {}
    for doc_id, paras in dict_paragraphs.items():
        summary = paras.get('Summary:')
        if summary:
            flipped.setdefault(summary, []).append(doc_id)
    print('number of unique summary {}'.format(len(flipped)))
    no_often, summ_often = count_duplicates_in_text(flipped, threshold)
    print('This is how many summaries are non-informative and will get removed: {}'.format(sum(no_often)))
    print('In total {} summaries make up for {} non-informative summaries in documents'.format(len(summ_often),
                                                                                               sum(no_often)))
    # numbered paragraphs: every section except intro and summary
    flipped = {}
    for doc_id, paras in dict_paragraphs.items():
        for marker, text in paras.items():
            if marker != 'intro' and marker != 'Summary:' and text:
                flipped.setdefault(text, []).append(doc_id)
    print('number of unique paragraphs {}'.format(len(flipped)))
    no_often, para_often = count_duplicates_in_text(flipped, threshold)
    print('This is how many paragraphs are non-informative and will get removed: {}'.format(sum(no_often)))
    print('In total {} paragraphs make up for {} non-informative summaries in documents'.format(len(para_often),
                                                                                                sum(no_often)))
    return intro_often, summ_often, para_often
def analyze_text_removal_from_base_case(dict_paragraphs: dict, output_dir):
    '''
    Report how many base cases would lose their intro/summary if the
    non-informative intros and summaries previously pickled from the corpus
    ('intro_text_often.pkl' / 'summ_text_often.pkl') were removed.

    :param dict_paragraphs: {file_id: {'intro'/'Summary:'/marker: text}}
    :param output_dir: directory holding the pickled frequent-passage lists
    '''
    with open(os.path.join(output_dir, 'intro_text_often.pkl'), 'rb') as f:
        intro_often = pickle.load(f)
    with open(os.path.join(output_dir, 'summ_text_often.pkl'), 'rb') as f:
        summ_often = pickle.load(f)
    # invert: distinct intro text -> base cases containing it
    intro_to_docs = {}
    for doc_id, paras in dict_paragraphs.items():
        intro = paras.get('intro')
        if intro:
            intro_to_docs.setdefault(intro, []).append(doc_id)
    for intro in intro_often:
        print(intro)
        if intro_to_docs.get(intro):
            print('this is how many base_cases contain this intro and would get removed: {}'.format(
                len(intro_to_docs.get(intro))))
    print('number of unique intros {}'.format(len(intro_to_docs)))
    # same analysis for the summaries
    summ_to_docs = {}
    for doc_id, paras in dict_paragraphs.items():
        summary = paras.get('Summary:')
        if summary:
            summ_to_docs.setdefault(summary, []).append(doc_id)
    for summ in summ_often:
        print(summ)
        if summ_to_docs.get(summ):
            print('this is how many base_cases contain this summary and would get removed: {}'.format(
                len(summ_to_docs.get(summ))))
    print('number of unique summary {}'.format(len(summ_to_docs)))
def plot_hist(array: np.ndarray, xaxis_title: str, title: str, output_dir: str):
    """
    Plots a histogram for the given numpy array and saves it as an SVG.

    :param array: numpy array containing numbers (for example lengths)
    :param xaxis_title: x-axis label; also used in the output file name
    :param title: plot title; also used in the output file name
    :param output_dir: the figure is written to output_dir/plots/
    :return: None; the histogram is saved to disk
    """
    plt.figure()
    sns.displot(array, binwidth=10, color="orange")  # , kde=False, hist_kws={"align": "left"}
    # Dashed vertical line marking the mean of the distribution.
    plt.axvline(x=np.mean(array), color='orange', linestyle='--')
    #plot.set(xticks=range(0, 1000, 100))
    #plot.set_xlim([-1000, 1000])
    plt.title(title)
    plt.ylabel("Frequency")
    plt.xlabel(xaxis_title)
    file_name = os.path.join(output_dir, 'plots/{0}_{1}_frequency.svg'.format(xaxis_title, title))
    # Fix: savefig raises FileNotFoundError if the 'plots' subdirectory does
    # not exist yet (the previously commented-out makedirs hinted at this);
    # create it race-free instead.
    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    plt.savefig(file_name)
    #plt.show()
def analyze_corpus_in_numbers(lengths, dict_paragraphs, labels_train, output_dir):
    """
    Analyzes the corpus with respect to numbers and lengths of introduction, summaries and paragraphs
    as well as labels
    :param lengths: per-document dict with 'intro'/'summary' word counts and a
        'lengths_paragraphs' list (entries may be None)
    :param dict_paragraphs: per-document dict holding the actual text fragments
    :param labels_train: dict of query id -> list of relevant documents
    :param output_dir: directory the histogram plots are written to
    :return: None; statistics are printed and plots are saved
    """
    print('number of files in corpus {}'.format(len(lengths.keys())))
    avg_length = []
    for key, value in lengths.items():
        # Bug fix: the original chained `a if c1 else 0 + b if c2 else ...`,
        # which Python parses as NESTED conditional expressions, so only one
        # of the three components was ever counted per document.  Sum the
        # intro, summary and paragraph lengths explicitly instead.
        intro_words = value.get('intro') or 0
        summary_words = value.get('summary') or 0
        paragraph_words = sum(x for x in (value.get('lengths_paragraphs') or []) if x)
        avg_length.append(intro_words + summary_words + paragraph_words)
    print('the documents have an average length of {}'.format(np.mean(avg_length)))

    # intros
    print(
        'number of documents with an intro {}'.format(sum([1 for key, value in lengths.items() if value.get('intro')])))
    print('the intros have an average length of {}'.format(
        np.mean([value.get('intro') for key, value in lengths.items() if value.get('intro')])))
    print('the shortest intro is {} words long'.format(
        np.min([value.get('intro') for key, value in lengths.items() if value.get('intro')])))
    print('the longest intro is {} words long'.format(
        np.max([value.get('intro') for key, value in lengths.items() if value.get('intro')])))
    # Histogram restricted to intros below 1000 words to keep the plot readable.
    plot_hist(np.array([value.get('intro') for key, value in lengths.items() if value.get('intro') and value.get('intro')<1000]),
              'number of words', 'Introduction length distribution', output_dir)

    # summaries
    summ_len = []
    for key, value in dict_paragraphs.items():
        if value.get('Summary:'):
            summ_len.append(count_words(value.get('Summary:')))
    print('number of documents with a summary {}'.format(len(summ_len)))
    print('the summaries have an average length of {}'.format(np.mean(summ_len)))
    print('the shortest summary has {} words'.format(np.min(summ_len)))
    print('the longest summary has {} words'.format(np.max(summ_len)))
    plot_hist(np.array([x for x in summ_len if x<1000]), 'number of words', 'Summary length distribution', output_dir)

    # paragraphs
    print('average number of paragraphs per document {}'.format(
        np.mean([len(value.get('lengths_paragraphs')) for key, value in lengths.items()])))
    para = []
    for key, value in lengths.items():
        if value.get('lengths_paragraphs'):
            # (local renamed from `list`, which shadowed the builtin)
            para_lengths = value.get('lengths_paragraphs')
            para.extend([x for x in para_lengths if x])
    print('the paragraphs have an average length of {}'.format(np.mean(para)))
    print('the shortest paragraph has {} words'.format(np.min(para)))
    print('the longest paragraph has {} words'.format(np.max(para)))
    plot_hist(np.array([x for x in para if x<1000]), 'number of words', 'Paragraph length distribution', output_dir)
    print('there are in total {} paragraphs'.format(len(para)))

    # labels
    print('average number of relevant documents for train {}'.format(
        np.mean([len(value) for value in labels_train.values()])))
    #print('average number of relevant documents for test {}'.format(
    #    np.mean([len(value) for value in labels_test.values()])))
def check_duplicates_corpus_base_case(corpus_dir: str, base_case_dir: str, out_dir=None):
    """
    Check whether base_case files are contained verbatim in the corpus too.

    Reads all text files of both folders, compares every base case against
    every corpus file and pickles the duplicates mapping.

    :param corpus_dir: folder with the corpus text files
    :param base_case_dir: folder with the base case text files
    :param out_dir: where to write 'duplicates_base_case_corpus.pkl'; defaults
        to the module-level `output_dir` (the original implicit global
        dependency, kept for backward compatibility).
    :return: None; the duplicates dict is printed and pickled
    """
    if out_dir is None:
        # Fix: the original referenced the global `output_dir` implicitly,
        # which breaks when the module is imported; keep it as the default
        # but let callers pass the directory explicitly.
        out_dir = output_dir
    # first read in all text files and store them, then compare corpus with base_cases
    corpus_lines = read_folder(corpus_dir)
    base_case_lines = read_folder(base_case_dir)
    # check duplicates of base_cases in corpus
    duplicates = {}
    for base_case, base_case_text in base_case_lines.items():
        print(base_case)
        duplicate_list = [file for file, text in corpus_lines.items() if text == base_case_text]
        if duplicate_list:
            duplicates[base_case] = duplicate_list
    print('This is how many base_cases also appear in the corpus: {}'.format(len(duplicates)))
    with open(os.path.join(out_dir, 'duplicates_base_case_corpus.pkl'), 'wb') as f:
        pickle.dump(duplicates, f)
if __name__ == "__main__":
    #
    # config
    #
    # (argparse-based configuration kept for reference; the paths are
    # currently hard-coded below instead)
    #parser = argparse.ArgumentParser()
    #parser.add_argument('--corpus-dir', action='store', dest='corpus_dir',
    #                    help='corpus directory location', required=True)
    # parser.add_argument('--output-dir', action='store', dest='output_dir',
    #                     help='output directory location', required=True)
    # parser.add_argument('--label-file-train', action='store', dest='label_file',
    #                     help='label file train', required=True)
    # parser.add_argument('--label-file-test', action='store', dest='label_file_test',
    #                     help='label file test', required=True)
    #args = parser.parse_args()
    # Hard-coded input/output locations (WSL paths on the author's machine).
    corpus_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/corpus_test'
    output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/pickle_files'
    label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/task1_train_2020_labels.json'
    label_file_test = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/task1_test_2020_labels.json'
    base_case_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/base_case_all'
    # remove_duplicates_in_corpus(corpus_dir)
    # Parse the corpus into length/paragraph dictionaries (also pickled to disk).
    lengths, dict_paragraphs, failed_files = read_in_para_lengths(corpus_dir, output_dir)
    # labels_replaced = preprocess_label_file(output_dir, label_file)
    # labels_replaced = failed_files_in_labels(labels_replaced, failed_files)
    # labels_replaced_test = preprocess_label_file(output_dir, label_file_test)
    # labels_replaced_test = failed_files_in_labels(labels_replaced_test, failed_files)
    # Reload previously pickled artefacts; NOTE these overwrite the freshly
    # parsed corpus data from read_in_para_lengths above.
    with open(os.path.join(output_dir, 'corpus_lengths.pickle'), 'rb') as f:
        lengths = pickle.load(f)
    with open(os.path.join(output_dir, 'corpus_paragraphs.pickle'), 'rb') as f:
        dict_paragraphs = pickle.load(f)
    with open(os.path.join(output_dir, 'corpus_failed_files.pickle'), 'rb') as f:
        failed_files = pickle.load(f)
    with open(os.path.join(output_dir, 'labels_duplicates_removed.pkl'), 'rb') as f:
        labels_replaced = pickle.load(f)
    with open(os.path.join(output_dir, 'labels_test_duplicates_removed.pkl'), 'rb') as f:
        labels_replaced_test = pickle.load(f)
    # analyze corpus numbers
    #analyze_corpus_in_numbers(lengths, dict_paragraphs, labels_replaced, labels_replaced_test)
    # analyze the text
    #intro_often, summ_often, para_often = analyze_text_passages(dict_paragraphs, 100)
    # analyze the base cases
    check_duplicates_corpus_base_case(corpus_dir, base_case_dir)
    bc_lengths, bc_dict_para, bc_failed_files = read_in_para_lengths(base_case_dir, output_dir)
    analyze_corpus_in_numbers(bc_lengths, bc_dict_para, labels_replaced, output_dir)
    analyze_text_passages(bc_dict_para, 50)
    # how many of the non-informative text of the corpus are in the base_cases?
    analyze_text_removal_from_base_case(bc_dict_para, output_dir)
| [
"[email protected]"
] | |
9cf9ade063ac20f68cb1bb76f0484d0a03ae9aeb | c2cb79bba837bb2d899855a809fe5d4c8211c057 | /C4 - Convolutional Neural Networks/Week 1/__init__.py | 9ecee62232f13f56e2f971f67f186c423a866618 | [] | no_license | 0ear/Coursera | 2698b802cb892ae4f0df08aa2f538a26495223db | 6b839516d71ba5d10dd141948c458f482be21f0e | refs/heads/master | 2023-06-24T07:34:19.103812 | 2021-07-29T15:21:53 | 2021-07-29T15:21:53 | 355,775,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from cnn_utils import * | [
"[email protected]"
] | |
40a886d4a0c8dff67429d69f92c7fa181db3ce13 | 5fd23deacc4f0b48df89d8e6acfbe475d64e7afa | /src/restaurants/forms.py | 01972eff995e2f423d74be6842514dbfe843a0fd | [] | no_license | nickf12/Try-Django-1.11 | afc33250f1d41c342f67bf6b41f067688853c05d | b3f54a47912fd1a6c7fae3e6a56cb7e6104ff76e | refs/heads/master | 2021-07-17T11:31:18.632641 | 2017-10-19T08:53:26 | 2017-10-19T08:53:26 | 106,943,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | from django import forms
from .models import RestaurantLocation
from .validators import validate_category
class RestaurantCreateForm(forms.Form):
    """Plain (non-model) form for entering a restaurant by hand."""

    name = forms.CharField(max_length=120)
    location = forms.CharField(required=False)
    category = forms.CharField(required=False)

    def clean_name(self):
        """Reject the placeholder name 'Hello'; pass anything else through."""
        candidate = self.cleaned_data.get("name")
        if candidate != "Hello":
            return candidate
        raise forms.ValidationError("Not a Valid Name")
class RestaurantLocationCreateForm(forms.ModelForm):
    """ModelForm exposing the basic RestaurantLocation fields."""

    class Meta:
        model = RestaurantLocation
        fields = [
            'name',
            'location',
            'category',
            'slug',
        ]

    def clean_name(self):
        """Reject the placeholder name 'Hello'; pass anything else through."""
        candidate = self.cleaned_data.get("name")
        if candidate != "Hello":
            return candidate
        raise forms.ValidationError("Not a Valid Name")
"[email protected]"
] | |
85c554f0cdbdf2f15b3898004d708f7d2e8c7888 | c1bf5c27993f69d1ca5ccc6f380ca59f17d6afee | /data_parser/services.py | fb662cb31bdecbb536abbcb7cc4fec5288827f3d | [] | no_license | scorpaena/swapi | a3963d44bd761ece339d85c5f9e2a78e3d59025f | a833fbb4a0a4576d6cad1f4a17fa991b84c917f0 | refs/heads/master | 2023-07-12T18:51:32.041634 | 2021-08-11T10:44:50 | 2021-08-11T10:44:50 | 394,433,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,385 | py | import requests
from datetime import datetime
import csv
import re
import os
from urllib.parse import urljoin
from django.core.exceptions import ObjectDoesNotExist
from .exceptions import DoesNotExistError
from .models import StarWarsFilesModel
class StarWarsAPIClient:
    """Thin client for the SWAPI mirror at swapi.py4e.com.

    The underscore-prefixed URL/id helpers are pure string manipulation;
    the `get_*` methods perform HTTP requests via `requests`.
    """

    def __init__(self):
        self.base_url = "https://swapi.py4e.com/api/"

    def _item_url_to_id(self, url):
        """Extract the trailing numeric id from an item URL.

        Raises ValueError if the URL does not end in digits (optionally
        followed by a single slash).
        """
        # One pattern instead of the original two searches plus slash strip:
        # digits at the end of the string, optional trailing slash.
        match = re.search(r"(\d+)/?$", url)
        if match is None:
            raise ValueError(f"{url} doesn't contain item ID")
        return match.group(1)

    def _resource_url_generator(self, resource):
        """Join a resource path such as 'people/' onto the API base URL."""
        return urljoin(self.base_url, resource)

    def _item_url_generator(self, resource, id):
        """Build the URL of a single item, e.g. .../people/5."""
        return urljoin(self._resource_url_generator(resource), str(id))

    def _item_url_list_to_id_list(self, url_list):
        """Convert a list of item URLs into the corresponding list of ids."""
        return [self._item_url_to_id(url) for url in url_list]

    def _people_result_substitutor(self, item):
        """Return a copy of `item` with URL reference fields replaced by ids."""
        result_dict = dict(item)
        result_dict["homeworld"] = self._item_url_to_id(url=item["homeworld"])
        for field in ("films", "species", "vehicles", "starships"):
            result_dict[field] = self._item_url_list_to_id_list(url_list=item[field])
        return result_dict

    def _get_object_by_id(self, resource, id):
        """Fetch one item as JSON; raise DoesNotExistError on a non-200 reply."""
        url = self._item_url_generator(resource, id)
        response = requests.get(url)
        if response.status_code != 200:
            raise DoesNotExistError(url)
        return response.json()

    def _get_lookup_value(self, resource, lookup_key, id):
        """Fetch item `id` and return its `lookup_key` attribute (KeyError if absent)."""
        value = self._get_object_by_id(resource, id).get(lookup_key)
        if value is None:
            raise KeyError(f"item #{id} does not have '{lookup_key}' attribute")
        return value

    def _get_lookup_values_list(self, resource, lookup_key, id):
        """Resolve a list of ids to their `lookup_key` values."""
        return [self._get_lookup_value(resource, lookup_key, id=item) for item in id]

    def _get_data_per_page(self, page_number, resource):
        """Fetch one listing page and substitute ids for URLs in its results."""
        page_url = self._resource_url_generator(resource)
        response = requests.get(page_url, params={"page": page_number}).json()
        results = response.get("results")
        if results is None:
            raise KeyError(f"{resource} does not have 'results' attribute")
        if len(results) == 0:
            raise ValueError(f"'results' list is empty")
        response["results"] = [self._people_result_substitutor(item) for item in results]
        return response

    def _get_data_all(self, resource):
        """Yield every item of a paginated resource, page by page."""
        page_number = 1
        while True:
            response = self._get_data_per_page(page_number, resource)
            for item in response["results"]:
                yield item
            # Robustness fix: use .get() so a response without a 'next' key
            # ends the pagination instead of raising KeyError.
            if response.get("next") is None:
                break
            page_number += 1

    def get_planets_detail(self, id, resource="planets/", lookup_key="name"):
        """Return the name of planet `id`."""
        return self._get_lookup_value(resource, lookup_key, id)

    def get_films_detail(self, id, resource="films/", lookup_key="title"):
        """Return the titles of the given film ids."""
        return self._get_lookup_values_list(resource, lookup_key, id)

    def get_species_detail(self, id, resource="species/", lookup_key="name"):
        """Return the names of the given species ids."""
        return self._get_lookup_values_list(resource, lookup_key, id)

    def get_vehicles_detail(self, id, resource="vehicles/", lookup_key="name"):
        """Return the names of the given vehicle ids."""
        return self._get_lookup_values_list(resource, lookup_key, id)

    def get_starships_detail(self, id, resource="starships/", lookup_key="name"):
        """Return the names of the given starship ids."""
        return self._get_lookup_values_list(resource, lookup_key, id)

    def get_people_per_page(self, page_number, resource="people/"):
        """Return one page of people with URL fields replaced by ids."""
        return self._get_data_per_page(page_number, resource)

    def get_people_all(self, resource="people/"):
        """Generator over all people with URL fields replaced by ids."""
        return self._get_data_all(resource)
class StarWarsAPIDataProcessor:
    """Enriches raw people records by resolving id references to names."""

    def __init__(self):
        self.api_client = StarWarsAPIClient()

    def people_all_data_set(self):
        """Yield every person with homeworld/films/species/vehicles/starships
        ids resolved to their human-readable names via the API client."""
        client = self.api_client
        for record in client.get_people_all():
            # Resolve each reference field in the same order as before; the
            # record dict is updated in place and then yielded.
            record["homeworld"] = client.get_planets_detail(id=record["homeworld"])
            record["films"] = client.get_films_detail(id=record["films"])
            record["species"] = client.get_species_detail(id=record["species"])
            record["vehicles"] = client.get_vehicles_detail(id=record["vehicles"])
            record["starships"] = client.get_starships_detail(id=record["starships"])
            yield record
class StarWarsModelProccessor:
    """Looks up stored CSV-file metadata via the StarWarsFilesModel."""

    def __init__(self):
        self.model = StarWarsFilesModel

    def _get_file_name(self, id):
        """Return the file_name of the model row with primary key `id`.

        Raises ObjectDoesNotExist if no such row exists.  (The original
        caught the exception only to re-raise it unchanged, which was a
        no-op; letting it propagate naturally is equivalent.)
        """
        return self.model.objects.get(id=id).file_name
class StarWarsCSVFileProcessor:
    """Writes the full people data set to CSV files and reads them back."""

    def __init__(self):
        self.data_processor = StarWarsAPIDataProcessor()
        self.model_processor = StarWarsModelProccessor()
        # Note: _get_file_path concatenates directly, so this must keep its
        # trailing slash.
        self.file_folder_path = "data_parser/csv_files/"
        self.csv_columns = [
            "name",
            "height",
            "mass",
            "hair_color",
            "skin_color",
            "eye_color",
            "birth_year",
            "gender",
            "homeworld",
            "films",
            "species",
            "vehicles",
            "starships",
            "created",
            "edited",
            "url",
        ]

    def create_csv_file(self, file_name):
        """Dump the whole people data set into `file_name` under the CSV folder."""
        data_to_save = self.data_processor.people_all_data_set()
        # Fixes: build the path from self.file_folder_path instead of
        # repeating the hard-coded directory, and open with newline='' as
        # the csv module requires (prevents blank rows on Windows).
        with open(os.path.join(self.file_folder_path, file_name), "w", newline="") as csv_file:
            csv_writer = csv.DictWriter(csv_file, fieldnames=self.csv_columns)
            csv_writer.writeheader()
            for item in data_to_save:
                csv_writer.writerow(item)

    def _get_file_path(self, id):
        """Resolve a model id to an existing CSV path; raise if the file is missing."""
        file_name = self.model_processor._get_file_name(id)
        path = f"{self.file_folder_path}{file_name}"
        if not os.path.isfile(path):
            raise FileNotFoundError(f"{file_name} is not found")
        return path

    def read_from_csv_file(self, id):
        """Yield each data row of the stored CSV file as a dict."""
        path = self._get_file_path(id)
        with open(path) as csv_file:
            for item in csv.DictReader(csv_file):
                yield item
def csv_file_name():
    """Return a timestamped CSV file name, e.g. 'people_08-11-21 10:44:50.csv'.

    NOTE(review): the name contains ':' characters, which are invalid in
    file names on Windows — confirm the target filesystem before reuse.
    """
    timestamp = datetime.now().strftime("%m-%d-%y %H:%M:%S")
    return "people_{}.csv".format(timestamp)
| [
"[email protected]"
] | |
7ec8281677357b2d237a5a7a28b6534db419871b | 15f3fb0d73e49e4c29e65718b86b3dab6d659dad | /glyphs.py | ea1781ada5eccf735d645ca1104da3bb87c507da | [
"MIT"
] | permissive | iilei/gonville | 2acb09be68faf809969df3ed8a2877b9f12b5e06 | 177659ad9da1090474da9d507b8c213d63505c9a | refs/heads/master | 2020-09-11T16:56:31.781520 | 2014-10-25T13:45:11 | 2014-10-25T13:45:11 | 222,131,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199,990 | py | #!/usr/bin/env python
import sys
import os
import string
import types
import math
import time
import base64
from curves import *
try:
    # New Python 2.6 way of spawning subprocesses
    import subprocess
    def popen2(command):
        # Run `command` through the shell and return (stdin, stdout) pipe
        # file objects, mimicking the interface of the old os.popen2().
        p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
            stdout=subprocess.PIPE, close_fds=True)
        return (p.stdin, p.stdout)
except ImportError, e:
    # Old-style fallback, deprecated in 2.6
    from os import popen2
class GlyphContext:
    """Drawing context for a single glyph.

    Holds a set of nib-stroked curves plus raw PostScript fragments
    (`before`/`extra`) and renders everything to PostScript via makeps().
    Also doubles as a stand-in canvas object for the GUI editor, so
    create_line/delete are no-ops here.
    """
    def __init__(self):
        self.curves = {}
        self.curveid = 0
        self.canvas = self # simplest thing :-)
        self.extra = self.before = ""
        # Scale in units per inch. 1900 happens to be the scale at
        # which I drew most of these glyphs; unhelpfully, the real
        # scale used by Lilypond (and Mus) is 3600.
        self.scale = 1900 # default unless otherwise specified
        # Location of the glyph's origin, in output coordinates
        # (i.e. the location in the glyph's own coordinates will be
        # this, divided by 3600, multiplied by self.scale).
        self.origin = 1000, 1000
        # Default size of canvas (in glyph coordinates) on which we
        # will display the image.
        self.canvas_size = 1000, 1000
        # Default extra resolution factor over the glyph coordinates
        # used for rendering to potrace.
        self.trace_res = 4
        # Default number of points to interpolate along each curve.
        self.curve_res = 1001
    def create_line(self, *args, **kw):
        # Canvas-interface stub: the GUI canvas draws lines, batch mode doesn't.
        return None
    def delete(self, *args, **kw):
        # Canvas-interface stub: nothing to delete in batch rendering.
        pass
    def makeps(self):
        """Return the PostScript rendering of all curves plus extra fragments."""
        out = "gsave 1 setlinecap\n"
        out = out + self.before + "\n"
        for cid, curve in self.curves.items():
            # Sample each curve densely and draw a nib shape at every sample.
            for it in range(self.curve_res):
                t = it / float(self.curve_res-1)
                x, y = curve.compute_point(t)
                nib = curve.compute_nib(t)
                if type(nib) == types.TupleType:
                    # Chisel nib: (radius, angle, forward dist, backward dist),
                    # drawn as a thick stroke through the current point.
                    radius, angle, fdist, bdist = nib
                    c = cos(angle)
                    s = -sin(angle)
                    out = out + "newpath %g %g moveto %g %g lineto %g setlinewidth stroke\n" % \
                    (x+c*fdist, y+s*fdist, x-c*bdist, y-s*bdist, 2*radius)
                elif nib != 0:
                    # Scalar nib: a filled circular dot of that radius.
                    out = out + "newpath %g %g %g 0 360 arc fill\n" % (x, y, nib)
        e = self.extra
        if not (type(e) == types.TupleType or type(e) == types.ListType):
            e = (e,)
        for ee in e:
            if type(ee) == types.StringType:
                out = out + ee + "\n"
            else:
                # Nested GlyphContext: splice its rendering in.
                out = out + ee.makeps()
        out = out + "\ngrestore\n"
        return out
    def testdraw(self):
        # Emit a complete standalone PostScript page for eyeballing the glyph.
        print "gsave clippath flattenpath pathbbox 0 exch translate"
        print "1 -1 scale pop pop pop"
        print self.makeps()
        print "grestore showpage"
# Conditional-expression helper (predates reliance on `x if c else y`).
def qc(cond, t, f):
    """Return t when cond is truthy, otherwise f (both already evaluated)."""
    return t if cond else f
# UTF-7 encoding, ad-hocked to do it the way Fontforge wants it done
# (encoding control characters and double quotes, in particular).
def utf7_encode(s):
    """Encode an ASCII string in Fontforge's variant of UTF-7.

    Characters in the safe set pass through verbatim; any run of other
    characters is emitted as '+<base64 of its UTF-16BE bytes>-'.
    """
    out = ""
    b64 = ""
    # Characters we encode directly: RFC 2152's Set D, plus space.
    ok = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:? "
    # The trailing NUL is a sentinel whose only job is to flush a pending
    # base64 run at the end of the string; it is never emitted itself.
    for c in s + "\0":
        assert ord(c) < 128 # we support ASCII only
        if c != "\0" and not (c in ok):
            # Accumulate this character's UTF-16BE encoding ("\0" + char).
            b64 = b64 + "\0" + c
        else:
            # Bug fix: the sentinel used to fall into the accumulating branch
            # above (NUL is not in `ok`), so a run of encoded characters at
            # the very end of the string was never flushed and silently
            # disappeared from the output.
            if b64 != "":
                b64 = base64.b64encode(b64)
                b64 = string.replace(b64, "\n", "") # just in case
                b64 = string.replace(b64, "=", "")
                out = out + "+" + b64 + "-"
                b64 = ""
            if c != '\0':
                out = out + c
    return out
# 2x2 matrix multiplication.
def matmul(m1, m2):
    """Multiply two 2x2 matrices given as flat tuples (a, b, c, d).

    Rewritten without Python-2-only tuple parameter unpacking (PEP 3113);
    the call signature seen by callers is unchanged.
    """
    a, b, c, d = m1
    e, f, g, h = m2
    return (a*e+b*g, a*f+b*h, c*e+d*g, c*f+d*h)
# 2x2 matrix inversion.
def matinv(m):
    """Invert a 2x2 matrix given as a flat tuple (a, b, c, d).

    Rewritten without Python-2-only tuple parameter unpacking (PEP 3113);
    the call signature seen by callers is unchanged.
    """
    a, b, c, d = m
    det = a*d-b*c
    return (d/det, -b/det, -c/det, a/det)
# Turn a straight line from (0,0) to (1,1) into a quadratic curve
# hitting the same two points but passing through (1/2,1/2-k)
# instead of (1/2,1/2).
def depress(t, k):
    """Quadratic sag of the identity: fixed at t=0 and t=1, lowered by k at t=1/2."""
    weight = t * (1 - t)  # 0 at both endpoints, 1/4 at the midpoint
    return t - weight * 4 * k
# Nib helper function which sets up a chisel nib with one end on the
# curve and the other end at a specified other point.
def ptp_nib(c, x, y, t, theta, x1, y1, nr):
    """Chisel nib of radius nr spanning from the current point (x, y)
    to the fixed point (x1, y1)."""
    dx = x1 - x
    dy = y - y1
    span_angle = atan2(dy, dx)
    span_dist = sqrt(dy*dy + dx*dx)
    return nr, span_angle, span_dist, 0
# Nib helper function which sets up a chisel nib with one end
# following the curve and the other end following a completely
# different curve or chain of curves.
def follow_curveset_nib(c, x, y, t, theta, carray, i, n, r):
    """Chisel nib from the current point to the matching point on a chain.

    (t, i) addresses position (t+i)/n along a chain of n segments; this is
    rescaled to select the right curve in carray and the parameter within
    it, and the nib spans across to that point with radius r.
    """
    pos = (t + i) * len(carray) / n
    idx = int(pos)
    if idx == len(carray):
        # t == 1 on the last segment lands exactly on its right endpoint.
        idx = idx - 1
    tx, ty = carray[idx].compute_point(pos - idx)
    return ptp_nib(c, x, y, t, theta, tx, ty, r)
# Function which draws a blob on the end of a line.
def blob(curve, end, whichside, radius, shrink, nibradius=None):
    """Attach a near-circular blob to one end of `curve`.

    end: 0 or 1, selecting which end of the curve.
    whichside: 'r' or 'l', the side of the stroke direction the blob sits on.
    radius/shrink: the blob is a single involute whose radius shrinks from
        `radius` to `radius - shrink` over the full turn.
    nibradius: stroke radius; defaults to the curve's own (scalar) nib.
    """
    if nibradius == None:
        nibradius = curve.compute_nib(end)
        assert type(nibradius) != types.TupleType
    x, y = curve.compute_point(end)
    dx, dy = curve.compute_direction(end)
    if end == 0:
        dx, dy = -dx, -dy
    # Normalise the outgoing direction vector.
    dlen = sqrt(dx*dx + dy*dy)
    dx, dy = dx/dlen, dy/dlen
    # Normal vector pointing to the requested side.
    if whichside == 'r':
        nx, ny = -dy, dx
    elif whichside == 'l':
        nx, ny = dy, -dx
    # We want to draw a near-circle which is essentially a single
    # involute going all the way round, so that its radius shrinks
    # from 'radius' to (radius-shrink) on the way round. That means
    # it needs to unwind once round a circle of circumference
    # 'shrink'.
    r = shrink/(2*pi)
    cx = x + radius*nx - r*dx
    cy = y + radius*ny - r*dy
    # Approximate the full turn with four quarter-turn involute segments.
    for i in range(4):
        if whichside == 'r':
            newnx, newny = -ny, nx
        elif whichside == 'l':
            newnx, newny = ny, -nx
        radius = radius - shrink/4.
        newx = cx - r*nx - radius*newnx
        newy = cy - r*ny - radius*newny
        newcurve = CircleInvolute(curve.cont, x, y, dx, dy, newx, newy, nx, ny)
        x, y, dx, dy, nx, ny = newx, newy, nx, ny, newnx, newny
        # Each segment is stroked with a point-to-point nib anchored at the
        # blob centre, giving the filled appearance.
        newcurve.nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,cx,cy,nibradius)
# Construct a PostScript path description which follows the centre
# of some series of curve objects and visits other points in
# between. Used to arrange that one quaver tail doesn't go past
# another.
def clippath(elements):
    """Build a closed PostScript path through curve centrelines and points.

    Each element is either a curve object (sampled densely along its
    length) or a literal (x, y) coordinate pair.
    """
    points = []
    for elem in elements:
        if type(elem) == types.InstanceType:
            # Old-style class instance: a curve; sample it densely.
            steps = elem.cont.curve_res
            for step in range(steps):
                points.append(elem.compute_point(step / float(steps - 1)))
        else:
            # Anything else is taken to be a plain coordinate pair.
            points.append(elem)
    words = []
    for idx, p in enumerate(points):
        op = "moveto" if idx == 0 else "lineto"
        words.append("%g %g %s" % (p[0], p[1], op))
    words.append("closepath")
    return " ".join(words)
def update_bbox(bbox, x, y):
    """Expand bounding box (x0, y0, x1, y1) to include the point (x, y).

    A bbox of (None, None, None, None) is treated as empty and becomes the
    degenerate box at (x, y).
    """
    x0, y0, x1, y1 = bbox
    if x0 is None:  # fix: identity comparison with None, not ==
        return x, y, x, y
    return min(x0, x), min(y0, y), max(x1, x), max(y1, y)
def bezfn(x0, x1, x2, x3, t):
    """Evaluate one coordinate of a cubic Bezier with control values x0..x3 at t."""
    u = 1 - t
    return x0*u**3 + 3*x1*u**2*t + 3*x2*u*t**2 + x3*t**3
def break_curve(x0,y0, x1,y1, x2,y2, x3,y3):
    """Split one cubic Bezier into pieces that are monotonic in x and y.

    Breaks the curve at every t in (0,1) where dx/dt or dy/dt vanishes and
    returns a list of (x0,y0,x1,y1,x2,y2,x3,y3) control tuples for the
    sub-curves.
    """
    # We must differentiate the separate cubics for the curve's x and
    # y coordinates, find any stationary points in [0,1], and break
    # the curve at those points.
    #
    # A single coordinate of a Bezier curve has the equation
    #
    #  x = x0 (1-t)^3 + 3 x1 (1-t)^2 t + 3 x2 (1-t) t^2 + x3 t^3
    #    = x0 (1-3t+3t^2-t^3) + 3 x1 (t-2t^2+t^3) + 3 x2 (t^2-t^3) + x3 t^3
    #    = t^3 (x3-3x2+3x1-x0) + t^2 (3x2-6x1+3x0) + t (3x1-3x0) + x0
    #
    # and hence its derivative is at^2+bt+c where
    #  a = 3(x3-3x2+3x1-x0)
    #  b = 6(x2-2x1+x0)
    #  c = 3(x1-x0)
    # Each break point carries the axis (1=x, 2=y) that caused it; the two
    # curve endpoints are tagged with axis 0.
    breakpts = [(0,0),(1,0)]
    for (axis,c0,c1,c2,c3) in ((1,x0,x1,x2,x3),(2,y0,y1,y2,y3)):
        a = 3*(c3-3*c2+3*c1-c0)
        b = 6*(c2-2*c1+c0)
        c = 3*(c1-c0)
        #sys.stderr.write("%d: a=%g b=%g c=%g\n" % (axis, a, b, c))
        tlist = ()  # NOTE(review): unused leftover
        if a == 0:
            # Derivative is linear: at most one root.
            if b != 0:
                breakpts.append((-c/b,axis))
        else:
            # Quadratic derivative: roots exist only if the discriminant is
            # non-negative.
            disc = b*b-4*a*c
            if disc >= 0:
                rdisc = math.sqrt(disc)
                breakpts.append(((-b + rdisc)/(2*a),axis))
                breakpts.append(((-b - rdisc)/(2*a),axis))
    breakpts.sort()
    curves = []
    #sys.stderr.write("break %g,%g %g,%g %g,%g %g,%g:\n" % (x0,y0,x1,y1,x2,y2,x3,y3))
    #sys.stderr.write("  at %s\n" % repr(breakpts))
    for i in range(len(breakpts)-1):
        (t0, axis0) = breakpts[i]
        (t1, axis1) = breakpts[i+1]
        # Only keep intervals lying inside [0,1] with positive length.
        if 0 <= t0 and t0 < t1 and t1 <= 1:
            # End points of the sub-curve come straight from the Bezier.
            nx0 = bezfn(x0,x1,x2,x3,t0)
            ny0 = bezfn(y0,y1,y2,y3,t0)
            nx3 = bezfn(x0,x1,x2,x3,t1)
            ny3 = bezfn(y0,y1,y2,y3,t1)
            # Inner control points are derived from the derivative at each end.
            nx1 = nx0 + (t1-t0) * ((x3-3*x2+3*x1-x0)*t0**2 + 2*(x2-2*x1+x0)*t0 + (x1-x0))
            ny1 = ny0 + (t1-t0) * ((y3-3*y2+3*y1-y0)*t0**2 + 2*(y2-2*y1+y0)*t0 + (y1-y0))
            nx2 = nx3 - (t1-t0) * ((x3-3*x2+3*x1-x0)*t1**2 + 2*(x2-2*x1+x0)*t1 + (x1-x0))
            ny2 = ny3 - (t1-t0) * ((y3-3*y2+3*y1-y0)*t1**2 + 2*(y2-2*y1+y0)*t1 + (y1-y0))
            # Force exact stationarity at break points on the relevant axis,
            # so rounding error cannot reintroduce non-monotonicity.
            if axis0 == 1:
                nx1 = nx0
            elif axis0 == 2:
                ny1 = ny0
            if axis1 == 1:
                nx2 = nx3
            elif axis1 == 2:
                ny2 = ny3
            curves.append((nx0,ny0,nx1,ny1,nx2,ny2,nx3,ny3))
            #sys.stderr.write("  got %g,%g %g,%g %g,%g %g,%g\n" % curves[-1])
    return curves
# Use potrace to compute the PS path outline of any glyph.
def get_ps_path(char, debug=None):
    """Render `char` through Ghostscript, trace it with potrace, and parse
    the traced PostScript into a path.

    Returns (bbox, path): bbox is (x0, y0, x1, y1) over all path points;
    path is a list of tuples ('m', x, y), ('l', x0, y0, x1, y1),
    ('c', x0, y0, ..., x3, y3) or ('cp',).

    debug, if set, tees the intermediate PBM and PostScript streams into
    files z1.<debug> / z2.<debug> for inspection.
    """
    path = []
    xsize, ysize = char.canvas_size
    res = char.trace_res
    if debug == None:
        tee1 = tee2 = ""
    else:
        tee1 = " | tee z1.%s" % debug
        tee2 = " | tee z2.%s" % debug
    # gs rasterises our PostScript to a PBM bitmap; potrace vectorises it.
    fin, fout = popen2("gs -sDEVICE=pbm -sOutputFile=- -g%dx%d -r%d -dBATCH -dNOPAUSE -q -%s | potrace -b ps -c -q -W 1in -H 1in -r 4000 -M 1000 -O 1 -o - -%s" % (xsize*res, ysize*res, 72*res, tee1, tee2))
    fin.write("0 %d translate 1 -1 scale\n" % ysize)
    fin.write(char.makeps())
    fin.write("showpage")
    fin.close()
    # Now we read and parse potrace's PostScript output. This is easy
    # enough if we've configured potrace to output as simply as
    # possible (which we did) and are also ignoring most of the fiddly
    # bits, which we are. I happen to know that potrace (as of v1.8 at
    # least) transforms its coordinate system into one based on tenths
    # of a pixel measured up and right from the lower left corner, so
    # I'm going to ignore the scale and translate commands and just
    # skip straight to parsing the actual lines and curves on that
    # basis.
    psstack = []
    pscurrentpoint = None, None
    output = "newpath"
    scale = 4.0 / char.trace_res
    while 1:
        s = fout.readline()
        if s == "": break
        if s[:1] == "%":
            continue # comment
        ss = string.split(s)
        # Interpret the token stream like a tiny PostScript machine:
        # numbers go on a stack, operators pop their arguments off it.
        for word in ss:
            if word[:1] in "-0123456789":
                psstack.append(float(word))
            elif word == "gsave":
                pass # ignore
            elif word == "grestore":
                pass # ignore
            elif word == "showpage":
                pass # ignore
            elif word == "scale":
                psstack.pop(); psstack.pop() # ignore
            elif word == "translate":
                psstack.pop(); psstack.pop() # ignore
            elif word == "setgray":
                psstack.pop() # ignore
            elif word == "newpath":
                pscurrentpoint = None, None
            elif word == "moveto" or word == "rmoveto":
                y1 = psstack.pop(); x1 = psstack.pop()
                x0, y0 = pscurrentpoint
                if word == "moveto":
                    x1, y1 = x1, y1
                else:
                    assert x0 != None
                    x1, y1 = x1 + x0, y1 + y0
                pscurrentpoint = x1, y1
                path.append(('m', x1*scale, y1*scale))
            elif word == "lineto" or word == "rlineto":
                y1 = psstack.pop(); x1 = psstack.pop()
                x0, y0 = pscurrentpoint
                # NOTE(review): this compares against "moveto", which is never
                # true in this branch, so an absolute 'lineto' would be treated
                # as relative. Presumably potrace only ever emits 'rlineto'
                # here — confirm before relying on absolute linetos.
                if word == "moveto":
                    x1, y1 = x1, y1
                else:
                    assert x0 != None
                    x1, y1 = x1 + x0, y1 + y0
                pscurrentpoint = x1, y1
                path.append(('l', x0*scale, y0*scale, x1*scale, y1*scale))
            elif word == "curveto" or word == "rcurveto":
                y3 = psstack.pop(); x3 = psstack.pop()
                y2 = psstack.pop(); x2 = psstack.pop()
                y1 = psstack.pop(); x1 = psstack.pop()
                x0, y0 = pscurrentpoint
                assert x0 != None
                if word == "curveto":
                    x1, y1 = x1, y1
                    x2, y2 = x2, y2
                    x3, y3 = x3, y3
                else:
                    x1, y1 = x1 + x0, y1 + y0
                    x2, y2 = x2 + x0, y2 + y0
                    x3, y3 = x3 + x0, y3 + y0
                pscurrentpoint = x3, y3
                # Split each curve into monotonic pieces as we go.
                for c in break_curve(x0*scale,y0*scale,x1*scale,y1*scale,\
                    x2*scale,y2*scale,x3*scale,y3*scale):
                    path.append(('c',) + c)
            elif word == "closepath":
                path.append(('cp',))
    fout.close()
    # Accumulate the bounding box over every path end point.
    bbox = None, None, None, None
    for c in path:
        if c[0] != 'cp':
            bbox = update_bbox(bbox, c[-2], c[-1])
    return bbox, path
# Wrapper on os.system() that enforces a success return.
def system(cmd):
    """Run `cmd` via os.system() and insist on a zero exit status.

    Raises AssertionError explicitly (rather than via the `assert`
    statement, which is stripped under `python -O`) so the check always
    runs; the exception type callers may catch is unchanged.
    """
    ret = os.system(cmd)
    if ret != 0:
        raise AssertionError("command failed (status %r): %s" % (ret, cmd))
# ----------------------------------------------------------------------
# G clef (treble).
#
# The G clef is drawn in two parts. First, we have a connected
# sequence of curves which draws the main outline of the clef (with
# varying stroke width as detailed below). But at the very top of
# the clef, the two edges of the thick stroke diverge: the outside
# of the curve is pointy, but the inside curves round smoothly. So
# we have a secondary context containing an alternative version of
# the top two curves (c6,c7), used as the inner smooth curve. The
# actual drawing uses the main context, but with an exciting nib
# function for c6 and c7 which moves one end of the nib along the
# main curve while the other tracks the curves in the secondary
# context.
def tmpfn():
    """Build the G (treble) clef glyph and return its GlyphContext.

    A secondary context holds the smooth inner edge of the thick top
    stroke (tc0, tc1); the main context draws the clef body, with c6/c7
    using a nib that tracks the secondary curves (see the comment block
    above).
    """
    # Secondary curves.
    tmp = cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 603, 161, -0.33035, -0.943858, 563, 145, -0.943858, 0.33035)
    c1 = CircleInvolute(cont, 563, 145, -0.943858, 0.33035, 504.709, 289.062, 0.208758, 0.977967)
    c0.weld_to(1, c1, 0)
    # End saved data
    tc0, tc1 = c0, c1
    # Main context.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 528, 654, -0.90286, -0.429934, 569, 507, 1, 0)
    c1 = CircleInvolute(cont, 569, 507, 1, 0, 666, 607, 0, 1)
    c2 = CircleInvolute(cont, 666, 607, 0, 1, 549, 715, -1, 0)
    c3 = CircleInvolute(cont, 549, 715, -1, 0, 437, 470, 0.581238, -0.813733)
    c4 = CircleInvolute(cont, 437, 470, 0.581238, -0.813733, 536, 357, 0.731055, -0.682318)
    c5 = CircleInvolute(cont, 536, 357, 0.731055, -0.682318, 603, 161, -0.33035, -0.943858)
    c6 = CircleInvolute(cont, 603, 161, -0.33035, -0.943858, 559, 90, -0.83205, -0.5547)
    c7 = CircleInvolute(cont, 559, 90, -0.77193, 0.635707, 500, 267, 0.211282, 0.977425)
    c8 = StraightLine(cont, 500, 267, 605.66, 762)
    c9 = ExponentialInvolute(cont, 606, 762, 0.211282, 0.977425, 598, 856, -0.514496, 0.857493)
    c10 = CircleInvolute(cont, 598, 856, -0.514496, 0.857493, 446, 865, -0.633238, -0.773957)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0, 1)
    c7.weld_to(1, c8, 0)
    c8.weld_to(1, c9, 0)
    c9.weld_to(1, c10, 0)
    # End saved data
    # Varying-width nib: width depends on the stroke direction relative to
    # each curve's nibdir(t).
    cont.default_nib = lambda c,x,y,t,theta: 17+11*cos(theta-c.nibdir(t))
    c0.nibdir = c1.nibdir = c2.nibdir = lambda t: 0
    phi = c4.compute_theta(1)
    c3.nibdir = lambda t: phi*t
    c4.nibdir = lambda t: phi
    gamma = c5.compute_theta(1) - pi
    c5.nibdir = lambda t: phi + (gamma-phi)*t
    c5.nib = lambda c,x,y,t,theta: 18+10*cos(theta-c.nibdir(t))
    # c6/c7 track the secondary curves for their inner edge.
    c6.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],0,2,8)
    c7.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],1,2,8)
    c8.nib = c9.nib = c10.nib = 8
    blob(c10, 1, 'r', 45, 9)
    # I drew this one at a silly scale for some reason
    cont.scale = 1736
    cont.origin = 800, 822
    cont.hy = 1000 - (cont.origin[1] * cont.scale / 3600.) # I should probably work this out better
    return cont
clefG = tmpfn()
def tmpfn():
    """Build the small (cue-sized) G clef: the full clefG scaled by 0.8."""
    cont = GlyphContext()
    cont.extra = ".8 dup scale", clefG
    cont.scale = clefG.scale
    cont.origin = clefG.origin
    cont.hy = .8 * clefG.hy
    return cont
clefGsmall = tmpfn()
# ----------------------------------------------------------------------
# F clef (bass).
def tmpfn():
    """Build the F (bass) clef glyph and return its GlyphContext."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 534, 761, 0.964764, -0.263117, 783, 479, 0, -1)
    c1 = CircleInvolute(cont, 783, 479, 0, -1, 662, 352, -0.999133, -0.0416305)
    c2 = CircleInvolute(cont, 662, 352, -0.999133, -0.0416305, 585, 510, 0.993884, 0.110432)
    # End saved data
    # Elliptical nib whose width oscillates with stroke direction.
    cont.default_nib = lambda c,x,y,t,theta: ((c.nibmax+6) + (c.nibmax-6)*cos(2*(theta-c.nibdir(t))))/2
    theta0 = c0.compute_theta(0)
    theta1 = c1.compute_theta(0)
    # The opening stroke tapers from 34 down to 6 as it curves round.
    c0.nib = lambda c,x,y,t,theta: (34 + (6-34)*abs((theta-theta1)/(theta0-theta1))**1.4)
    c1.nibdir = lambda t: theta1
    c1.nibmax = 34
    c2.nibdir = lambda t: theta1
    c2.nibmax = 12
    blob(c2, 1, 'l', 47, 3)
    # The two dots.
    cont.extra = "newpath 857 417 20 0 360 arc fill " + \
    "newpath 857 542 20 0 360 arc fill";
    # The hot-spot y coordinate is precisely half-way between the
    # two dots.
    cont.hy = (417+542)/2.0
    return cont
clefF = tmpfn()
def tmpfn():
    """Build the small (cue-sized) F clef: the full clefF scaled by 0.8."""
    cont = GlyphContext()
    cont.extra = ".8 dup scale", clefF
    cont.hy = .8 * clefF.hy
    return cont
clefFsmall = tmpfn()
# ----------------------------------------------------------------------
# C clef (alto, tenor).
#
# This one took considerable thinking! The sharp point between c3
# and c4 is difficult to achieve, and I eventually did it by having
# the nib narrow to 2 pixels at that point - so it isn't actually
# perfectly sharp, but I can't bring myself to care. So what happens
# is simply that the backward C shape is drawn with a nib function
# that narrows to a near-point, and then turns a corner to go down
# to the centreline via c4. Meanwhile, tc0 defines a cutoff line at
# which the plain circular nib going along c3 suddenly shifts to
# being a point-to-point nib of the same radius with its other end
# at the end of tc0 on the centreline.
#
# (Note that, due to the nontrivial nib width at the point where the
# cutoff occurs, the actual edge that ends up drawn will not run
# precisely along tc0. Again, I don't care.)
def tmpfn():
    """Build the C clef (alto/tenor): two mirror-image curl shapes
    joined to a pair of vertical bars.

    The sharp point between c3 and c4 (and c8/c9) is produced by
    narrowing the nib to a near-point at the cutoff lines tc0/tc1,
    as described in the comment block above."""
    # Secondary context.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 698, 474, 744, 398)
    c1 = StraightLine(cont, 698, 474, 744, 550)
    # End saved data
    # Keep the two cutoff lines before reusing the names c0/c1 below.
    tc0, tc1 = c0, c1
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 698, 242, 0.707107, -0.707107, 762, 216, 1, 0)
    c1 = CircleInvolute(cont, 762, 216, 1, 0, 870, 324, 0, 1)
    c2 = CircleInvolute(cont, 870, 324, 0, 1, 773, 436, -1, 0)
    c3 = CircleInvolute(cont, 773, 436, -1, 0, 705, 355, -0.0434372, -0.999056)
    c4 = CircleInvolute(cont, 705, 355, -0.220261, 0.975441, 635, 474, -0.894427, 0.447214)
    c5 = CircleInvolute(cont, 698, 706, 0.707107, 0.707107, 762, 732, 1, 0)
    c6 = CircleInvolute(cont, 762, 732, 1, 0, 870, 624, 0, -1)
    c7 = CircleInvolute(cont, 870, 624, 0, -1, 773, 512, -1, -0)
    c8 = CircleInvolute(cont, 773, 512, -1, -0, 705, 593, -0.0434372, 0.999056)
    c9 = CircleInvolute(cont, 705, 593, -0.220261, -0.975441, 635, 474, -0.894427, -0.447214)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0, 1)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0)
    c7.weld_to(1, c8, 0)
    c8.weld_to(1, c9, 0, 1)
    # End saved data
    def mad_cclef_points(c,x,y,t,theta,nibfn,cutoffline):
        # Get the ordinary nib width which the normal nib function
        # would specify for this point on the curve.
        nw = nibfn(c,x,y,t,theta)
        # If we're to the left of the cutoff line, do a PTP nib
        # pointing to the start point of the cutoff line.
        cx0, cy0, cx1, cy1 = cutoffline.inparams
        cx = cx0 + (cx1-cx0) * (y-cy0) / (cy1-cy0)
        if x < cx:
            return ptp_nib(c,x,y,t,theta,cx0,cy0,nw)
        else:
            return nw
    c0.nib = lambda c,x,y,t,theta: 6
    # The backward-C bulge: nib extent grows quadratically with
    # horizontal distance across the curve's x-range.
    c1.nib = c2.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, pi, k, 0))(44*((x-min(x1,x2))/abs(x2-x1))**2)))(c.compute_x(0),c.compute_x(1))
    c3.nib = lambda c,x,y,t,theta: mad_cclef_points(c,x,y,t,theta,c0.nib,tc0)
    cx0,cy0 = tc0.compute_point(0)
    r0 = c3.compute_nib(1)[0]
    c4.nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,cx0,cy0,r0)
    # The lower half mirrors the upper half exactly.
    c5.nib = lambda c,x,y,t,theta: 6
    c6.nib = c7.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, pi, k, 0))(44*((x-min(x1,x2))/abs(x2-x1))**2)))(c.compute_x(0),c.compute_x(1))
    c8.nib = lambda c,x,y,t,theta: mad_cclef_points(c,x,y,t,theta,c5.nib,tc1)
    cx1,cy1 = tc1.compute_point(0)
    r1 = c8.compute_nib(1)[0]
    c9.nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,cx1,cy1,r1)
    blob(c0, 0, 'l', 28, 6)
    blob(c5, 0, 'r', 28, 6)
    # The two vertical bars on the left of the clef, drawn as plain
    # filled rectangles.
    cont.extra = \
        "/box { newpath 3 index 3 index moveto 3 index 1 index lineto 1 index 1 index lineto 1 index 3 index lineto closepath fill pop pop pop } def " + \
        "537 206 601 742 box " + \
        "625 206 641 742 box "
    return cont
clefC = tmpfn()
def tmpfn():
    # 80%-scale C clef, as used for mid-stave clef changes.
    ctx = GlyphContext()
    ctx.extra = (".8 dup scale", clefC)
    return ctx
clefCsmall = tmpfn()
# ----------------------------------------------------------------------
# Percussion 'clef'.
def tmpfn():
    # Percussion 'clef': two solid vertical bars, drawn as filled
    # rectangles in raw PostScript.
    cont = GlyphContext()
    def bar(x0, x1):
        # One filled rectangle spanning x0..x1 horizontally and
        # 368..632 vertically.
        return ("newpath %d 368 moveto %d 632 lineto "
                "%d 632 lineto %d 368 lineto closepath fill ") % (x0, x0, x1, x1)
    cont.extra = bar(410, 470) + bar(530, 590)
    return cont
clefperc = tmpfn()
def tmpfn():
    # 80%-scale percussion clef, as used for mid-stave clef changes.
    ctx = GlyphContext()
    ctx.extra = (".8 dup scale", clefperc)
    return ctx
clefpercsmall = tmpfn()
# ----------------------------------------------------------------------
# Tablature 'clef': just the letters "TAB", written vertically in a
# vaguely calligraphic style.
def tmpfn():
    """Build the tablature 'clef': the letters T, A, B stacked
    vertically in a calligraphic style."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 100, 104, 900, 104)
    c1 = StraightLine(cont, 100, 368, 900, 368)
    c2 = StraightLine(cont, 100, 632, 900, 632)
    c3 = StraightLine(cont, 100, 896, 900, 896)
    c4 = CircleInvolute(cont, 354, 153, 0.564684, -0.825307, 443, 128, 0.964764, 0.263117)
    c5 = CircleInvolute(cont, 443, 128, 0.964764, 0.263117, 555, 118, 0.83205, -0.5547)
    c6 = CircleInvolute(cont, 463, 136, 0.110432, 0.993884, 434, 313, -0.571064, 0.820905)
    c7 = CircleInvolute(cont, 434, 313, -0.571064, 0.820905, 376, 334, -0.83205, -0.5547)
    c8 = CircleInvolute(cont, 333, 603, 0.98387, 0.178885, 486, 384, 0.0416305, -0.999133)
    c9 = CircleInvolute(cont, 486, 384, 0.110432, 0.993884, 527, 572, 0.398726, 0.91707)
    c10 = CircleInvolute(cont, 527, 572, 0.398726, 0.91707, 572, 605, 0.963518, -0.267644)
    c11 = CircleInvolute(cont, 441, 541, 0.724999, 0.688749, 482, 555, 0.977176, -0.21243)
    c12 = CircleInvolute(cont, 355, 698, 0.5547, -0.83205, 464, 648, 0.998168, -0.0604951)
    c13 = CircleInvolute(cont, 464, 648, 0.998168, -0.0604951, 551, 700, 0, 1)
    c14 = CircleInvolute(cont, 551, 700, 0, 1, 475, 756, -0.995634, 0.0933407)
    c15 = CircleInvolute(cont, 475, 756, 0.98995, 0.141421, 555, 822, 0.0416305, 0.999133)
    c16 = CircleInvolute(cont, 555, 822, 0.0416305, 0.999133, 427, 856, -0.815507, -0.578747)
    c17 = CircleInvolute(cont, 446, 667, 0.119145, 0.992877, 417, 815, -0.447214, 0.894427)
    c18 = CircleInvolute(cont, 417, 815, -0.447214, 0.894427, 342, 858, -0.876812, -0.480833)
    c4.weld_to(1, c5, 0)
    c6.weld_to(1, c7, 0)
    c8.weld_to(1, c9, 0, 1)
    c9.weld_to(1, c10, 0)
    c12.weld_to(1, c13, 0)
    c13.weld_to(1, c14, 0)
    c14.weld_to(1, c15, 0, 1)
    c15.weld_to(1, c16, 0)
    c17.weld_to(1, c18, 0)
    # End saved data
    # Stave lines as guides used when I was drawing it
    c0.nib = c1.nib = c2.nib = c3.nib = 0
    # The calligraphic effect: nib widens as strokes turn vertical.
    cont.default_nib = lambda c,x,y,t,theta: 12+10*sin(theta)**2
    # Vertical of T needs not to overlap top of T
    c6.nib = lambda c,x,y,t,theta: (12, theta+pi/2, 10*sin(theta)**2, 10*sin(theta)**2)
    # Special nib for crossbar of A
    c11.nib = lambda c,x,y,t,theta: 12-6*t
    # Hot spot: midway between the second and third stave guide lines.
    cont.hy = (c1.compute_y(0) + c2.compute_y(0)) / 2.0
    return cont
clefTAB = tmpfn()
def tmpfn():
    # 80%-scale TAB clef, as used for mid-stave clef changes.
    ctx = GlyphContext()
    ctx.extra = (".8 dup scale", clefTAB)
    ctx.hy = clefTAB.hy * .8
    return ctx
clefTABsmall = tmpfn()
# ----------------------------------------------------------------------
# Quaver tails.
# Vertical space between multiple tails, which after some
# experimentation I decided should be different between the up- and
# down-pointing stems.
#
# For down stems (so that the tails have to fit under the note
# head), it's about 80% of the spacing between adjacent stave lines
# (which is, in this coordinate system, 250 * 1900/3600 = 132 minus
# 1/18. For up stems, it's a bit more than that: closer to 87%.
quavertaildispdn = 105  # vertical spacing between tails on a down stem
quavertaildispup = 115  # vertical spacing between tails on an up stem
def clipup(tail):
    # Clipped version of an up-quaver-tail designed to fit above
    # another identical tail and stop where it crosses the latter.
    dy = quavertaildispup
    cont = GlyphContext()
    boundary = [tail.c0, tail.c1, (900,1900), (900,100), (100,100), (100,1900)]
    cont.extra = ("gsave 0 %g translate newpath" % dy, clippath(boundary),
                  "clip 0 -%g translate" % dy, tail, "grestore")
    cont.ox, cont.oy = tail.ox, tail.oy
    return cont
def clipdn(tail):
    # Clipped version of a down-quaver-tail designed to fit below
    # another identical tail and stop where it crosses the latter.
    cont = GlyphContext()
    clip = clippath([tail.c0, tail.c1, (900,100), (900,1900), (100,1900), (100,900)])
    cont.extra = "gsave 0 -%g translate newpath" % quavertaildispdn, clip, \
        "clip 0 %g translate" % quavertaildispdn, tail, "grestore"
    # Bug fix: take the origin from the tail we were actually given,
    # matching clipup, instead of always from tailquaverdn. (All the
    # current down-pointing tails appear to share the same start point,
    # so this should not change any existing glyph, but it makes the
    # function correct for an arbitrary tail.)
    cont.ox = tail.ox
    cont.oy = tail.oy
    return cont
def multiup(n, tail):
    # Up-pointing multitail: the full-length tail plus n-1 clipped
    # copies stacked above it.
    clipped = clipup(tail)
    cont = GlyphContext()
    # To make room for the five-tailed quasihemidemisemiquaver, we
    # translate downwards a bit. 95 (== 5*19) in glyph coordinates
    # equals 128 (== 5*36) in output coordinates.
    parts = ["0 95 translate", tail]
    for i in range(n-1):
        parts.extend(["0 -%g translate" % quavertaildispup, clipped])
    cont.extra = tuple(parts)
    dy = quavertaildispup * (n-1)
    cont.ox = tail.ox
    cont.oy = tail.oy - dy + 95
    ox0, oy0 = tail.origin
    cont.origin = (ox0, oy0 + dy*3600./cont.scale - 180)
    return cont
def multidn(n, tail):
    # Down-pointing multitail: the full-length tail plus n-1 clipped
    # copies stacked below it.
    clipped = clipdn(tail)
    cont = GlyphContext()
    parts = [tail]
    for i in range(n-1):
        parts.extend(["0 %g translate" % quavertaildispdn, clipped])
    cont.extra = tuple(parts)
    dy = quavertaildispdn * (n-1)
    cont.ox = tail.ox
    cont.oy = tail.oy + dy
    ox0, oy0 = tail.origin
    cont.origin = (ox0, oy0 - dy*3600./cont.scale)
    return cont
def tmpfn():
    # Full-size tail for a quaver with an up-pointing stem.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 567, 0.948683, 0.316228, 611, 607, 0.7282, 0.685365)
    c1 = CircleInvolute(cont, 611, 607, 0.7282, 0.685365, 606, 840, -0.661622, 0.749838)
    c2 = CircleInvolute(cont, 535, 465, 0.233373, 0.972387, 605, 581, 0.73994, 0.672673)
    c3 = CircleInvolute(cont, 605, 581, 0.73994, 0.672673, 606, 840, -0.661622, 0.749838)
    c4 = StraightLine(cont, 660, 875, 660, 506)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    # c0+c1 form the inner edge of the tail; c2+c3 are stroked with a
    # nib that follows that curve set, giving the tapering shape.
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) - c2.compute_nib(0)[0] - 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle masking everything left of the stem x
    # position, so the tail butts cleanly against the stem.
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 9)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailquaverup = tmpfn()
def tmpfn():
    # Single tail for an up-pointing semiquaver.
    # (Doubled up by multiup below to give the final two-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 556, 1, 0, 602, 571, 0.825307, 0.564684)
    c1 = CircleInvolute(cont, 602, 571, 0.825307, 0.564684, 617, 779, -0.661622, 0.749838)
    c2 = CircleInvolute(cont, 535, 465, 0.371391, 0.928477, 613, 566, 0.732793, 0.680451)
    c3 = CircleInvolute(cont, 613, 566, 0.732793, 0.680451, 617, 779, -0.661622, 0.749838)
    c4 = StraightLine(cont, 660, 783.16, 660, 496.816)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (840 + 54 - quavertaildispup*1)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) - c2.compute_nib(0)[0] - 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverup).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 9)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailsemiup = multiup(2, tmpfn())
def tmpfn():
    # Single tail for an up-pointing demisemiquaver.
    # (Tripled by multiup below to give the final three-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 555, 0.998868, -0.0475651, 586, 561, 0.913812, 0.406138)
    c1 = CircleInvolute(cont, 586, 561, 0.913812, 0.406138, 621, 800, -0.536875, 0.843662)
    c2 = CircleInvolute(cont, 535, 465, 0.416655, 0.909065, 608, 555, 0.734803, 0.67828)
    c3 = CircleInvolute(cont, 608, 555, 0.734803, 0.67828, 621, 800, -0.536875, 0.843662)
    c4 = StraightLine(cont, 660, 835.64, 660, 502.064)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (840 + 58 - quavertaildispup*2 + 132)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) - c2.compute_nib(0)[0] - 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverup).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 9)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
taildemiup = multiup(3, tmpfn())
def tmpfn():
    # Single tail for an up-pointing hemidemisemiquaver.
    # (Quadrupled by multiup below to give the final four-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 555, 0.853282, -0.52145, 577, 552, 0.894427, 0.447214)
    c1 = CircleInvolute(cont, 577, 552, 0.894427, 0.447214, 640, 753, -0.447214, 0.894427)
    c2 = CircleInvolute(cont, 535, 465, 0.28, 0.96, 592, 545, 0.77193, 0.635707)
    c3 = CircleInvolute(cont, 592, 545, 0.77193, 0.635707, 640, 753, -0.447214, 0.894427)
    c4 = StraightLine(cont, 660, 815.96, 660, 500.096)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (840 + 60 - quavertaildispup*3 + 132*1.5)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) - c2.compute_nib(0)[0] - 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverup).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 9)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailhemiup = multiup(4, tmpfn())
def tmpfn():
    # Single tail for an up-pointing quasihemidemisemiquaver.
    # (Quintupled by multiup below to give the final five-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 546, 0.996546, 0.0830455, 607, 575, 0.707107, 0.707107)
    c1 = CircleInvolute(cont, 607, 575, 0.707107, 0.707107, 629, 772, -0.611448, 0.791285)
    c2 = CircleInvolute(cont, 535, 465, 0.371391, 0.928477, 595, 544, 0.707107, 0.707107)
    c3 = CircleInvolute(cont, 595, 544, 0.707107, 0.707107, 629, 772, -0.611448, 0.791285)
    c4 = StraightLine(cont, 660, 868.44, 660, 505.344)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (840 + 62 - quavertaildispup*4 + 132*2.5)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) - c2.compute_nib(0)[0] - 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverup).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 9)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailquasiup = multiup(5, tmpfn())
def tmpfn():
    # Full-size tail for a quaver with a down-pointing stem.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 363, 0.999201, -0.039968, 585, 354, 0.948683, -0.316228)
    c1 = CircleInvolute(cont, 585, 354, 0.948683, -0.316228, 635, 90, -0.563337, -0.826227)
    c2 = CircleInvolute(cont, 535, 465, 0.338427, -0.940993, 627, 349, 0.742268, -0.670103)
    c3 = CircleInvolute(cont, 627, 349, 0.742268, -0.670103, 635, 90, -0.563337, -0.826227)
    c4 = StraightLine(cont, 680, 55, 680, 424)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    # c0+c1 form the inner edge; c2+c3 are stroked with a nib that
    # follows that curve set, giving the tapering shape.
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) + c2.compute_nib(0)[0] + 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle masking everything left of the stem x
    # position, so the tail butts cleanly against the stem.
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 8)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailquaverdn = tmpfn()
def tmpfn():
    # Single tail for a down-pointing semiquaver.
    # (Doubled up by multidn below to give the final two-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 378, 0.99083, -0.135113, 611, 356, 0.868243, -0.496139)
    c1 = CircleInvolute(cont, 611, 356, 0.868243, -0.496139, 663, 195, -0.447214, -0.894427)
    c2 = CircleInvolute(cont, 535, 465, 0.467531, -0.883977, 620, 363, 0.768221, -0.640184)
    c3 = CircleInvolute(cont, 620, 363, 0.768221, -0.640184, 663, 195, -0.447214, -0.894427)
    c4 = StraightLine(cont, 680, 186.2, 680, 437.12)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (90 + quavertaildispdn*1)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) + c2.compute_nib(0)[0] + 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverdn).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 8)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailsemidn = multidn(2, tmpfn())
def tmpfn():
    # Single tail for a down-pointing demisemiquaver.
    # (Tripled by multidn below to give the final three-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 380, 0.9916, -0.129339, 615, 354, 0.861934, -0.50702)
    c1 = CircleInvolute(cont, 615, 354, 0.861934, -0.50702, 653, 168, -0.536875, -0.843662)
    c2 = CircleInvolute(cont, 535, 465, 0.450869, -0.89259, 616, 376, 0.789352, -0.613941)
    c3 = CircleInvolute(cont, 616, 376, 0.789352, -0.613941, 653, 168, -0.536875, -0.843662)
    c4 = StraightLine(cont, 680, 173.08, 680, 435.808)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (90 + quavertaildispdn*2 - 132)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) + c2.compute_nib(0)[0] + 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverdn).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 8)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
taildemidn = multidn(3, tmpfn())
def tmpfn():
    # Single tail for a down-pointing hemidemisemiquaver.
    # (Quadrupled by multidn below to give the final four-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 534, 382, 0.999133, -0.0416305, 605, 369, 0.921635, -0.388057)
    c1 = CircleInvolute(cont, 605, 369, 0.921635, -0.388057, 646, 207, -0.784883, -0.619644)
    c2 = CircleInvolute(cont, 535, 465, 0.338719, -0.940888, 630, 363, 0.825307, -0.564684)
    c3 = CircleInvolute(cont, 630, 363, 0.825307, -0.564684, 646, 207, -0.784883, -0.619644)
    c4 = StraightLine(cont, 680, 232.12, 680, 441.712)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (90 + quavertaildispdn*3 - 132*1.5)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) + c2.compute_nib(0)[0] + 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverdn).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 8)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailhemidn = multidn(4, tmpfn())
def tmpfn():
    # Single tail for a down-pointing quasihemidemisemiquaver.
    # (Quintupled by multidn below to give the final five-tailed glyph.)
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 384, 0.982007, -0.188847, 608, 357, 0.885547, -0.464549)
    c1 = CircleInvolute(cont, 608, 357, 0.885547, -0.464549, 653, 180, -0.606043, -0.795432)
    c2 = CircleInvolute(cont, 535, 465, 0.338719, -0.940888, 633, 348, 0.768221, -0.640184)
    c3 = CircleInvolute(cont, 633, 348, 0.768221, -0.640184, 653, 180, -0.606043, -0.795432)
    c4 = StraightLine(cont, 680, 219, 680, 440.4)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c3, 1, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Make sure the tail length matches what it should be.
    assert round(c1.compute_y(1) - (90 + quavertaildispdn*4 - 132*2.5)) == 0
    c4.nib = 0 # guide line to get the width the same across all versions
    c0.nib = c1.nib = 0
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],0,2,8)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c0,c1],1,2,8)
    cont.c0 = c0 # for tailshortdn
    cont.c1 = c1 # for tailshortdn
    cont.oy = c2.compute_y(0) + c2.compute_nib(0)[0] + 2
    cx = c2.compute_x(0) + c2.compute_nib(0)[0]
    cont.ox = cx
    # White-out rectangle left of the stem position (as tailquaverdn).
    cont.extra = "gsave newpath %g 0 moveto 0 1000 rlineto -100 0 rlineto 0 -1000 rlineto closepath 1 setgray fill grestore" % (cx - 8)
    cont.origin = cx * 3600. / cont.scale - 12, (1000-cont.oy) * 3600. / cont.scale
    return cont
tailquasidn = multidn(5, tmpfn())
# ----------------------------------------------------------------------
# Minim note head.
#
# A minim head has an elliptical outline, and then a long thin
# elliptical hole in the middle.
def tmpfn():
    """Build the minim note head: an elliptical outline with a long
    thin elliptical hole, computed analytically so the hole is exactly
    tangent to the outline.  Also computes cont.ay, the y coordinate
    at which a note stem should attach."""
    cont = GlyphContext()
    # Parameters: a unit vector giving the direction of the ellipse's
    # long axis, the squash ratio (short axis divided by long).
    angle = 37
    sq = 0.35
    # The long and short axes as unit vectors.
    lx, ly = cos(angle*(pi/180)), -sin(angle*(pi/180))
    sx, sy = -sin(angle*(pi/180)), -cos(angle*(pi/180))
    # We want to find an ellipse, centred on the origin, which is large
    # enough to be just tangent to the outline ellipse. To do this, we
    # transform the coordinate system so that the new ellipse is
    # circular, then construct the image of the outline ellipse and find
    # its closest approach to the origin. The circle of that radius,
    # transformed back again, is the ellipse we want.
    #
    # Our original ellipse, discounting translation, is the unit circle
    # fed through a 2x2 matrix transform. We have a second 2x2 matrix
    # transform here, so we multiply the two to construct the matrix
    # which transforms the coordinate system in which the note outline
    # is a circle into the one in which the hole in the middle is a
    # circle.
    mat1 = (1,-.3,0,1) # the shear matrix from the head outline
    mat2 = (76,0,0,67) # the scaling matrix from the head outline
    mat3 = (lx,ly,sx,sy) # rotate so that our desired axes become i,j
    mat4 = (1,0,0,1/sq) # unsquash in the s-axis
    imat = matmul(matmul(mat4,mat3), matmul(mat2,mat1))
    mat = matinv(imat)
    # The equation of the outline ellipse in the new coordinate system
    # is given by transforming (x,y) by the above matrix and then
    # setting the sum of the squares of the transformed coordinates
    # equal to 1. In other words, we have
    #
    #   (x y) (a c) (a b) (x) = 1
    #         (b d) (c d) (y)
    #
    #    => (x y) (a^2+c^2 ab+cd  ) (x) = 1
    #             ( ba+dc  b^2+d^2) (y)
    #
    # and then the matrix in the middle is symmetric, which means we can
    # decompose it into an orthogonal eigenvector matrix and a diagonal
    # eigenvalue matrix, giving us
    #
    #   (x y) (p q) (u 0) (p r) (x) = 1
    #         (r s) (0 v) (q s) (y)
    #
    # Now the eigenvector matrix rotates our coordinate system into one
    # which has the basis vectors aligned with the axes of the ellipse,
    # so in that coordinate system the equation of the ellipse is merely
    # u x^2 + v y^2 = 1. Thus u and v are the squared reciprocals of the
    # lengths of our major and minor axes, so sqrt(min(1/u,1/v)) is the
    # closest approach to the origin of the ellipse in question.
    #
    # (We don't even bother calculating the eigenvector matrix, though
    # we could if we wanted to.)
    matO = (mat[0]*mat[0]+mat[2]*mat[2], mat[1]*mat[0]+mat[3]*mat[2],
            mat[0]*mat[1]+mat[2]*mat[3], mat[1]*mat[1]+mat[3]*mat[3])
    # Characteristic equation of a 2x2 matrix is
    #   (m0-lambda)(m3-lambda) - m1*m2 = 0
    # => lambda^2 - (m0+m3)lambda + (m0*m3-m1*m2) = 0
    # So the eigenvalues are the solutions of that quadratic, i.e.
    #   (m0+m3 +- sqrt((m0-m3)^2+4*m1*m2)) / 2
    u = (matO[0] + matO[3] + sqrt((matO[0]-matO[3])**2 + 4*matO[1]*matO[2]))/2
    v = (matO[0] + matO[3] - sqrt((matO[0]-matO[3])**2 + 4*matO[1]*matO[2]))/2
    r = sqrt(min(1/u, 1/v)) * 0.999 # small hedge against rounding glitches
    # And now we can draw our ellipse: it's the circle about the origin
    # of radius r, squashed in the y-direction by sq, rotated by angle.
    cont.extra = \
        "gsave 527 472 translate newpath " + \
        "matrix currentmatrix 76 67 scale [1 0 -.3 1 0 0] concat 1 0 moveto 0 0 1 0 360 arc closepath setmatrix " + \
        "matrix currentmatrix -%g rotate 1 %g scale %g 0 moveto 0 0 %g 360 0 arcn closepath setmatrix " % (angle,sq,r,r) + \
        "gsave fill grestore 8 setlinewidth stroke grestore"
    # Incidentally, another useful datum output from all of this is
    # that we can compute the exact point on the outer ellipse at
    # which the tangent is vertical, which is where we'll have to
    # put a note stem. This is given by reconstituting the elements
    # of the outer ellipse's transformation matrix into a quadratic
    # in x,y of the form
    #
    #   ax^2 + bxy + cy^2 = 1
    #
    # From this we can differentiate with respect to y to get
    #
    #   2ax dx/dy + bx + by dx/dy + 2cy = 0
    #
    # and then solve for dx/dy to give
    #
    #   dx/dy = -(bx + 2cy) / (2ax + by)
    #
    # which is zero iff its denominator is zero, i.e. y = -bx/2c.
    # Substituting that back into the original equation gives
    #
    #   ax^2 + bx(-bx/2c) + c(bx/2c)^2 = 1
    # =>     ax^2 - (b^2/2c)x^2 + (b^2/4c)x^2 = 1
    # =>     (a - b^2/4c)x^2 = 1
    # =>     x = 1/sqrt(a - b^2/4c)
    #
    # Substituting that into the expression for y and rearranging a
    # bit gives us
    #
    #   x = (2*sqrt(c)) / sqrt(4ac-b^2)
    #   y = (-b/sqrt(c)) / sqrt(4ac-b^2)
    #
    # (Of course, that's on the outer elliptical _path_, which isn't
    # the very outside of the note shape due to the stroke width; so
    # another (currentlinewidth/2) pixels are needed to get to
    # there. But the y-position is correct.)
    matK = matinv(matmul(mat2,mat1))
    matL = (matK[0]*matK[0]+matK[2]*matK[2], matK[1]*matK[0]+matK[3]*matK[2],
            matK[0]*matK[1]+matK[2]*matK[3], matK[1]*matK[1]+matK[3]*matK[3])
    a, b, c = matL[0], matL[1]+matL[2], matL[3]
    denom = sqrt(4*a*c-b*b)
    #sys.stderr.write("%.17f, %.17f\n" % (2*sqrt(c)/denom, -b/sqrt(c)/denom))
    cont.ay = 472 - b/sqrt(c)/denom
    return cont
headminim = tmpfn()
# ----------------------------------------------------------------------
# Filled note head, for crotchet/quaver/semiquaver/etc.
#
# This is identical to the minim head but without the inner hole.
def tmpfn():
    # Filled head for crotchets and shorter notes: the minim head's
    # outer ellipse, filled solid, with no inner hole.
    cont = GlyphContext()
    ps = [
        "gsave 527 472 translate newpath ",
        "matrix currentmatrix 76 67 scale [1 0 -.3 1 0 0] concat "
        "1 0 moveto 0 0 1 0 360 arc closepath setmatrix ",
        "gsave fill grestore 8 setlinewidth stroke grestore",
    ]
    cont.extra = "".join(ps)
    # Stem attachment point is the same as for the minim head.
    cont.ay = headminim.ay
    return cont
headcrotchet = tmpfn()
# ----------------------------------------------------------------------
# Semibreve head. This is another nested pair of ellipses. The outer
# ellipse is unskewed and half again as wide as the crotchet/minim
# head; the inner one is at a totally different angle.
def tmpfn():
    # Semibreve head: an unskewed outer ellipse half again as wide as
    # the minim head, with an inner hole at a quite different angle.
    cont = GlyphContext()
    angle = 120
    sq = 0.75
    # Same construction as for the minim head: transform coordinates so
    # the hole becomes a circle, find the closest approach to the origin
    # of the image of the outline ellipse, and use that as the hole's
    # radius.
    lx, ly = cos(angle*(pi/180)), -sin(angle*(pi/180))
    sx, sy = -sin(angle*(pi/180)), -cos(angle*(pi/180))
    scalemat = (116,0,0,67)     # the scaling matrix from the head outline
    rotmat = (lx,ly,sx,sy)      # rotate so that our desired axes become i,j
    unsquashmat = (1,0,0,1/sq)  # unsquash in the s-axis
    imat = matmul(matmul(unsquashmat,rotmat), scalemat)
    mat = matinv(imat)
    # Quadratic-form matrix of the outline ellipse's image, and its
    # eigenvalues (see the full derivation in the minim head above).
    quadmat = (mat[0]*mat[0]+mat[2]*mat[2], mat[1]*mat[0]+mat[3]*mat[2],
               mat[0]*mat[1]+mat[2]*mat[3], mat[1]*mat[1]+mat[3]*mat[3])
    disc = sqrt((quadmat[0]-quadmat[3])**2 + 4*quadmat[1]*quadmat[2])
    u = (quadmat[0] + quadmat[3] + disc)/2
    v = (quadmat[0] + quadmat[3] - disc)/2
    r = sqrt(min(1/u, 1/v))
    cont.extra = \
        "gsave 527 472 translate newpath " + \
        "matrix currentmatrix 116 67 scale 1 0 moveto 0 0 1 0 360 arc closepath setmatrix " + \
        "matrix currentmatrix -%g rotate 1 %g scale %g 0 moveto 0 0 %g 360 0 arcn closepath setmatrix " % (angle,sq,r,r) + \
        "gsave fill grestore 8 setlinewidth stroke grestore"
    return cont
semibreve = tmpfn()
# A breve is just a semibreve with bars down the sides.
def tmpfn():
    """Build the breve head: the semibreve glyph plus a pair of
    vertical bars down each side."""
    # Saved data from gui.py
    c0 = StraightLine(cont, 398, 390, 398, 554)
    c1 = StraightLine(cont, 656, 390, 656, 554)
    c2 = StraightLine(cont, 362, 390, 362, 554)
    c3 = StraightLine(cont, 692, 390, 692, 554)
    # End saved data
    # All four side bars are stroked at a uniform width of 10.
    cont.default_nib = 10
    # Embed the semibreve glyph unchanged as the head itself.
    cont.extra = semibreve
    return cont
breve = tmpfn()
# ----------------------------------------------------------------------
# Shaped note heads used for drum and other notation.
def tmpfn():
    # Hollow diamond head (semibreve-length), for drum and other
    # special notation.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 411, 472, 0.970536, 0.240956, 527, 539, 0.633646, 0.773623)
    c1 = CircleInvolute(cont, 527, 539, 0.633646, -0.773623, 643, 472, 0.970536, -0.240956)
    c2 = CircleInvolute(cont, 643, 472, -0.970536, -0.240956, 527, 405, -0.633646, -0.773623)
    c3 = CircleInvolute(cont, 527, 405, -0.633646, 0.773623, 411, 472, -0.970536, 0.240956)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c3, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    # Nib extent varies with horizontal distance from the centre line
    # x=527, so the outline thickens towards the left and right points.
    cont.default_nib = lambda c,x,y,t,theta: (6, 0, (527-x)/3, 0)
    return cont
diamondsemi = tmpfn()
def tmpfn():
    # Hollow diamond head (minim-width), narrower than the semibreve
    # version.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 448, 472, 0.939517, 0.342501, 527, 539, 0.487147, 0.87332)
    c1 = CircleInvolute(cont, 527, 539, 0.487147, -0.87332, 606, 472, 0.939517, -0.342501)
    c2 = CircleInvolute(cont, 606, 472, -0.939517, -0.342501, 527, 405, -0.487147, -0.87332)
    c3 = CircleInvolute(cont, 527, 405, -0.487147, 0.87332, 448, 472, -0.939517, 0.342501)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c3, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    cont.default_nib = 6
    # Thicken the two right-hand edges (c1) and left-hand edges (c3)
    # with an angled nib whose extent tapers off near the points.
    c1.nib = lambda c,x,y,t,theta: (6, 127*pi/180, min(12, 300*t, 100*(1-t)), 0)
    c3.nib = lambda c,x,y,t,theta: (6, -53*pi/180, min(12, 300*t, 100*(1-t)), 0)
    return cont
diamondminim = tmpfn()
def tmpfn():
    # Filled diamond head (crotchet and shorter): same outline as the
    # minim diamond, filled solid.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 448, 472, 0.939517, 0.342501, 527, 539, 0.487147, 0.87332)
    c1 = CircleInvolute(cont, 527, 539, 0.487147, -0.87332, 606, 472, 0.939517, -0.342501)
    c2 = CircleInvolute(cont, 606, 472, -0.939517, -0.342501, 527, 405, -0.487147, -0.87332)
    c3 = CircleInvolute(cont, 527, 405, -0.487147, 0.87332, 448, 472, -0.939517, 0.342501)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c3, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    # Fill the diamond.
    cont.default_nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,527,472,6)
    return cont
diamondcrotchet = tmpfn()
def tmpfn():
    # Hollow triangle head (semibreve-length), for drum and other
    # special notation.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 411, 550, 0.944497, -0.328521, 643, 550, 0.944497, 0.328521)
    c1 = CircleInvolute(cont, 643, 550, -0.784883, -0.619644, 527, 405, -0.519947, -0.854199)
    c2 = CircleInvolute(cont, 527, 405, -0.519947, 0.854199, 411, 550, -0.784883, 0.619644)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c2, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    c0.nib = 6
    # Thicken the two sloping sides, tapering the extra extent to zero
    # both at the apex and as the side approaches the base line.
    angle = abs(1/tan(c0.compute_theta(0) + pi/30))
    ybase = c0.compute_y(0)
    c1.nib = lambda c,x,y,t,theta: (6, 0, 0, min((x-527)/3, (ybase-y)*angle))
    c2.nib = lambda c,x,y,t,theta: (6, 0, min((527-x)/3, (ybase-y)*angle), 0)
    return cont
trianglesemi = tmpfn()
def tmpfn():
    """Hollow triangular notehead (minim form).

    Same triangle shape as trianglesemi but weighted differently: the
    right side c1 is a plain stroke, while the bottom edge c0 and left
    side c2 are broadened along a fixed 127-degree direction, with the
    width growing with perpendicular distance from the bottom-left
    corner.  ay/iy are the bottom y extent and its mirror about the
    glyph centre y=472 -- presumably stem-attachment heights for upward
    and inverted use; confirm against the consuming code.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 448, 550, 0.890571, -0.454844, 606, 550, 0.890571, 0.454844)
    c1 = CircleInvolute(cont, 606, 550, -0.65319, -0.757194, 527, 405, -0.382943, -0.923772)
    c2 = CircleInvolute(cont, 527, 405, -0.382943, 0.923772, 448, 550, -0.65319, 0.757194)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c2, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    c1.nib = 6
    angle = 127*pi/180
    vx, vy = cos(angle), -sin(angle)
    # Perpendicular distance between two points along the shading direction.
    vdist = lambda x1,y1,x2,y2: abs(vx*(x1-x2) + vy*(y1-y2))
    x0, y0 = c0.compute_point(0)
    c0.nib = lambda c,x,y,t,theta: (6, angle, vdist(x0,y0,x,y)/3, 0)
    c2.nib = lambda c,x,y,t,theta: (6, angle, 0, min(vdist(x0,y0,x,y)/3, 350*t))
    cont.ay = c0.compute_y(0)
    cont.iy = 2*472 - cont.ay
    return cont
triangleminim = tmpfn()
def tmpfn():
    """Filled triangular notehead (crotchet form).

    Same outline as triangleminim/trianglesemi, but filled by sweeping
    every outline point back to the centre (527,472) via ptp_nib.
    ay/iy are exported as for triangleminim.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 448, 550, 0.890571, -0.454844, 606, 550, 0.890571, 0.454844)
    c1 = CircleInvolute(cont, 606, 550, -0.65319, -0.757194, 527, 405, -0.382943, -0.923772)
    c2 = CircleInvolute(cont, 527, 405, -0.382943, 0.923772, 448, 550, -0.65319, 0.757194)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c2, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    # Fill the triangle.
    cont.default_nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,527,472,6)
    cont.ay = c0.compute_y(0)
    cont.iy = 2*472 - cont.ay
    return cont
trianglecrotchet = tmpfn()
def tmpfn():
    """Cross ('x') notehead, outlined (semibreve form).

    Drawn directly in PostScript: an X of half-diagonal (ax,ay) centred
    at (527,472), stroked once at the outer width and then overstroked
    in white at the inner width, leaving only the outline visible.
    """
    cont = GlyphContext()
    outerw = 9   # outline stroke half-width
    innerr = 12  # radius of the white core stroke
    outerr = innerr + 2*outerw
    ax, ay = 116 - outerr, 70 - outerr
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g 1 index neg 1 index neg moveto 1 index 1 index lineto 1 index neg 1 index moveto neg lineto" % (ax,ay),
                  "gsave %g setlinewidth stroke grestore %g setlinewidth 1 setgray stroke" % (2*outerr, 2*innerr),
                  "grestore"]
    cont.ay = ay  # NOTE(review): unlike the other cross glyphs this is not 472-ay; confirm intended
    return cont
crosssemi = tmpfn()
def tmpfn():
    """Cross ('x') notehead, outlined (minim form).

    As crosssemi but slightly narrower (79 vs 116) and with a thinner
    white core, so the outlined X reads at minim weight.
    """
    cont = GlyphContext()
    outerw = 9   # outline stroke half-width
    innerr = 10  # radius of the white core stroke
    outerr = innerr + 2*outerw
    ax, ay = 79 - outerr, 70 - outerr
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g 1 index neg 1 index neg moveto 1 index 1 index lineto 1 index neg 1 index moveto neg lineto" % (ax,ay),
                  "gsave %g setlinewidth stroke grestore %g setlinewidth 1 setgray stroke" % (2*outerr, 2*innerr),
                  "grestore"]
    cont.ay = 472 - ay  # top extent in glyph coordinates
    return cont
crossminim = tmpfn()
def tmpfn():
    """Cross ('x') notehead, solid (crotchet form).

    A plain X of half-diagonal (ax,ay) centred at (527,472), stroked
    solidly at radius 12 with no white core.
    """
    cont = GlyphContext()
    r = 12
    ax, ay = 79 - r, 70 - r
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g 1 index neg 1 index neg moveto 1 index 1 index lineto 1 index neg 1 index moveto neg lineto" % (ax,ay),
                  "%g setlinewidth stroke" % (2*r),
                  "grestore"]
    cont.ay = 472 - ay  # top extent in glyph coordinates
    return cont
crosscrotchet = tmpfn()
def tmpfn():
    """Circled-cross notehead.

    An X as in crosscrotchet (but wider: half-diagonal 70-r in each
    axis) together with a circle passing exactly through the four arm
    ends (radius sqrt(ax^2+ay^2)), both stroked at radius 12.
    """
    cont = GlyphContext()
    r = 12
    ax, ay = 70 - r, 70 - r
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g 1 index neg 1 index neg moveto 1 index 1 index lineto 1 index neg 1 index moveto neg lineto" % (ax,ay),
                  "%g dup 0 moveto 0 exch 0 exch 0 360 arc" % (sqrt(ax*ax+ay*ay)),
                  "%g setlinewidth stroke" % (2*r),
                  "grestore"]
    return cont
crosscircle = tmpfn()
def tmpfn():
    """Slash notehead, open (semibreve form).

    A parallelogram slanting up to the right: right edge at x=xouter,
    top/bottom at y=+-ay, horizontal side length xwidth.  Stroked (not
    filled) at radius 12 so the interior stays open.
    """
    cont = GlyphContext()
    r = 12
    xouter = 116 - r
    xwidth = 160
    ay = 130 - r
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath" % (xouter,-ay,xouter-xwidth,-ay,-xouter,ay,-xouter+xwidth,ay),
                  "%g setlinewidth 1 setlinejoin stroke" % (2*r),
                  "grestore"]
    cont.ay = 472 - ay  # top extent in glyph coordinates
    return cont
slashsemi = tmpfn()
def tmpfn():
    """Slash notehead, open (minim form).

    As slashsemi but narrower (xouter 76-r, side length 80), still
    stroked open at radius 12.
    """
    cont = GlyphContext()
    r = 12
    xouter = 76 - r
    xwidth = 80
    ay = 130 - r
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath" % (xouter,-ay,xouter-xwidth,-ay,-xouter,ay,-xouter+xwidth,ay),
                  "%g setlinewidth 1 setlinejoin stroke" % (2*r),
                  "grestore"]
    cont.ay = 472 - ay  # top extent in glyph coordinates
    return cont
slashminim = tmpfn()
def tmpfn():
    """Slash notehead, solid (crotchet form).

    Narrowest of the slash family (xouter 56-r, side length 40); the
    parallelogram is both stroked (to round the outline) and filled.
    """
    cont = GlyphContext()
    r = 12
    xouter = 56 - r
    xwidth = 40
    ay = 130 - r
    cont.extra = ["gsave 527 472 translate",
                  "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath" % (xouter,-ay,xouter-xwidth,-ay,-xouter,ay,-xouter+xwidth,ay),
                  "gsave %g setlinewidth 1 setlinejoin stroke grestore fill" % (2*r),
                  "grestore"]
    cont.ay = 472 - ay  # top extent in glyph coordinates
    return cont
slashcrotchet = tmpfn()
# ----------------------------------------------------------------------
# Trill sign. There seem to be two standard-ish designs for this:
# one flowery one in which there are loops all over the place as if
# it's been drawn in several strokes by somebody who didn't bother
# taking the pen off the paper between them (e.g. Euterpe,
# Lilypond), and one simpler one that just looks like 'tr' written
# in an italic font and squashed together. Mine follows the latter
# model, but has a more chisel-nib-calligraphy look than other
# examples I've seen. (I drew it like that as an experiment and
# found I liked it more than the one I was comparing to!)
def tmpfn():
    """The 'tr' trill sign (see the design notes in the comment above).

    c0-c2 form the stem and hooked foot of the 't'; c3-c6 its crossbar
    joining into the body of the 'r'; c7-c8 the r's arm.  The default
    nib is a broad chisel whose direction is supplied per-curve via a
    nibdir attribute, giving the calligraphic look.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 497, 274, 452, 425)
    c1 = CircleInvolute(cont, 452, 425, -0.285601, 0.958349, 488, 456, 0.860055, -0.510202)
    c2 = StraightLine(cont, 488, 456, 547, 421)
    c3 = StraightLine(cont, 413, 344, 488, 343)
    c4 = CircleInvolute(cont, 488, 343, 0.999911, -0.0133321, 559, 335, 0.974222, -0.225592)
    c5 = CircleInvolute(cont, 559, 335, 0.974222, -0.225592, 573, 345, -0.290482, 0.956881)
    c6 = StraightLine(cont, 573, 345, 539, 457)
    c7 = CircleInvolute(cont, 561.107, 382, 0.274721, -0.961524, 621, 332, 1, 0)
    c8 = CircleInvolute(cont, 621, 332, 1, 0, 636, 356, -0.242536, 0.970142)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c7.weld_to(1, c8, 0)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: (3, c.nibdir(t), 17, 17)
    # All the crossbar/r curves share one fixed chisel direction, taken
    # from the end of c4.
    k2 = c4.compute_theta(1)
    c3.nibdir = c4.nibdir = c5.nibdir = c6.nibdir = lambda t: k2
    c7.nibdir = c8.nibdir = c3.nibdir
    topy = c0.compute_y(0)
    # The t's stem tapers in over its first stretch below the top.
    c0.nib = lambda c,x,y,t,theta: qc(t>0.5, 20, (3, 0, 17, min(17,-17+(y-topy)*1.2)))
    theta0 = c0.compute_theta(0)
    theta2 = c2.compute_theta(0)
    # The foot's width swings sinusoidally between the stem's and the
    # exit stroke's angles.
    c1.nib = c2.nib = lambda c,x,y,t,theta: 14+6*cos(pi*(theta-theta0)/(theta2-theta0))
    return cont
trill = tmpfn()
# ----------------------------------------------------------------------
# Crotchet rest. The top section is done by curve-following, drawing
# the two sides of the stroke independently; the bottom section is a
# single curve on the right, with the nib width varying in such a
# way as to present a nice curve on the left.
def tmpfn():
    """Crotchet rest (see the design comment above this function).

    The secondary curves tc0/tc1 give the left side of the jagged top
    section; the primary c0/c1 give its right side, and the area
    between them is filled via follow_curveset_nib.  c2/c3 form the
    bottom hook, drawn as a single right-hand curve whose nib width
    (up to 40, along the averaged direction phia) shapes the inner
    left edge of the hook.
    """
    cont = GlyphContext()
    # Secondary curve set.
    # Saved data from gui.py
    c0 = StraightLine(cont, 502, 276, 589, 352)
    c1 = CircleInvolute(cont, 589, 352, -0.585491, 0.810679, 592, 535, 0.74783, 0.66389)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    tc0, tc1 = c0, c1
    cont = GlyphContext()
    # Primary curve set.
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 502, 276, 0.753113, 0.657892, 494, 448, -0.613941, 0.789352)
    c1 = StraightLine(cont, 494, 448, 592, 535)
    c2 = CircleInvolute(cont, 592, 535, -0.952424, -0.304776, 524, 569, -0.378633, 0.925547)
    c3 = CircleInvolute(cont, 524, 569, -0.378633, 0.925547, 547, 649, 0.745241, 0.666795)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Dependencies between the above: tc0 and c0 must start at the
    # same place heading in the same direction, and tc1 and c1 must
    # end at the same place heading in the same direction.
    c0.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],0,2,6)
    c1.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],1,2,6)
    phi0 = c2.compute_theta(0)
    phi1 = c3.compute_theta(1) + pi
    phia = (phi0 + phi1) / 2
    c2.nib = lambda c,x,y,t,theta: (6, phia, (1-(1-t)**2)*40, 0)
    c3.nib = lambda c,x,y,t,theta: (6, phia, (1-t**2)*40, 0)
    return cont
restcrotchet = tmpfn()
# ----------------------------------------------------------------------
# Quaver rest and friends.
def tmpfn():
    """Quaver (eighth) rest: a slanted stem c0 with one curled hook c1
    ending in a filled blob of radius 33.

    cy is the glyph's reference y (the blob centre plus 76); origin is
    derived from it -- presumably the font-space hot spot, scaled into
    font units via cont.scale.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 531, 271, 588, 81)
    c1 = CircleInvolute(cont, 588, 81, -0.347314, 0.937749, 480, 125, -0.784883, -0.619644)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 8
    blob(c1, 1, 'r', 33, 3)
    cont.cy = c1.compute_y(1) - 33*sin(c1.compute_theta(1)-pi/2) + 76
    cont.origin = 1000, ((1000-cont.cy) * 3600 / cont.scale)
    return cont
restquaver = tmpfn()
def tmpfn():
    """Semiquaver (16th) rest: stem c0 with two hooks (c1 welded to the
    stem, c2 free-floating lower down), each ending in a blob.

    cy/origin as in restquaver, with the origin's x shifted left by 39
    units (scaled) to centre the wider glyph.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 492, 401, 588, 81)
    c1 = CircleInvolute(cont, 588, 81, -0.347314, 0.937749, 480, 125, -0.784883, -0.619644)
    c2 = CircleInvolute(cont, 549, 211, -0.347314, 0.937749, 441, 255, -0.784883, -0.619644)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 8
    blob(c1, 1, 'r', 33, 3)
    blob(c2, 1, 'r', 33, 3)
    cont.cy = c1.compute_y(1) - 33*sin(c1.compute_theta(1)-pi/2) + 76
    cont.origin = 1000-(39*1800/cont.scale), ((1000-cont.cy) * 3600 / cont.scale)
    return cont
restsemi = tmpfn()
def tmpfn():
    """Demisemiquaver (32nd) rest: stem plus three hooks c1-c3 with
    blobs.  cy is taken from the middle hook c2 so the glyph centres
    vertically; origin shifts a further 39 units left per extra hook.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 453, 531, 588, 81)
    c1 = CircleInvolute(cont, 588, 81, -0.347314, 0.937749, 480, 125, -0.784883, -0.619644)
    c2 = CircleInvolute(cont, 549, 211, -0.347314, 0.937749, 441, 255, -0.784883, -0.619644)
    c3 = CircleInvolute(cont, 510, 341, -0.347314, 0.937749, 402, 385, -0.784883, -0.619644)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 8
    blob(c1, 1, 'r', 33, 3)
    blob(c2, 1, 'r', 33, 3)
    blob(c3, 1, 'r', 33, 3)
    # All hooks are congruent, so c1's end angle stands in for c2's.
    cont.cy = c2.compute_y(1) - 33*sin(c1.compute_theta(1)-pi/2) + 76
    cont.origin = 1000-(39*2*1800/cont.scale), ((1000-cont.cy) * 3600 / cont.scale)
    return cont
restdemi = tmpfn()
def tmpfn():
    """Hemidemisemiquaver (64th) rest: stem plus four hooks c1-c4 with
    blobs.  cy is taken from c2 (second hook from the top) and the
    origin shifts 39*3 units left relative to restquaver.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 414, 661, 588, 81)
    c1 = CircleInvolute(cont, 588, 81, -0.347314, 0.937749, 480, 125, -0.784883, -0.619644)
    c2 = CircleInvolute(cont, 549, 211, -0.347314, 0.937749, 441, 255, -0.784883, -0.619644)
    c3 = CircleInvolute(cont, 510, 341, -0.347314, 0.937749, 402, 385, -0.784883, -0.619644)
    c4 = CircleInvolute(cont, 471, 471, -0.347314, 0.937749, 363, 515, -0.784883, -0.619644)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 8
    blob(c1, 1, 'r', 33, 3)
    blob(c2, 1, 'r', 33, 3)
    blob(c3, 1, 'r', 33, 3)
    blob(c4, 1, 'r', 33, 3)
    # All hooks are congruent, so c1's end angle stands in for c2's.
    cont.cy = c2.compute_y(1) - 33*sin(c1.compute_theta(1)-pi/2) + 76
    cont.origin = 1000-(39*3*1800/cont.scale), ((1000-cont.cy) * 3600 / cont.scale)
    return cont
resthemi = tmpfn()
def tmpfn():
    """Quasihemidemisemiquaver (128th) rest: stem plus five hooks c1-c5
    with blobs.  cy is taken from the middle hook c3; origin shifts
    39*4 units left relative to restquaver.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 375, 791, 588, 81)
    c1 = CircleInvolute(cont, 588, 81, -0.347314, 0.937749, 480, 125, -0.784883, -0.619644)
    c2 = CircleInvolute(cont, 549, 211, -0.347314, 0.937749, 441, 255, -0.784883, -0.619644)
    c3 = CircleInvolute(cont, 510, 341, -0.347314, 0.937749, 402, 385, -0.784883, -0.619644)
    c4 = CircleInvolute(cont, 471, 471, -0.347314, 0.937749, 363, 515, -0.784883, -0.619644)
    c5 = CircleInvolute(cont, 432, 601, -0.347314, 0.937749, 324, 645, -0.784883, -0.619644)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 8
    blob(c1, 1, 'r', 33, 3)
    blob(c2, 1, 'r', 33, 3)
    blob(c3, 1, 'r', 33, 3)
    blob(c4, 1, 'r', 33, 3)
    blob(c5, 1, 'r', 33, 3)
    # All hooks are congruent, so c1's end angle stands in for c3's.
    cont.cy = c3.compute_y(1) - 33*sin(c1.compute_theta(1)-pi/2) + 76
    cont.origin = 1000-(39*4*1800/cont.scale), ((1000-cont.cy) * 3600 / cont.scale)
    return cont
restquasi = tmpfn()
def tmpfn():
    """Alternative crotchet rest: the quaver rest mirrored left-right
    (reflected about x=500 by the 'before' transform)."""
    cont = GlyphContext()
    cont.extra = restquaver
    cont.before = "1000 0 translate -1 1 scale"
    return cont
restcrotchetx = tmpfn()
# ----------------------------------------------------------------------
# Rectangular rests (minim/semibreve, breve, longa, double longa).
def tmpfn():
    """Minim/semibreve rest: a plain filled rectangle sitting against a
    stave line (the same glyph serves both, flipped about the line)."""
    cont = GlyphContext()
    x0, y0, x1, y1 = 440, 439, 614, 505
    cont.extra = (
        "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath fill "
        % (x0, y0, x0, y1, x1, y1, x1, y0))
    return cont
restminim = tmpfn()
def tmpfn():
    """Breve rest: a filled rectangle spanning the full gap between two
    stave lines."""
    cont = GlyphContext()
    x0, y0, x1, y1 = 452, 406, 602, 538
    cont.extra = (
        "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath fill "
        % (x0, y0, x0, y1, x1, y1, x1, y0))
    return cont
restbreve = tmpfn()
def tmpfn():
    """Longa rest: a filled rectangle twice the height of the breve
    rest (spanning two stave gaps)."""
    cont = GlyphContext()
    x0, y0, x1, y1 = 452, 406, 602, 670
    cont.extra = (
        "newpath %g %g moveto %g %g lineto %g %g lineto %g %g lineto closepath fill "
        % (x0, y0, x0, y1, x1, y1, x1, y0))
    return cont
restlonga = tmpfn()
def tmpfn():
    """Double longa rest: two longa rests drawn side by side, the
    second translated 300 units to the left."""
    cont = GlyphContext()
    cont.extra = (restlonga, "-300 0 translate", restlonga)
    return cont
restdbllonga = tmpfn()
def tmpfn():
    """Minim rest with its own ledger line (for use outside the stave):
    the plain minim rest plus a horizontal stroke along its base."""
    cont = GlyphContext()
    ledger = "newpath 390 505 moveto 664 505 lineto 12 setlinewidth 1 setlinecap stroke"
    cont.extra = (restminim, ledger)
    cont.oy = 505  # y of the ledger line, presumably the positioning reference
    return cont
restminimo = tmpfn()
def tmpfn():
    """Semibreve rest with its own ledger line: the rest rectangle
    hangs below the stroke, so the line runs along its top (y=439)."""
    cont = GlyphContext()
    ledger = "newpath 390 439 moveto 664 439 lineto 12 setlinewidth 1 setlinecap stroke"
    cont.extra = (restminim, ledger)
    cont.oy = 439  # y of the ledger line, presumably the positioning reference
    return cont
restsemibreveo = tmpfn()
# ----------------------------------------------------------------------
# Digits for time signatures.
def tmpfn(): # zero
    """Time-signature digit 0: a ring of eight involute arcs.

    Stroke width varies with distance from the vertical centre ymid, so
    the ring is thickest at its left and right sides and thins to the
    bare 6-unit nib at top and bottom.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 528, 253, 1, 0, 572, 273, 0.60745, 0.794358)
    c1 = CircleInvolute(cont, 572, 273, 0.60745, 0.794358, 597, 362, 0, 1)
    c2 = CircleInvolute(cont, 597, 362, 0, 1, 572, 451, -0.60745, 0.794358)
    c3 = CircleInvolute(cont, 572, 451, -0.60745, 0.794358, 528, 471, -1, 0)
    c4 = CircleInvolute(cont, 528, 471, -1, 0, 484, 451, -0.60745, -0.794358)
    c5 = CircleInvolute(cont, 484, 451, -0.60745, -0.794358, 459, 362, 0, -1)
    c6 = CircleInvolute(cont, 459, 362, 0, -1, 484, 273, 0.60745, -0.794358)
    c7 = CircleInvolute(cont, 484, 273, 0.60745, -0.794358, 528, 253, 1, 0)
    c0.weld_to(1, c1, 0)
    c0.weld_to(0, c7, 1)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0)
    # End saved data
    ymid = float(c1.compute_y(1))
    yext = abs(ymid - c0.compute_y(0))
    cont.default_nib = lambda c,x,y,t,theta: (6, 0, 25*(1-abs((y-ymid)/yext)**2.5), 25*(1-abs((y-ymid)/yext)**2.5))
    return cont
big0 = tmpfn()
def tmpfn(): # one
    """Time-signature digit 1: a short diagonal flag c0 tapering into a
    broad vertical stem c1, with a serif flaring out over the stem's
    last 50 units (the same serif formula as on the 4).
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 467, 342, 513, 257)
    c1 = StraightLine(cont, 538, 257, 538, 467)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: (6, 0, 10*t, 0)
    y2 = c1.compute_y(1)
    y1 = y2-50 # this value is the same as is used for the serif on the 4
    # Quartic flare: zero above y1, growing to 26 at the baseline.
    serif = lambda y: qc(y<y1,0,26*((y-y1)/(y2-y1))**4)
    c1.nib = lambda c,x,y,t,theta: (6, 0, 25+serif(y), 25+serif(y))
    return cont
big1 = tmpfn()
def tmpfn(): # two
    """Time-signature digit 2.

    The secondary curves tc0/tc1 define the inner edge of the tail at
    the bottom of the 2 (the same hack as on the 3 -- see big3); c0/c1
    in the primary context are the outer edge, filled between via
    follow_curveset_nib.  c2-c5 form the spine and head, their width
    driven by an x-based formula so the stroke is thickest on the
    right; c6/c7 are an overdrawn inner curve smoothing the head's top,
    and the head terminates in a blob on c5.
    """
    # Secondary context.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 615, 419, -0.26963, 0.962964, 560, 424, -0.865426, -0.501036)
    c1 = CircleInvolute(cont, 560, 424, -0.865426, -0.501036, 449, 467, -0.419058, 0.907959)
    c0.weld_to(1, c1, 0)
    # End saved data
    tc0, tc1 = c0, c1
    # Primary context.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 615, 419, -0.26963, 0.962964, 548, 468, -0.83205, -0.5547)
    c1 = CircleInvolute(cont, 548, 468, -0.83205, -0.5547, 449, 467, -0.419058, 0.907959)
    c2 = CircleInvolute(cont, 449, 467, 0, -1, 523, 381, 0.94299, -0.33282)
    c3 = CircleInvolute(cont, 523, 381, 0.94299, -0.33282, 583, 307, 0, -1)
    c4 = CircleInvolute(cont, 583, 307, 0, -1, 530, 253, -1, 0)
    c5 = CircleInvolute(cont, 530, 253, -1, 0, 467, 275, -0.7282, 0.685365)
    c6 = CircleInvolute(cont, 561, 307, 0, -1, 512, 261, -1, 0)
    c7 = CircleInvolute(cont, 512, 261, -1, 0, 467, 275, -0.7282, 0.685365)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c6.weld_to(1, c7, 0)
    # End saved data
    cont.default_nib = 6
    # (The unused xr/xl locals previously computed here -- a copy-paste
    # leftover from the 5, where they are actually used -- have been
    # removed.)
    c0.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],0,2,6)
    c1.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0,tc1],1,2,6)
    # Width grows linearly with x across each curve's own x extent.
    c4.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, 0, k, k))(22*((x-min(x1,x2))/abs(x2-x1)))))(c.compute_x(0),c.compute_x(1))
    c3.nib = c2.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, 0, k, k))(22*((x-min(x1,x2))/abs(x2-x1)))))(c2.compute_x(0),c3.compute_x(1))
    blob(c5, 1, 'l', 25, 4)
    return cont
big2 = tmpfn()
def tmpfn(): # three
    """Time-signature digit 3: two bowls with blobs at both free ends.

    c0-c5 are the outline proper, with an x-based width formula on the
    right-hand curves; c6-c9 are overdrawn inner curves hiding the
    corners that formula would leave at the extremes -- see the hack
    explanation in the comment below.
    """
    cont = GlyphContext()
    # Bit of a hack here. The x-based formula I use for the nib
    # thickness of the right-hand curves c1-c4 leaves a nasty corner
    # at the very top and bottom, which I solve by drawing an
    # independent inner curve at each end (c6-c9). Normally I would
    # solve this using follow_curveset_nib, filling the area between
    # c6-c7 and c0-c1 and that between c8-c9 and c2-c3; however,
    # that gets the inner curve right but destroys the outer curve
    # from the x-based formula. So instead I just do the simplest
    # possible thing: draw c1-c4 with the nib thickness formula as
    # before, but then draw c6-c9 over the top at constant
    # thickness, relying on the fact that they never separate far
    # enough from what would otherwise be the inner curve to open a
    # gap between them.
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 462, 446, 0.7282, 0.685365, 525, 471, 1, 0)
    c1 = CircleInvolute(cont, 525, 471, 1, 0, 580, 416, 0, -1)
    c2 = CircleInvolute(cont, 580, 416, 0, -1, 504, 352, -1, 0)
    c3 = CircleInvolute(cont, 504, 352, 1, 0, 578, 303, 0, -1)
    c4 = CircleInvolute(cont, 578, 303, 0, -1, 525, 253, -1, 0)
    c5 = CircleInvolute(cont, 525, 253, -1, 0, 462, 276, -0.7282, 0.685365)
    c6 = CircleInvolute(cont, 462, 446, 0.7282, 0.685365, 510, 464, 1, 0)
    c7 = CircleInvolute(cont, 510, 464, 1, 0, 558, 416, 0, -1)
    c8 = CircleInvolute(cont, 556, 303, 0, -1, 511, 261, -1, 0)
    c9 = CircleInvolute(cont, 511, 261, -1, 0, 462, 276, -0.7282, 0.685365)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c6.weld_to(1, c7, 0)
    c8.weld_to(1, c9, 0)
    # End saved data
    cont.default_nib = 6
    # Width grows linearly with x across each curve's own x extent.
    c1.nib = c2.nib = c3.nib = c4.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, 0, k, k))(22*((x-min(x1,x2))/abs(x2-x1)))))(c.compute_x(0),c.compute_x(1))
    blob(c0, 0, 'r', 25, 4)
    blob(c5, 1, 'l', 25, 4)
    return cont
big3 = tmpfn()
def tmpfn(): # four
    """Time-signature digit 4.

    The secondary curve tc0 is the curved inner edge of the diagonal;
    the primary c0 is its straight outer edge, filled between via
    follow_curveset_nib.  c2 is the vertical stem (with baseline serif,
    clipped where it meets the diagonal); c3 the crossbar.  This glyph
    also defines the shared vertical metrics ty/by/gy for all digits.
    """
    # Secondary context
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 496, 257, 0, 1, 432, 413, -0.665255, 0.746617)
    # End saved data
    tc0 = c0
    # Primary context
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 571, 257, 432, 413)
    c1 = StraightLine(cont, 432, 413, 514, 413)
    c2 = StraightLine(cont, 551, 299, 551, 467)
    c3 = StraightLine(cont, 450, 411, 599, 411)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[tc0],0,1,6)
    c1.nib = 6
    gradient = tan(c0.compute_theta(0))
    y0 = c2.compute_y(0)
    y2 = c2.compute_y(1)
    y1 = y2-50 # this value is the same as is used for the serif on the 1
    # Quartic serif flare over the stem's last 50 units.
    serif = lambda y: qc(y<y1,0,26*((y-y1)/(y2-y1))**4)
    # Right side of the stem serifed as usual; left side also clipped
    # against the diagonal so the stem doesn't poke through it.
    c2.nib = lambda c,x,y,t,theta: (6, 0, 25+serif(y), min(25+serif(y), -25+(y-y0)/gradient))
    c3.nib = 8
    # Top line and baseline of the digits are defined by the 4.
    cont.ty = c0.compute_y(0) - c0.compute_nib(0)[0]
    cont.by = c2.compute_y(1) + c2.compute_nib(1)[0]
    # Icky glitch-handling stuff (see -lily section).
    cont.gy = (cont.ty + cont.by) / 2 + (250*cont.scale/3600.0)
    return cont
big4 = tmpfn()
def tmpfn(): # five
    """Time-signature digit 5.

    c0-c3 form the lower bowl (with the x-based width formula on its
    right-hand curves); c4 the short vertical back; c5 the top bar,
    tapering towards its right end; c6/c7 are the overdrawn inner curve
    of the bowl (the same hack as on the 3); blob at the bowl's tail.
    """
    cont = GlyphContext()
    # At the bottom of the 5 I use the same hack as I did for the 3
    # to get the inner curve. See below.
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 461, 442, 0.7282, 0.685365, 524, 471, 1, 0)
    c1 = CircleInvolute(cont, 524, 471, 1, 0, 579, 400, 0, -1)
    c2 = CircleInvolute(cont, 579, 400, 0, -1, 520, 332, -1, 0)
    c3 = CircleInvolute(cont, 520, 332, -1, 0, 461, 351, -0.795432, 0.606043)
    c4 = StraightLine(cont, 461, 351, 469, 257)
    c5 = CircleInvolute(cont, 469, 257, 0.938343, 0.345705, 596, 257, 0.953583, -0.301131)
    c6 = CircleInvolute(cont, 461, 442, 0.7282, 0.685365, 506, 463, 1, 0)
    c7 = CircleInvolute(cont, 506, 463, 1, 0, 557, 400, 0, -1)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c6.weld_to(1, c7, 0)
    # End saved data
    cont.default_nib = 6
    # Width grows linearly with x across each curve's own x extent.
    c1.nib = c2.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, 0, k, k))(22*((x-min(x1,x2))/abs(x2-x1)))))(c.compute_x(0),c.compute_x(1))
    xr = c5.compute_x(1)
    xl = c5.compute_x(0)
    # Quartic taper applied only in the right half of the top bar.
    taper = lambda x: (qc(x>0, (lambda t: t**4), (lambda t: 0)))(x)
    xm = xl + 0.5*(xr-xl)
    c5.nib = lambda c,x,y,t,theta: (6,-pi/2,32*(1-taper((x-xm)/(xr-xm))),0)
    blob(c0, 0, 'r', 25, 4)
    return cont
big5 = tmpfn()
def tmpfn(): # six
    """Time-signature digit 6.

    c0-c3 are the closed inner counter (the 'o' of the 6); c4-c8 the
    tall outer curve ending in a blob at the top right.  Both use a
    y-based width profile (thickest at mid-height, vanishing at the
    extremes); the counter's left side is suppressed below ythreshold
    where it would overlap the outer curve.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 535, 471, -1, 0, 479, 408, 0, -1)
    c1 = CircleInvolute(cont, 479, 408, 0, -1, 535, 349, 1, 0)
    c2 = CircleInvolute(cont, 535, 349, 1, 0, 591, 408, 0, 1)
    c3 = CircleInvolute(cont, 591, 408, 0, 1, 535, 471, -1, 0)
    c4 = CircleInvolute(cont, 535, 471, -1, 0, 491, 446, -0.60745, -0.794358)
    c5 = CircleInvolute(cont, 491, 446, -0.60745, -0.794358, 466, 360, 0, -1)
    c6 = CircleInvolute(cont, 466, 360, 0, -1, 493, 277, 0.658505, -0.752577)
    c7 = CircleInvolute(cont, 493, 277, 0.658505, -0.752577, 546, 253, 1, 0)
    c8 = CircleInvolute(cont, 546, 253, 1, 0, 598, 275, 0.7282, 0.685365)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0)
    c7.weld_to(1, c8, 0)
    # End saved data
    ymid = float(c5.compute_y(1))
    yext = abs(ymid - c4.compute_y(0))
    cont.default_nib = lambda c,x,y,t,theta: (6, 0, 25*(1-abs((y-ymid)/yext)**2.5), 25*(1-abs((y-ymid)/yext)**2.5))
    ytop2 = c2.compute_y(0)
    ybot2 = c3.compute_y(1)
    ymid2 = (ytop2+ybot2)/2
    yext2 = abs(ymid2 - ytop2)
    c2.nib = c3.nib = lambda c,x,y,t,theta: (6, 0, 22*(1-abs((y-ymid2)/yext2)**2.5), 22*(1-abs((y-ymid2)/yext2)**2.5))
    ythreshold = c1.compute_y(0.5)
    c0.nib = c1.nib = lambda c,x,y,t,theta: (6, 0, 22*(1-abs((y-ymid2)/yext2)**2.5), qc(y>ythreshold, 0, 22*(1-abs((y-ymid2)/yext2)**2.5)))
    c8.nib = 6
    blob(c8, 1, 'r', 25, 4)
    # FIXME: consider redoing this using the x-based formula I used
    # on the 3.
    return cont
big6 = tmpfn()
def tmpfn(): # seven
    """Time-signature digit 7.

    c0-c3 are the left/inner edge of the stroke; c4-c7 the right/outer
    edge, drawn at a token nib of 1 and used only as the target that
    follow_curveset_nib fills towards.  The serif at the top-left is
    added by overdrawn copies of c3/c7 (xc3/xc7), re-created from
    their serialised form so they register with the context.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 538, 467, 0, -1, 568, 353, 0.544988, -0.838444)
    c1 = CircleInvolute(cont, 568, 353, 0.544988, -0.838444, 604, 257, 0.233373, -0.972387)
    c2 = CircleInvolute(cont, 604, 257, -0.546268, 0.837611, 491, 284, -0.7282, -0.685365)
    c3 = CircleInvolute(cont, 491, 284, -0.7282, -0.685365, 444, 283, -0.563337, 0.826227)
    c4 = CircleInvolute(cont, 479, 467, 0, -1, 545, 345, 0.759257, -0.650791)
    c5 = CircleInvolute(cont, 545, 345, 0.759257, -0.650791, 604, 257, 0.233373, -0.972387)
    c6 = CircleInvolute(cont, 604, 257, -0.563337, 0.826227, 558, 273, -0.768221, -0.640184)
    c7 = CircleInvolute(cont, 558, 273, -0.768221, -0.640184, 444, 283, -0.563337, 0.826227)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0, 1)
    c6.weld_to(1, c7, 0)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c4,c5,c6,c7],0,4,6)
    c1.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c4,c5,c6,c7],1,4,6)
    c2.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c4,c5,c6,c7],2,4,6)
    c3.nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,[c4,c5,c6,c7],3,4,6)
    c4.nib = c5.nib = c6.nib = c7.nib = 1 # essentially ignore these
    x2 = c7.compute_x(1)
    x0 = c7.compute_x(0)
    x1 = x2 + 0.4 * (x0-x2)
    # Quartic serif flare over the leftmost 40% of the crossbar.
    serif = lambda x: qc(x>x1,0,26*((x-x1)/(x2-x1))**4)
    # eval() of an internally generated serialisation, not external input.
    xc3 = eval(c3.serialise())
    xc7 = eval(c7.serialise())
    xc3.nib = xc7.nib = lambda c,x,y,t,theta: (lambda k: (6,pi/2,k,k))(serif(x))
    return cont
big7 = tmpfn()
def tmpfn(): # eight
    """Time-signature digit 8 (see the design constraints in the
    comments below).

    All curves are drawn in an x-squashed space (mx scales x by 0.75)
    to make the bowls circular enough for involutes.  tcurves (c0-c2)
    give one edge of the S-stroke and are invisible (nib 0); curves
    (c4-c6) are the other edge, filled towards them; c3/c7 are the
    crossing hairlines with linearly varying width.
    """
    # The traditional 8 just contains _too_ many ellipse-like curves
    # to draw sensibly using involutes, so I resorted to squashing
    # the x-axis down by 3/4 so that the ellipses became more
    # circular.
    # This glyph is designed so that its _exterior_ outline is
    # mirror-symmetric. To this end, constraints currently
    # unenforced by gui.py are:
    #  - c4 should be an exact mirror image of c3
    #  - c2 should be an exact mirror image of c7
    #
    # Also, of course, c0 must join up precisely to c3 just as c4
    # does, and likewise c2 to c7 just like c6.
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 529, 255, -1, 0, 490, 293, 0.485643, 0.874157, mx=(0.75, 0, 0, 1))
    c1 = CircleInvolute(cont, 490, 293, 0.485643, 0.874157, 575, 353, 0.925547, 0.378633, mx=(0.75, 0, 0, 1))
    c2 = CircleInvolute(cont, 575, 353, 0.925547, 0.378633, 529, 469, -1, 0, mx=(0.75, 0, 0, 1))
    c3 = CircleInvolute(cont, 559, 365, 0.942302, -0.334765, 529, 255, -1, 0, mx=(0.75, 0, 0, 1))
    c4 = CircleInvolute(cont, 529, 255, -1, 0, 499, 365, 0.942302, 0.334765, mx=(0.75, 0, 0, 1))
    c5 = CircleInvolute(cont, 499, 365, 0.942302, 0.334765, 576, 427, 0.263117, 0.964764, mx=(0.75, 0, 0, 1))
    c6 = CircleInvolute(cont, 576, 427, 0.263117, 0.964764, 529, 469, -1, 0, mx=(0.75, 0, 0, 1))
    c7 = CircleInvolute(cont, 529, 469, -1, 0, 483, 353, 0.925547, -0.378633, mx=(0.75, 0, 0, 1))
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0)
    # End saved data
    tcurves = c0,c1,c2
    curves = c4,c5,c6
    for i in range(len(tcurves)):
        tcurves[i].nib = 0
    for i in range(len(curves)):
        curves[i].i = i
        curves[i].nib = lambda c,x,y,t,theta: follow_curveset_nib(c,x,y,t,theta,tcurves,c.i,len(curves),8)
    c3.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (8, 0, 0, k))(9*((x-min(x1,x2))/abs(x2-x1)))))(c.compute_x(0),c.compute_x(1))
    c7.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (8, 0, k, 0))(9*((max(x1,x2)-x)/abs(x2-x1)))))(c.compute_x(0),c.compute_x(1))
    return cont
big8 = tmpfn()
def tmpfn(): # nine
    """Time-signature digit 9: the 6 rotated in construction.

    c0-c3 are the closed counter at the top; c4-c8 the tall outer curve
    sweeping down to a blob at the bottom left.  Same y-based width
    profiles as on the 6, with the counter's right side suppressed
    above ythreshold where it would overlap the outer curve.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 522, 253, 1, 0, 578, 316, 0, 1)
    c1 = CircleInvolute(cont, 578, 316, 0, 1, 522, 375, -1, 0)
    c2 = CircleInvolute(cont, 522, 375, -1, 0, 466, 316, 0, -1)
    c3 = CircleInvolute(cont, 466, 316, 0, -1, 522, 253, 1, 0)
    c4 = CircleInvolute(cont, 522, 253, 1, 0, 566, 278, 0.60745, 0.794358)
    c5 = CircleInvolute(cont, 566, 278, 0.60745, 0.794358, 591, 364, 0, 1)
    c6 = CircleInvolute(cont, 591, 364, 0, 1, 564, 447, -0.658505, 0.752577)
    c7 = CircleInvolute(cont, 564, 447, -0.658505, 0.752577, 511, 471, -1, 0)
    c8 = CircleInvolute(cont, 511, 471, -1, 0, 459, 449, -0.7282, -0.685365)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    c6.weld_to(1, c7, 0)
    c7.weld_to(1, c8, 0)
    # End saved data
    ymid = float(c5.compute_y(1))
    yext = abs(ymid - c4.compute_y(0))
    cont.default_nib = lambda c,x,y,t,theta: (6, 0, 25*(1-abs((y-ymid)/yext)**2.5), 25*(1-abs((y-ymid)/yext)**2.5))
    ytop2 = c2.compute_y(0)
    ybot2 = c3.compute_y(1)
    ymid2 = (ytop2+ybot2)/2
    yext2 = abs(ymid2 - ytop2)
    c2.nib = c3.nib = lambda c,x,y,t,theta: (6, 0, 22*(1-abs((y-ymid2)/yext2)**2.5), 22*(1-abs((y-ymid2)/yext2)**2.5))
    ythreshold = c1.compute_y(0.5)
    c0.nib = c1.nib = lambda c,x,y,t,theta: (6, 0, qc(y<ythreshold, 0, 22*(1-abs((y-ymid2)/yext2)**2.5)), 22*(1-abs((y-ymid2)/yext2)**2.5))
    c8.nib = 6
    blob(c8, 1, 'r', 25, 4)
    # FIXME: consider redoing this using the x-based formula I used
    # on the 3. (Well, recopying from the 6 if I do.)
    return cont
big9 = tmpfn()
def tmpfn():
    """Plus sign (for additive time signatures): two 100-unit strokes
    crossing at (550,362), drawn at nib width 12."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 500, 362, 600, 362)
    c1 = StraightLine(cont, 550, 312, 550, 412)
    # End saved data
    cont.default_nib = 12
    return cont
asciiplus = tmpfn()
def tmpfn():
    """Minus/hyphen: the horizontal stroke of the plus sign alone,
    drawn at the same nib width 12."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 500, 362, 600, 362)
    # End saved data
    cont.default_nib = 12
    return cont
asciiminus = tmpfn()
def tmpfn():
    """Comma: a short tail curve whose width falls off as cos^2 along
    its length, with a small blob rounding off the head."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 573, 435, 0.843662, 0.536875, 548, 535, -0.894427, 0.447214)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: 4+25*cos(pi/2*t)**2
    blob(c0, 0, 'l', 5, 0)
    return cont
asciicomma = tmpfn()
def tmpfn():
    """Full stop: a filled circle of radius 34 centred at (500,439)."""
    cont = GlyphContext()
    cx, cy, r = 500, 439, 34
    cont.extra = "newpath %g %g %g 0 360 arc fill" % (cx, cy, r)
    return cont
asciiperiod = tmpfn()
# Give every other time-signature glyph the same vertical metrics as
# the 4, which defines the digits' top line, baseline and glitch y.
for x in (big0, big1, big2, big3, big5, big6, big7, big8, big9,
          asciiplus, asciiminus, asciicomma, asciiperiod):
    x.ty, x.by, x.gy = big4.ty, big4.by, big4.gy
# ----------------------------------------------------------------------
# The small digits used for ntuplets and fingering marks. Scaled and
# sheared versions of the big time-signature digits.
# The ten small digits differ only in which big digit they transform,
# so build them all with one parameterized constructor instead of ten
# copy-pasted tmpfn bodies.
def _small_digit(big):
    """Return a GlyphContext rendering *big* scaled by 0.6 x 0.72 and
    sheared by the matrix [1 0 -.3 1 0 0] (an italic slant), with the
    whole transformation centred on the point (480,480)."""
    cont = GlyphContext()
    cont.extra = "gsave 480 480 translate 0.6 0.72 scale [1 0 -.3 1 0 0] concat -480 -480 translate", big, "grestore"
    return cont
small0 = _small_digit(big0)
small1 = _small_digit(big1)
small2 = _small_digit(big2)
small3 = _small_digit(big3)
small4 = _small_digit(big4)
small5 = _small_digit(big5)
small6 = _small_digit(big6)
small7 = _small_digit(big7)
small8 = _small_digit(big8)
small9 = _small_digit(big9)
# ----------------------------------------------------------------------
# The big C for common time signature.
def tmpfn():
    """The big C for the common-time signature.

    c0 and c3 (the opening and closing sweeps) are plain strokes; the
    bowl c1/c2 thickens on its left via a quadratic x-based formula
    (width up to 44).  A blob terminates the upper end.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 732, 391, -0.5547, -0.83205, 659, 353, -1, 0)
    c1 = CircleInvolute(cont, 659, 353, -1, 0, 538, 470, 0, 1)
    c2 = CircleInvolute(cont, 538, 470, 0, 1, 650, 587, 1, 0)
    c3 = CircleInvolute(cont, 650, 587, 1, 0, 742, 508, 0.135113, -0.99083)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    c0.nib = c3.nib = 6
    c1.nib = c2.nib = lambda c,x,y,t,theta: (lambda x1,x2: ((lambda k: (6, 0, k, 0))(44*((x-max(x1,x2))/abs(x2-x1))**2)))(c.compute_x(0),c.compute_x(1))
    blob(c0, 0, 'r', 32, 8)
    return cont
timeC = tmpfn()
def tmpfn():
    """Cut-time (alla breve) signature: the common-time C with a
    vertical stroke through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 648, 272, 648, 672)
    # End saved data
    cont.default_nib = 8
    cont.extra = timeC
    return cont
timeCbar = tmpfn()
# ----------------------------------------------------------------------
# Dynamics marks (f,m,p,s,z).
def tmpfn(): # m (we do this one first to define the baseline)
    """Dynamics letter 'm' (as in mp/mf), bold italic.

    Three arches (c0/c1, c3/c4, c6/c7) each lead into a slanted
    downstroke (c2, c5, c8); the last stroke curls up into a tail
    (c9/c10).  Downstrokes use a fixed broad nib; the arches' width
    swings with stroke angle, thinning at the joins.

    Exports: by/lby give the dynamics baseline (with/without nib
    radius), shared by the other dynamics letters; lx/rx are the
    glyph's logical side bearings, adjusted by precomputed offsets --
    presumably spacing constants from elsewhere; confirm against the
    code that consumes them.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 539, 378, 0.328521, -0.944497, 585, 331, 1, 0)
    c1 = CircleInvolute(cont, 585, 331, 1, 0, 606, 360, -0.287348, 0.957826)
    c2 = StraightLine(cont, 606, 360, 576, 460)
    c3 = CircleInvolute(cont, 621, 360, 0.287348, -0.957826, 648, 331, 1, 0)
    c4 = CircleInvolute(cont, 648, 331, 1, 0, 669, 360, -0.287348, 0.957826)
    c5 = StraightLine(cont, 669, 360, 639, 460)
    c6 = CircleInvolute(cont, 684, 360, 0.287348, -0.957826, 711, 331, 1, 0)
    c7 = CircleInvolute(cont, 711, 331, 1, 0, 732, 360, -0.286206, 0.958168)
    c8 = StraightLine(cont, 732, 360, 709, 437)
    c9 = CircleInvolute(cont, 709, 437, -0.286206, 0.958168, 726, 463, 1, 0)
    c10 = CircleInvolute(cont, 726, 463, 1, 0, 773, 415, 0.328521, -0.944497)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c6.weld_to(1, c7, 0)
    c7.weld_to(1, c8, 0)
    c8.weld_to(1, c9, 0)
    c9.weld_to(1, c10, 0)
    # End saved data
    cont.default_nib = 4
    c2.nib = c5.nib = c8.nib = (4,0,15,15)
    phi = c1.compute_theta(1)
    psi = c0.compute_theta(0)
    # Arch width peaks where the stroke angle matches the downstrokes'.
    c0.nib = c1.nib = c3.nib = c4.nib = c6.nib = c7.nib = c9.nib = c10.nib = lambda c,x,y,t,theta: (lambda k: 4+k)(15*cos(pi/2*(theta-phi)/(psi-phi))**2)
    cont.lby = c2.compute_y(1)
    cont.by = c2.compute_y(1) + c2.compute_nib(1)[0]
    cont.lx = 557 + (-41.38 - 34.62) * cont.scale / 3600.0
    cont.rx = 751 - (-49.53 - -87.53) * cont.scale / 3600.0
    return cont
dynamicm = tmpfn()
def tmpfn(): # f
    """Dynamics letter 'f': long S-curved stem with crossbar and a blob
    at each end, stroke width modulated by a quintic in y."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 720, 269, -0.60745, -0.794358, 690, 254, -1, 0)
    c1 = CircleInvolute(cont, 690, 254, -1, 0, 600, 359, -0.21243, 0.977176)
    c2 = CircleInvolute(cont, 600, 359, -0.21243, 0.977176, 550, 506, -0.462566, 0.886585)
    c3 = CircleInvolute(cont, 550, 506, -0.462566, 0.886585, 490, 552, -1, 0)
    c4 = CircleInvolute(cont, 490, 552, -1, 0, 463, 516, 0.301131, -0.953583)
    c5 = StraightLine(cont, 540, 349, 661, 349)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    # End saved data
    cont.default_nib = 8
    yt = c1.compute_y(0)
    yb = c3.compute_y(1)
    m = 0.6
    # Construct a quintic which is 0 with derivative 0 at both 0 and
    # 1, and 1 with derivative 0 at m. Second derivative at 0 is
    # non-negative iff m <= 0.6, so we require 0.4 <= m <= 0.6 for
    # the values on [0,1] to be contained within [0,1].
    denom = m*m*m*(-1+m*(3+m*(-3+m)))
    a = (2-4*m)/denom
    b = (-4+m*(5+m*5))/denom
    c = (2+m*(2+m*-10))/denom
    d = (m*(-3+m*5))/denom
    quintic = lambda x: x*x*(d+x*(c+x*(b+x*a)))
    # Stem is thickest in the middle, thinning toward both tips.
    c1.nib = c2.nib = c3.nib = lambda c,x,y,t,theta: (8+20*quintic((y-yb)/(yt-yb))**0.3)
    c5.nib = 10
    blob(c0, 0, 'r', 20, 8)
    blob(c4, 1, 'r', 20, 8)
    cont.by = dynamicm.by  # share the common dynamics baseline
    cont.lx = 496.7 + (-81.74 - -86.74) * cont.scale / 3600.0
    cont.rx = 657.7 - (-139.36 - -165.36) * cont.scale / 3600.0
    return cont
dynamicf = tmpfn()
def tmpfn(): # p
    """Dynamics letter 'p': arched entry stroke, seriffed descender and
    a sheared bowl."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 539, 378, 0.328521, -0.944497, 585, 331, 1, 0)
    c1 = CircleInvolute(cont, 585, 331, 1, 0, 606, 360, -0.289177, 0.957276)
    c2 = StraightLine(cont, 606, 360, 548, 552)
    c3 = CircleInvolute(cont, 607, 428, 0, -1, 669, 336, 1, 0)
    c4 = CircleInvolute(cont, 669, 336, 1, 0, 697, 370, 0, 1)
    c5 = CircleInvolute(cont, 697, 370, 0, 1, 633, 464, -1, 0)
    c6 = CircleInvolute(cont, 633, 464, -1, 0, 607, 428, 0, -1)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c3.weld_to(1, c4, 0)
    c3.weld_to(0, c6, 1)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0)
    # End saved data
    # Serif flares in over the last 20 canvas units of the descender.
    y2 = c2.compute_y(1)
    y1 = y2 - 20
    serif = lambda y: qc(y<y1,0,26*((y-y1)/(y2-y1))**4)
    c2.nib = lambda c,x,y,t,theta: (lambda k: (6,0,k,k))(18 + serif(y))
    phi = c1.compute_theta(1)
    psi = c0.compute_theta(0)
    c0.nib = c1.nib = lambda c,x,y,t,theta: (lambda k: 4+k)(20*cos(pi/2*(theta-phi)/(psi-phi))**2)
    # Shear the bowl's nib-angle frame so its thick/thin axis lines up
    # with the slanted descender.
    gamma = 1/tan(c2.compute_theta(0.5))
    shear = lambda theta: (lambda dx,dy: atan2(-dy,dx+gamma*dy))(cos(theta),-sin(theta))
    cont.default_nib = lambda c,x,y,t,theta: 12-9*sin(shear(theta))
    cont.by = dynamicm.by  # share the common dynamics baseline
    cont.lx = 510.4 + (-23.26 - -38.26) * cont.scale / 3600.0
    cont.rx = 690.615 - (-51.72 - -28.72) * cont.scale / 3600.0
    return cont
dynamicp = tmpfn()
def tmpfn(): # r
    """Dynamics letter 'r': one arch-and-downstroke as in 'm', plus a
    short flicked-out arm."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 551, 348, 0.635707, -0.77193, 585, 331, 1, 0)
    c1 = CircleInvolute(cont, 585, 331, 1, 0, 606, 360, -0.287348, 0.957826)
    c2 = StraightLine(cont, 606, 360, 576, 460)
    c3 = CircleInvolute(cont, 617, 360, 0.287348, -0.957826, 687, 344, 0.707107, 0.707107)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    # End saved data
    cont.default_nib = 4
    c2.nib = (4,0,15,15)
    phi = c1.compute_theta(1)
    psi = c0.compute_theta(0)
    # Same angle-modulated thickening as the arches of 'm'; the arm
    # (c3) uses a slightly larger base width.
    c0.nib = c1.nib = lambda c,x,y,t,theta: (lambda k: 4+k)(15*cos(pi/2*(theta-phi)/(psi-phi))**2)
    c3.nib = lambda c,x,y,t,theta: (lambda k: 8+k)(15*cos(pi/2*(theta-phi)/(psi-phi))**2)
    cont.by = dynamicm.by  # share the common dynamics baseline
    cont.lx = 557 + (-18.93 - 58.07) * cont.scale / 3600.0
    cont.rx = 670.187 - (-66.39 - -57.39) * cont.scale / 3600.0
    return cont
dynamicr = tmpfn()
def tmpfn(): # s
    """Dynamics letter 's': a single S-spine with a blob at each end,
    thick on one diagonal and thin on the other."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 635, 341, -0.845489, -0.533993, 564, 361, 0, 1)
    c1 = CircleInvolute(cont, 564, 361, 0, 1, 592, 398, 0.885832, 0.464007)
    c2 = CircleInvolute(cont, 592, 398, 0.885832, 0.464007, 619, 437, -0.196116, 0.980581)
    c3 = CircleInvolute(cont, 619, 437, -0.196116, 0.980581, 541, 452, -0.776114, -0.630593)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Nib width varies with stroke direction relative to the middle of
    # the spine, giving the thick diagonal.
    phi = c1.compute_theta(1)
    cont.default_nib = lambda c,x,y,t,theta: 15+6*cos(theta-phi)
    blob(c0, 0, 'r', 12, 7)
    blob(c3, 1, 'r', 12, 7)
    cont.by = dynamicm.by  # share the common dynamics baseline
    cont.lx = 529 + (-0.36 - 52.64) * cont.scale / 3600.0
    cont.rx = 628.788 - (-36.35 - -51.35) * cont.scale / 3600.0
    return cont
dynamics = tmpfn()
def tmpfn(): # z
    """Dynamics letter 'z': seriffed top bar, thin diagonal, and a
    swash bottom stroke ending in a blob.

    Fix: the nib lambda for the top bar previously hard-coded the
    literal 1.15 even though `serifangle` was declared (and commented)
    immediately above for exactly that purpose, so changing the
    constant had no effect. It now uses the named constant.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 568, 338, 678, 338)
    c1 = StraightLine(cont, 678, 338, 539, 453)
    c2 = CircleInvolute(cont, 539, 453, 0.707107, -0.707107, 602, 441, 0.784883, 0.619644)
    c3 = CircleInvolute(cont, 602, 441, 0.784883, 0.619644, 654, 427, 0.33035, -0.943858)
    c4 = CircleInvolute(cont, 654, 427, 0.33035, -0.943858, 654, 411, -0.341743, -0.939793)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    # End saved data
    # Serif flares in over the leftmost 30 canvas units of the top bar.
    x2 = c0.compute_x(0)
    x1 = x2 + 30
    x0 = c0.compute_x(1)
    serif = lambda x: qc(x>x1,0,13*((x-x1)/(x2-x1))**4)
    serifangle = 1.15 # radians of the slant at the end of the z's top stroke
    c0.nib = lambda c,x,y,t,theta: (lambda k: (6,serifangle,0,k))(min(26 + serif(x), x0-x))
    c1.nib = 6
    xr = c3.compute_x(1)
    xl = c2.compute_x(0)
    m = 0.5
    # Construct a cubic which is 0 at both 0 and 1, and 1 with
    # derivative 0 at m. Second derivative at 0 is non-negative iff
    # m <= 2/3, so we require 1/3 <= m <= 2/3 for the values on
    # [0,1] to be contained within [0,1].
    denom = m**4-2*m**3+m**2
    a = (1-2*m)/denom
    b = (3*m**2-1)/denom
    c = -a-b
    cubic = lambda x: x*(c+x*(b+x*a))
    # The bottom swash thickens in the middle, slanted parallel to the
    # diagonal stroke.
    slantangle = c1.compute_theta(1)
    c2.nib = c3.nib = lambda c,x,y,t,theta: ((lambda k: (6, slantangle, k, k))(16*cubic((x-xl)/(xr-xl))))
    c4.nib = 6
    blob(c4, 1, 'l', 12, 8)
    cont.by = dynamicm.by  # share the common dynamics baseline
    cont.lx = 533 + (-0.2 - 22.8) * cont.scale / 3600.0
    cont.rx = 650.1 - (-65.44 - -42.44) * cont.scale / 3600.0
    return cont
dynamicz = tmpfn()
# Fix up the dynamics letters' font metrics: origin is (baseline, left
# bearing) converted to output (3600-per-em) coordinates; width is the
# advance between the bearings (left in canvas units here -- presumably
# scaled downstream, TODO confirm).
for x in dynamicf, dynamicm, dynamicp, dynamicr, dynamics, dynamicz:
    x.origin = (x.by * 3600. / x.scale, x.lx * 3600. / x.scale)
    x.width = x.rx - x.lx
# ----------------------------------------------------------------------
# Accent mark.
def tmpfn():
    """Accent mark: a rightward-pointing open chevron."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 421, 415, 633, 472)
    c1 = StraightLine(cont, 633, 472, 421, 529)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    cont.default_nib = 10
    return cont
accent = tmpfn()
def tmpfn():
    """Espressivo: two accent chevrons, the second mirrored about x=400."""
    cont = GlyphContext()
    cont.extra = accent, "800 0 translate -1 1 scale", accent
    return cont
espressivo = tmpfn()
# ----------------------------------------------------------------------
# Miscellaneous articulation marks.
def tmpfn(): # stopping
    """Stopped (hand-stopped) mark: a plus sign."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 527, 316, 527, 466)
    c1 = StraightLine(cont, 453, 391, 601, 391)
    # End saved data
    cont.default_nib = 8
    return cont
stopping = tmpfn()
def tmpfn(): # legato
    """Legato (tenuto) mark: a short horizontal bar."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 454, 461, 600, 461)
    # End saved data
    cont.default_nib = 8
    cont.ly = c0.compute_y(0.5)  # vertical reference used by portato
    return cont
legato = tmpfn()
def tmpfn(): # staccato
    """Staccato mark: a filled dot."""
    cont = GlyphContext()
    cont.extra = "newpath 527 446 26 0 360 arc fill "
    return cont
staccato = tmpfn()
def tmpfn(): # 'portato' - a staccato stacked on a legato
    """Portato (above-note form): staccato dot raised 54 units above a
    legato bar."""
    cont = GlyphContext()
    cont.extra = legato, "0 -54 translate", staccato
    cont.ly = legato.ly
    return cont
portatoup = tmpfn()
def tmpfn(): # portato, the other way up
    """Portato (below-note form): the above-note form flipped vertically
    about y=500."""
    cont = GlyphContext()
    cont.extra = "0 1000 translate 1 -1 scale", portatoup
    cont.ly = 1000 - portatoup.ly
    return cont
portatodn = tmpfn()
def tmpfn(): # staccatissimo
    """Staccatissimo wedge pointing downwards."""
    cont = GlyphContext()
    cont.extra = "newpath 498 381 moveto 526 478 lineto 554 381 lineto closepath fill "
    return cont
staccatissdn = tmpfn()
def tmpfn(): # staccatissimo pointing the other way
    """Staccatissimo wedge pointing upwards."""
    cont = GlyphContext()
    cont.extra = "newpath 498 478 moveto 526 381 lineto 554 478 lineto closepath fill "
    return cont
staccatissup = tmpfn()
def tmpfn(): # snap-pizzicato
    """Snap (Bartok) pizzicato: a circle with a vertical stalk rising
    from its centre."""
    cont = GlyphContext()
    cont.extra = "newpath 500 500 50 0 360 arc 500 500 moveto 500 400 lineto 16 setlinewidth 1 setlinejoin 1 setlinecap stroke"
    return cont
snappizz = tmpfn()
# ----------------------------------------------------------------------
# The 'segno' sign (for 'D.S. al Fine' sort of stuff).
def tmpfn():
    """Segno sign: S-curve crossed by a slash, with a dot either side."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 504, 162, -0.284088, -0.958798, 420, 152, -0.393919, 0.919145)
    c1 = CircleInvolute(cont, 420, 152, -0.393919, 0.919145, 514, 295, 0.923077, 0.384615)
    c2 = CircleInvolute(cont, 514, 295, 0.923077, 0.384615, 608, 438, -0.393919, 0.919145)
    c3 = CircleInvolute(cont, 608, 438, -0.393919, 0.919145, 524, 428, -0.284088, -0.958798)
    c4 = StraightLine(cont, 624, 128, 404, 462)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    c4.nib = 10
    # Nib direction interpolates along each curve segment so the S has
    # thick/thin contrast that rotates smoothly around the shape.
    cont.default_nib = lambda c,x,y,t,theta: 8+16*cos(theta-c.nibdir(t))**2
    phi0 = c0.compute_theta(0)
    phi1 = c1.compute_theta(0) + 3*pi/2
    phi2 = c1.compute_theta(1) + pi
    c0.nibdir = lambda t: phi0 + (phi1-phi0)*t
    c1.nibdir = lambda t: phi1 + (phi2-phi1)*t
    c2.nibdir = lambda t: phi2 + (phi1-phi2)*t
    c3.nibdir = lambda t: phi1 + (phi0-phi1)*t
    # Draw the two dots.
    cont.extra = \
    "newpath 618 251 24 0 360 arc fill " + \
    "newpath 410 339 24 0 360 arc fill "
    return cont
segno = tmpfn()
# ----------------------------------------------------------------------
# The coda sign.
def tmpfn():
    """Coda sign: an oval crossed by vertical and horizontal lines."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 528, 198, 528, 475)
    c1 = StraightLine(cont, 418, 337, 639, 337)
    c2 = CircleInvolute(cont, 528, 230, 1, 0, 596, 337, 0, 1)
    c3 = CircleInvolute(cont, 596, 337, 0, 1, 528, 444, -1, 0)
    c4 = CircleInvolute(cont, 528, 444, -1, 0, 460, 337, 0, -1)
    c5 = CircleInvolute(cont, 460, 337, 0, -1, 528, 230, 1, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c2, 0)
    # End saved data
    c0.nib = c1.nib = 10
    # The oval is thickest on its vertical flanks, thin at top/bottom.
    cont.default_nib = lambda c,x,y,t,theta: 8+12*abs(sin(theta))**2.5
    return cont
coda = tmpfn()
def tmpfn(): # variant square form used by Lilypond
    """Square-cornered coda variant (Lilypond style)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 528, 198, 528, 475)
    c1 = StraightLine(cont, 418, 337, 639, 337)
    c2 = CircleInvolute(cont, 469, 241, 0.970143, -0.242536, 587, 241, 0.970143, 0.242536)
    c3 = CircleInvolute(cont, 587, 241, 0.110432, 0.993884, 587, 433, -0.110432, 0.993884)
    c4 = CircleInvolute(cont, 587, 433, -0.970143, 0.242536, 469, 433, -0.970143, -0.242536)
    c5 = CircleInvolute(cont, 469, 433, -0.110432, -0.993884, 469, 241, 0.110432, -0.993884)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c5.weld_to(1, c2, 0, 1)
    # End saved data
    c0.nib = c1.nib = 10
    c3.nib = c5.nib = 8, 0, 12, 12
    # Top/bottom strokes taper linearly towards the centre line.
    xmid = c0.compute_x(0)
    xend = c2.compute_x(0)
    xdiff = xend - xmid
    c2.nib = c4.nib = lambda c,x,y,t,theta: (lambda k: (8, 0, k, k))(12.0*(x-xmid)/xdiff)
    return cont
varcoda = tmpfn()
# ----------------------------------------------------------------------
# The turn sign.
def tmpfn():
    """Turn ornament: an S on its side, with rotating thick/thin nib."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 443, 448, -0.860927, 0.508729, 370, 401, 0, -1)
    c1 = CircleInvolute(cont, 370, 401, 0, -1, 423, 347, 1, 0)
    c2 = CircleInvolute(cont, 423, 347, 1, 0, 525, 402, 0.707107, 0.707107)
    c3 = CircleInvolute(cont, 525, 402, 0.707107, 0.707107, 627, 457, 1, 0)
    c4 = CircleInvolute(cont, 627, 457, 1, 0, 681, 395, 0, -1)
    c5 = CircleInvolute(cont, 681, 395, 0, -1, 607, 356, -0.860927, 0.508729)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c3.weld_to(1, c4, 0)
    c4.weld_to(1, c5, 0)
    # End saved data
    # Here nibdir is a function of stroke direction theta (not of t as
    # in segno): the nib angle tracks linearly with direction between
    # the two reference angles.
    cont.default_nib = lambda c,x,y,t,theta: 8+16*cos(theta-c.nibdir(theta))**2
    shift = lambda theta: (theta+pi/2) % (2*pi) - pi/2  # unwrap near the branch cut
    theta0 = shift(c0.compute_theta(0))
    phi0 = theta0
    theta2 = shift(c2.compute_theta(1))
    phi2 = theta2 + pi
    c0.nibdir = c1.nibdir = c2.nibdir = c3.nibdir = c4.nibdir = c5.nibdir = \
    lambda theta: phi0 + (phi2-phi0)*(shift(theta)-theta0)/(theta2-theta0)
    return cont
turn = tmpfn()
def tmpfn():
    """Inverted-turn ornament: the turn with a vertical slash through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 525, 304, 525, 500)
    # End saved data
    cont.default_nib = 8
    cont.extra = turn  # draw the plain turn underneath the slash
    return cont
invturn = tmpfn()
# ----------------------------------------------------------------------
# Mordent and its relatives.
def tmpfn():
    """Upper mordent: a two-peak zigzag drawn with a broad slanted nib."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 397.935, 402, 426, 368)
    c1 = StraightLine(cont, 426, 368, 498, 439)
    c2 = StraightLine(cont, 498, 439, 556, 368)
    c3 = StraightLine(cont, 556, 368, 628, 439)
    c4 = StraightLine(cont, 628, 439, 656.065, 405)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    # End saved data
    # Nib slanted parallel to the rising strokes of the zigzag.
    alpha = c2.compute_theta(.5)
    cont.default_nib = (8, alpha, 30, 30)
    cont.cy = c2.compute_y(.5)  # vertical centre, reused by variants
    return cont
mordentupper = tmpfn()
def tmpfn(): # and the same with a vertical line through it
    """Lower mordent: the upper mordent with a vertical stroke through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 526, 264, 526, 466)
    # End saved data
    cont.default_nib = 8
    # These things are stacked above the note, so they each have a
    # baseline and a height rather than being vertically centred.
    # Hence we must translate the other mordent sign upwards.
    cont.extra = "gsave 0 -43 translate", mordentupper, "grestore"
    cont.cy = mordentupper.cy - 43
    return cont
mordentlower = tmpfn()
def tmpfn():
    """Long upper mordent: the zigzag extended to three peaks."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 397.935, 402, 426, 368)
    c1 = StraightLine(cont, 426, 368, 498, 439)
    c2 = StraightLine(cont, 498, 439, 556, 368)
    c3 = StraightLine(cont, 556, 368, 628, 439)
    c4 = StraightLine(cont, 628, 439, 686, 368)
    c5 = StraightLine(cont, 686, 368, 758, 439)
    c6 = StraightLine(cont, 758, 439, 786.065, 405)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c5.weld_to(1, c6, 0, 1)
    # End saved data
    alpha = c2.compute_theta(.5)
    cont.default_nib = (8, alpha, 30, 30)
    cont.cy = mordentupper.cy  # align with the short mordent
    return cont
mordentupperlong = tmpfn()
def tmpfn(): # and the same with a vertical line through it
    """Long upper mordent with a vertical stroke through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 656, 264, 656, 466)
    # End saved data
    cont.default_nib = 8
    # These things are stacked above the note, so they each have a
    # baseline and a height rather than being vertically centred.
    # Hence we must translate the other mordent sign upwards.
    cont.extra = "gsave 0 -43 translate", mordentupperlong, "grestore"
    cont.cy = mordentupper.cy - 43
    return cont
mordentupperlower = tmpfn()
def tmpfn():
    """Long upper mordent with an upward curl prefixed on the left."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 397.935, 402, 426, 368)
    c1 = StraightLine(cont, 426, 368, 498, 439)
    c2 = StraightLine(cont, 498, 439, 556, 368)
    c3 = StraightLine(cont, 556, 368, 628, 439)
    c4 = StraightLine(cont, 628, 439, 686, 368)
    c5 = StraightLine(cont, 686, 368, 758, 439)
    c6 = StraightLine(cont, 758, 439, 786.065, 405)
    c7 = CircleInvolute(cont, 370, 524, -0.354654, -0.934998, 397.935, 402, 0.636585, -0.771206)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c7, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c5.weld_to(1, c6, 0, 1)
    # End saved data
    alpha = c2.compute_theta(.5)
    cont.default_nib = (8, alpha, 30, 30)
    c0.nib = c7.nib = 8  # the entry curl stays at uniform width
    cont.cy = mordentupper.cy
    return cont
upmordentupperlong = tmpfn()
def tmpfn(): # and the same with a vertical line through it
    """Up-curled long mordent with a vertical stroke through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 656, 264, 656, 466)
    # End saved data
    cont.default_nib = 8
    # These things are stacked above the note, so they each have a
    # baseline and a height rather than being vertically centred.
    # Hence we must translate the other mordent sign upwards.
    cont.extra = "gsave 0 -43 translate", upmordentupperlong, "grestore"
    cont.cy = mordentupper.cy - 43
    return cont
upmordentupperlower = tmpfn()
def tmpfn():
    """Long upper mordent with a downward curl prefixed on the left."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 378.602, 425.667, 426, 368)
    c1 = StraightLine(cont, 426, 368, 498, 439)
    c2 = StraightLine(cont, 498, 439, 556, 368)
    c3 = StraightLine(cont, 556, 368, 628, 439)
    c4 = StraightLine(cont, 628, 439, 686, 368)
    c5 = StraightLine(cont, 686, 368, 758, 439)
    c6 = StraightLine(cont, 758, 439, 786.065, 405)
    c7 = CircleInvolute(cont, 378, 287, -0.481919, 0.876216, 378.602, 425.667, 0.636585, 0.771206)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c7, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c5.weld_to(1, c6, 0, 1)
    # End saved data
    alpha = c2.compute_theta(.5)
    cont.default_nib = (8, alpha, 30, 30)
    c0.nib = c7.nib = 8  # the entry curl stays at uniform width
    cont.cy = mordentupper.cy
    return cont
downmordentupperlong = tmpfn()
def tmpfn(): # and the same with a vertical line through it
    """Down-curled long mordent with a vertical stroke through it."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 656, 264, 656, 466)
    # End saved data
    cont.default_nib = 8
    # These things are stacked above the note, so they each have a
    # baseline and a height rather than being vertically centred.
    # Hence we must translate the other mordent sign upwards.
    cont.extra = "gsave 0 -43 translate", downmordentupperlong, "grestore"
    cont.cy = mordentupper.cy - 43
    return cont
downmordentupperlower = tmpfn()
def tmpfn():
    """Long upper mordent with a straight vertical tail on the left."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 378.602, 425.667, 426, 368)
    c1 = StraightLine(cont, 426, 368, 498, 439)
    c2 = StraightLine(cont, 498, 439, 556, 368)
    c3 = StraightLine(cont, 556, 368, 628, 439)
    c4 = StraightLine(cont, 628, 439, 686, 368)
    c5 = StraightLine(cont, 686, 368, 758, 439)
    c6 = StraightLine(cont, 758, 439, 786.065, 405)
    c7 = StraightLine(cont, 378.602, 277, 378.602, 425.667)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c7, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    c5.weld_to(1, c6, 0, 1)
    # End saved data
    alpha = c2.compute_theta(.5)
    cont.default_nib = (8, alpha, 30, 30)
    c0.nib = c7.nib = 8  # the tail stays at uniform width
    cont.cy = mordentupper.cy
    return cont
straightmordentupperlong = tmpfn()
def tmpfn():
    """Long mordent with a downward curl on the right (180-degree
    rotation of downmordentupperlong)."""
    # Lilypond renders this glyph as a reflection of
    # upmordentupperlong, but it seems obviously preferable to me to
    # render it as a rotation of downmordentupperlong, so as to get
    # the mordent zigzag itself the same way round.
    cont = GlyphContext()
    cont.extra = "gsave 1000 1000 translate -1 -1 scale", downmordentupperlong
    cont.cy = 1000 - mordentupper.cy
    return cont
mordentupperlongdown = tmpfn()
def tmpfn():
    """Long mordent with an upward curl on the right (180-degree
    rotation of upmordentupperlong)."""
    # Likewise, Lilypond uses a reflection of downmordentupperlong,
    # whereas I rotate upmordentupperlong.
    cont = GlyphContext()
    cont.extra = "gsave 1000 1000 translate -1 -1 scale", upmordentupperlong
    cont.cy = 1000 - mordentupper.cy
    return cont
mordentupperlongup = tmpfn()
# ----------------------------------------------------------------------
# Fermata signs.
def tmpfn():
    """Fermata (pause): an arc over a dot, arc thickest at its apex."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 364, 465, 0, -1, 527, 313, 1, 0)
    c1 = CircleInvolute(cont, 527, 313, 1, 0, 690, 465, 0, 1)
    c0.weld_to(1, c1, 0)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: 8+18*cos(theta)**2
    # Draw the dot.
    cont.extra = "newpath 527 446 24 0 360 arc fill "
    return cont
fermata = tmpfn()
def tmpfn():
    """Short (angular) fermata: a pointed tent over a dot."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 384, 465, 527, 234)
    c1 = StraightLine(cont, 527, 233, 670, 465)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    c0.nib = 8
    # Right-hand stroke widens quickly from the apex, capped at 24.
    c1.nib = lambda c,x,y,t,theta: (8, pi, min(24, t*250), 0)
    # Draw the dot.
    cont.extra = "newpath 527 446 24 0 360 arc fill "
    return cont
fermata0 = tmpfn()
def tmpfn():
    """Long (square) fermata: a square bracket over a dot."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 384, 441, 384, 313)
    c1 = StraightLine(cont, 384, 313, 670, 313)
    c2 = StraightLine(cont, 670, 313, 670, 441)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    cont.default_nib = 8, pi/2, 24, 24
    # Draw the dot.
    cont.extra = "newpath 527 446 24 0 360 arc fill "
    return cont
fermata2 = tmpfn()
def tmpfn():
    """Very long fermata: nested square brackets over a dot."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 424, 447, 424, 370)
    c1 = StraightLine(cont, 424, 370, 630, 370)
    c2 = StraightLine(cont, 630, 370, 630, 447)
    c3 = StraightLine(cont, 384, 441, 384, 286)
    c4 = StraightLine(cont, 384, 286, 670, 286)
    c5 = StraightLine(cont, 670, 286, 670, 441)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    c3.weld_to(1, c4, 0, 1)
    c4.weld_to(1, c5, 0, 1)
    # End saved data
    c0.nib = c1.nib = c2.nib = 8, pi/2, 18, 18  # inner, slightly lighter
    c3.nib = c4.nib = c5.nib = 8, pi/2, 24, 24  # outer
    # Draw the dot.
    cont.extra = "newpath 527 446 24 0 360 arc fill "
    return cont
fermata3 = tmpfn()
def tmpfn():
    """Below-note fermata: the normal fermata flipped about y=500."""
    cont = GlyphContext()
    cont.extra = '0 1000 translate 1 -1 scale', fermata
    return cont
fermataup = tmpfn()
def tmpfn():
    """Below-note short fermata (vertical flip of fermata0)."""
    cont = GlyphContext()
    cont.extra = '0 1000 translate 1 -1 scale', fermata0
    return cont
fermata0up = tmpfn()
def tmpfn():
    """Below-note long fermata (vertical flip of fermata2)."""
    cont = GlyphContext()
    cont.extra = '0 1000 translate 1 -1 scale', fermata2
    return cont
fermata2up = tmpfn()
def tmpfn():
    """Below-note very long fermata (vertical flip of fermata3)."""
    cont = GlyphContext()
    cont.extra = '0 1000 translate 1 -1 scale', fermata3
    return cont
fermata3up = tmpfn()
# ----------------------------------------------------------------------
# Parentheses to go round accidentals.
def tmpfn():
    """Left parenthesis to enclose an accidental, thickest mid-curve."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 367, 334, -0.478852, 0.877896, 367, 604, 0.478852, 0.877896)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: 6+8*sin(pi*t)
    cont.rx = c0.compute_x(0) + c0.compute_nib(0) + 10  # right bearing
    return cont
acclparen = tmpfn()
def tmpfn():
    """Right accidental parenthesis: mirror image of the left one."""
    cont = GlyphContext()
    cont.extra = "gsave 1000 0 translate -1 1 scale", acclparen, "grestore"
    cont.lx = 1000 - acclparen.rx  # mirrored left bearing
    return cont
accrparen = tmpfn()
# ----------------------------------------------------------------------
# Braces between staves.
def tmpfn():
    """Upper half of the fixed two-stave brace."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 442, 109, -0.490261, 0.871576, 401, 692, 0.33035, 0.943858)
    c1 = CircleInvolute(cont, 401, 692, 0.33035, 0.943858, 313, 994, -0.810679, 0.585491)
    c0.weld_to(1, c1, 0)
    # End saved data
    # Width swells from the tip to the midpoint, then back to the cusp.
    c0.nib = lambda c,x,y,t,theta: 2+30*sin(pi/2*t)**2
    c1.nib = lambda c,x,y,t,theta: 2+30*cos(pi/2*t)**2
    cont.scale = 1600
    cont.origin = 1000, 10
    return cont
braceupper = tmpfn()
def tmpfn():
    """Lower half of the fixed two-stave brace (mirror of the upper)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 442, 919, -0.490261, -0.871576, 401, 336, 0.33035, -0.943858)
    c1 = CircleInvolute(cont, 401, 336, 0.33035, -0.943858, 313, 34, -0.810679, -0.585491)
    c0.weld_to(1, c1, 0)
    # End saved data
    # Width swells from the tip to the midpoint, then back to the cusp.
    c0.nib = lambda c,x,y,t,theta: 2+30*sin(pi/2*t)**2
    c1.nib = lambda c,x,y,t,theta: 2+30*cos(pi/2*t)**2
    cont.scale = 1600
    cont.origin = 1000, 2170
    return cont
bracelower = tmpfn()
def tmpfn(span): # arbitrarily sized brace
    """Return a complete brace glyph scaled so that the distance
    between the outer edges of its tips is 'span' output units."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 87, 20, -0.490261, 0.871576, 64, 313, 0.33035, 0.943858)
    c1 = CircleInvolute(cont, 64, 313, 0.33035, 0.943858, 20, 464, -0.810679, 0.585491)
    c2 = CircleInvolute(cont, 20, 464, 0.810679, 0.585491, 64, 615, -0.33035, 0.943858)
    c3 = CircleInvolute(cont, 64, 615, -0.33035, 0.943858, 87, 907, 0.490261, 0.871576)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0)
    # End saved data
    # We want the absolute distance between the _outer_ edges of the
    # tips - i.e. the logical tip positions incremented by the
    # thinnest nib width - to be equal to 'span'. The minimum nib
    # width is fixed at that which would have equalled 4 under the
    # scale of 1600, i.e. 4*3600/1600 = 9 in output coordinates.
    # Hence we want the logical distance between the tip centres to
    # be span-18.
    xtop = c0.compute_y(0)
    xbot = c3.compute_y(1)
    cont.scale = 3600 * (xbot-xtop) / float(span-18)
    # The maximum nib width grows with the overall span: from 8 up to
    # 32 (in 1600-scale units) as span runs from 525 to 4000, following
    # a square-root curve. The minimum is fixed as described above.
    nibmin = 4 * cont.scale / 1600
    nibmax = (8 + (32-8)*sqrt((span-525)/(4000.-525))) * cont.scale / 1600
    nibdiff = nibmax - nibmin
    c0.nib = lambda c,x,y,t,theta: nibmin+nibdiff*sin(pi/2*t)**2
    c1.nib = lambda c,x,y,t,theta: nibmin+nibdiff*cos(pi/2*t)**2
    c2.nib = lambda c,x,y,t,theta: nibmin+nibdiff*sin(pi/2*t)**2
    c3.nib = lambda c,x,y,t,theta: nibmin+nibdiff*cos(pi/2*t)**2
    cont.canvas_size = 105, 930
    # Raise trace/curve resolution for large braces so curves stay smooth.
    cont.trace_res = max(8, int(ceil(8*sqrt(1600.0/cont.scale))))
    cont.curve_res = max(1001, int(span))
    return cont
scaledbrace = tmpfn # note this is a function, not an actual GlyphContext
# Should be equivalent to 'braceupper'+'bracelower'
fixedbrace = scaledbrace(3982)
# ----------------------------------------------------------------------
# End pieces for an arbitrary-sized bracket between two staves.
def tmpfn(vwid):
    """Lower end piece for a stave bracket.

    vwid is the width of the vertical stub at the top; pass a negative
    value to omit the vertical entirely (Lilypond draws its own).
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 616, 615, -0.808736, -0.588172, 407, 541, -1, 0)
    c1 = StraightLine(cont, 407, 541, 407, 441)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    if vwid < 0:
        c1.nib = 0
    else:
        c1.nib = (4,0,vwid,0)
    # The curved tail tapers linearly to nothing at its tip.
    y0 = c0.compute_y(0)
    y1 = c0.compute_y(1)
    c0.nib = lambda c,x,y,t,theta: (4,pi/2,45*(y-y0)/(y1-y0),0)
    cont.hy = c1.compute_y(0)  # attachment height for the bracket body
    return cont
bracketlower = tmpfn(75)
bracketlowerlily = tmpfn(-1) # omit the vertical
def tmpfn(x):
    """Upper bracket end piece: vertical flip of the given lower piece."""
    cont = GlyphContext()
    cont.extra = "0 946 translate 1 -1 scale", x
    cont.hy = 946 - x.hy  # mirrored attachment height
    return cont
bracketupper = tmpfn(bracketlower)
bracketupperlily = tmpfn(bracketlowerlily)
# ----------------------------------------------------------------------
# Note head indicating an artificial harmonic above another base
# note.
def tmpfn():
    """Open diamond note head for an artificial harmonic."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 526, 402, 0.526355, 0.850265, 609, 476, 0.884918, 0.465746)
    c1 = CircleInvolute(cont, 609, 476, -0.850265, 0.526355, 528, 541, -0.613941, 0.789352)
    c2 = CircleInvolute(cont, 528, 541, -0.526355, -0.850265, 445, 467, -0.884918, -0.465746)
    c3 = CircleInvolute(cont, 445, 467, 0.850265, -0.526355, 526, 402, 0.613941, -0.789352)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c3, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    # Thick NE/SW sides, thin NW/SE sides; width tapers to nothing at
    # the diamond's corners.
    c0.nib = c2.nib = lambda c,x,y,t,theta: (2,theta-pi/2,min(24,t*200,(1-t)*200),0)
    c1.nib = c3.nib = lambda c,x,y,t,theta: (2,theta-pi/2,min(6,t*50,(1-t)*50),0)
    cont.ay = c1.compute_y(0)  # stem attachment height
    return cont
harmart = tmpfn()
def tmpfn():
    """Filled diamond note head for an artificial harmonic (same
    outline as harmart, filled towards the centre point)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 526, 402, 0.526355, 0.850265, 609, 476, 0.884918, 0.465746)
    c1 = CircleInvolute(cont, 609, 476, -0.850265, 0.526355, 528, 541, -0.613941, 0.789352)
    c2 = CircleInvolute(cont, 528, 541, -0.526355, -0.850265, 445, 467, -0.884918, -0.465746)
    c3 = CircleInvolute(cont, 445, 467, 0.850265, -0.526355, 526, 402, 0.613941, -0.789352)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c3, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,527,472,2)
    cont.ay = c1.compute_y(0)  # stem attachment height
    return cont
harmartfilled = tmpfn()
# ----------------------------------------------------------------------
# Natural harmonic mark and a couple of other miscellaneous note flags.
def tmpfn():
    """Natural-harmonic mark: a small open circle above the note."""
    cont = GlyphContext()
    cont.extra = "newpath 527 439 40 0 360 arc 6 setlinewidth stroke "
    return cont
harmnat = tmpfn()
def tmpfn(thumb):
    """Open-string / thumb-position flag: a small open oval, with a
    short downward stalk added when `thumb` is true."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 500, 450, 1, 0, 537, 500, 0, 1)
    c1 = CircleInvolute(cont, 537, 500, 0, 1, 500, 550, -1, 0)
    c2 = CircleInvolute(cont, 500, 550, -1, 0, 463, 500, 0, -1)
    c3 = CircleInvolute(cont, 463, 500, 0, -1, 500, 450, 1, 0)
    c4 = StraightLine(cont, 500, 580, 500, 554)
    c0.weld_to(1, c1, 0)
    c0.weld_to(0, c3, 1)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: 6 + 4*sin(theta)**2
    if thumb:
        c4.nib = 10
    else:
        c4.nib = 0  # suppress the stalk for the plain open-string flag
    cont.cy = c0.compute_y(1)
    return cont
flagopen = tmpfn(0)
flagthumb = tmpfn(1)
# ----------------------------------------------------------------------
# Ditto (same as previous bar) mark.
def tmpfn():
    """Bar-repeat (ditto) mark: a thick slash between two dots."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 425, 604, 630, 339)
    # End saved data
    c0.nib = (4,0,40,40)
    cont.extra = \
    "newpath 423 397 35 0 360 arc fill " + \
    "newpath 632 546 35 0 360 arc fill "
    return cont
ditto = tmpfn()
# ----------------------------------------------------------------------
# Breath mark and related stuff.
def tmpfn():
    """Breath mark: a comma shape, thick at the top with a blob."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 577, 341, 0.843661, 0.536875, 548, 466, -0.894427, 0.447214)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: 4+30*cos(pi/2*t)**2
    blob(c0, 0, 'l', 5, 0)
    return cont
breath = tmpfn()
def tmpfn():
    """Variant breath mark: a straight tick widening towards the top."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 547, 466, 587, 341)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: 4+14*t
    return cont
varbreath = tmpfn()
def tmpfn():
    """Reversed breath mark: the breath comma rotated 180 degrees."""
    cont = GlyphContext()
    cont.extra = "1000 1000 translate -1 -1 scale", breath
    return cont
revbreath = tmpfn()
def tmpfn():
    """Reversed variant breath mark (180-degree rotation of varbreath)."""
    cont = GlyphContext()
    cont.extra = "1000 1000 translate -1 -1 scale", varbreath
    return cont
revvarbreath = tmpfn()
def tmpfn():
    """Caesura: two parallel straight slashes."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 400, 625, 550, 375)
    c1 = StraightLine(cont, 475, 625, 625, 375)
    # End saved data
    cont.default_nib = 8
    return cont
caesura = tmpfn()
def tmpfn():
    """Curved caesura: two parallel arcs, thickening left to right."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 400, 625, 550-400, 375-625, 500, 375, 0, -1)
    c1 = CircleInvolute(cont, 475, 625, 625-475, 375-625, 575, 375, 0, -1)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: 8+4.0*(x-c.compute_x(0))/(c.compute_x(1)-c.compute_x(0))
    return cont
caesuracurved = tmpfn()
# ----------------------------------------------------------------------
# Random functional stuff like arrowheads.
def tmpfn(rotate, is_open):
    """Arrowhead pointing right, rotated by `rotate` degrees about the
    centre (500,500); drawn as an outline when `is_open` is true, or
    filled (via point-to-point nib) when false."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 375, 450, 0.83205, 0.5547, 500, 500, 0.977802, 0.209529)
    c1 = CircleInvolute(cont, 500, 500, -0.977802, 0.209529, 375, 550, -0.83205, 0.5547)
    c2 = CircleInvolute(cont, 375, 550, 0.519947, -0.854199, 375, 450, -0.519947, -0.854199)
    c0.weld_to(1, c1, 0, 1)
    c0.weld_to(0, c2, 1, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    if is_open:
        cont.default_nib = 10
        c2.nib = 0  # open form omits the back edge
    else:
        x0, y0 = c0.compute_x(0.5), c0.compute_y(1)
        cont.default_nib = lambda c,x,y,t,theta: ptp_nib(c,x,y,t,theta,x0,y0,10)
    if rotate:
        cont.before = "500 500 translate %g rotate -500 -500 translate" % rotate
    cont.cx = cont.cy = 500
    cont.extent = abs(c0.compute_y(0) - cont.cy) + 6
    return cont
openarrowright = tmpfn(0,1)
closearrowright = tmpfn(0,0)
openarrowleft = tmpfn(180,1)
closearrowleft = tmpfn(180,0)
openarrowup = tmpfn(270,1)
closearrowup = tmpfn(270,0)
openarrowdown = tmpfn(90,1)
closearrowdown = tmpfn(90,0)
# ----------------------------------------------------------------------
# Flat (and multiples of flat).
def tmpfn():
    """Flat sign: vertical stem plus a bowl that thickens to the right."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 430, 236, 430, 548)
    c1 = Bezier(cont, 430, 548, 481, 499, 515.999, 458, 505, 424)
    c2 = CircleInvolute(cont, 505, 424, -0.307801, -0.951451, 430, 436, -0.462566, 0.886585)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0)
    # End saved data
    c0.nib = 8
    x0 = c1.compute_x(0)
    x1 = c1.compute_x(1)
    cont.default_nib = lambda c,x,y,t,theta: 8+12*((x-x0)/(x1-x0))**2
    cont.ox = c0.compute_x(0.5)  # horizontal positioning reference
    cont.hy = 469 # no sensible way to specify this except manually
    return cont
flat = tmpfn()
def tmpfn():
    """Flat with an upward arrowhead on top of the stem (quarter-tone
    notation)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 430, 236, 430, 548)
    c1 = Bezier(cont, 430, 548, 481, 499, 515.999, 458, 505, 424)
    c2 = CircleInvolute(cont, 505, 424, -0.307801, -0.951451, 430, 436, -0.462566, 0.886585)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0)
    # End saved data
    c0.nib = 8
    x0 = c1.compute_x(0)
    x1 = c1.compute_x(1)
    cont.default_nib = lambda c,x,y,t,theta: 8+12*((x-x0)/(x1-x0))**2
    cont.ox = c0.compute_x(0.5)
    cont.hy = 469 # no sensible way to specify this except manually
    # Scaled arrowhead placed at the top of the stem.
    cont.extra = "gsave 430 236 16 add translate 0.7 dup scale -500 dup 150 sub translate", closearrowup, "grestore"
    return cont
flatup = tmpfn()
def tmpfn():
    """Flat sign with a downward arrow.  The stem (c0) is extended to 568
    (rather than flat's 548) to give the arrow somewhere to attach."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 430, 236, 430, 568)
    c1 = Bezier(cont, 430, 548, 481, 499, 515.999, 458, 505, 424)
    c2 = CircleInvolute(cont, 505, 424, -0.307801, -0.951451, 430, 436, -0.462566, 0.886585)
    c1.weld_to(1, c2, 0)
    # End saved data
    c0.nib = 8
    x0 = c1.compute_x(0)
    x1 = c1.compute_x(1)
    cont.default_nib = lambda c,x,y,t,theta: 8+12*((x-x0)/(x1-x0))**2
    cont.ox = c0.compute_x(0.5)
    cont.hy = 469 # no sensible way to specify this except manually
    # Closed down-arrow glyph, scaled to 0.7 and placed at the stem bottom.
    cont.extra = "gsave 430 568 16 sub translate 0.7 dup scale -500 dup 150 add translate", closearrowdown, "grestore"
    return cont
flatdn = tmpfn()
def tmpfn():
    """Flat sign with arrows both up and down (stem extended as in flatdn)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 430, 236, 430, 568)
    c1 = Bezier(cont, 430, 548, 481, 499, 515.999, 458, 505, 424)
    c2 = CircleInvolute(cont, 505, 424, -0.307801, -0.951451, 430, 436, -0.462566, 0.886585)
    c1.weld_to(1, c2, 0)
    # End saved data
    c0.nib = 8
    x0 = c1.compute_x(0)
    x1 = c1.compute_x(1)
    cont.default_nib = lambda c,x,y,t,theta: 8+12*((x-x0)/(x1-x0))**2
    cont.ox = c0.compute_x(0.5)
    cont.hy = 469 # no sensible way to specify this except manually
    # Tuple concatenation: draw both arrow decorations.
    cont.extra = flatup.extra + flatdn.extra
    return cont
flatupdn = tmpfn()
def tmpfn():
    """Double flat: two flat glyphs, the second one shifted 90 units left."""
    cont = GlyphContext()
    shift = -90
    cont.extra = (flat, "gsave %g 0 translate" % shift, flat, "grestore")
    cont.hy = flat.hy
    cont.ox = flat.ox + shift
    return cont
doubleflat = tmpfn()
def tmpfn():
    """Semiflat (quarter-tone flat): the flat glyph mirrored about a
    vertical axis just left of its stem."""
    cont = GlyphContext()
    axis = flat.ox - 20  # x coordinate of the mirror line
    mirror = "gsave %g 0 translate -1 1 scale" % (2*axis)
    cont.extra = (mirror, flat, "grestore")
    cont.hy = flat.hy
    return cont
semiflat = tmpfn()
def tmpfn():
    """Sesquiflat (three-quarter-tone flat): flat and semiflat overlaid."""
    cont = GlyphContext()
    cont.hy = flat.hy
    cont.extra = (flat, semiflat)
    return cont
sesquiflat = tmpfn()
def tmpfn():
    """Small flat: the flat glyph scaled to half size about (580,380)."""
    cont = GlyphContext()
    halve = "gsave 580 380 translate 0.5 dup scale -580 -380 translate"
    cont.extra = (halve, flat, "grestore")
    return cont
smallflat = tmpfn()
def tmpfn():
    """Flat sign with a single diagonal slash through the stem."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 370, 363, 490, 303)
    # End saved data
    c0.nib = 8
    cont.ox = flat.ox
    cont.hy = flat.hy
    cont.extra = flat
    return cont
flatslash = tmpfn()
def tmpfn():
    """Flat sign with two parallel diagonal slashes through the stem."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 372, 373, 490, 333)
    c1 = StraightLine(cont, 372, 313, 490, 273)
    # End saved data
    c0.nib = c1.nib = 8
    cont.ox = flat.ox
    cont.hy = flat.hy
    cont.extra = flat
    return cont
flatslash2 = tmpfn()
def tmpfn():
    """Slashed semiflat: the slashed flat mirrored about the same vertical
    axis used for semiflat."""
    cont = GlyphContext()
    axis = flat.ox - 20
    mirror = "gsave %g 0 translate -1 1 scale" % (2*axis)
    cont.extra = (mirror, flatslash, "grestore")
    cont.hy = flatslash.hy
    return cont
semiflatslash = tmpfn()
def tmpfn():
    """Double flat with a diagonal slash through both stems."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 282, 361, 490, 281)
    # End saved data
    c0.nib = 8
    cont.ox = doubleflat.ox
    cont.hy = doubleflat.hy
    cont.extra = doubleflat
    return cont
doubleflatslash = tmpfn()
# ----------------------------------------------------------------------
# Natural.
def tmpfn():
    """Natural sign: two vertical strokes joined by two slanted bars."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 519, 622, 519, 399)
    c1 = StraightLine(cont, 519, 399, 442, 418)
    c2 = StraightLine(cont, 442, 318, 442, 539)
    c3 = StraightLine(cont, 442, 539, 519, 520)
    c4 = StraightLine(cont, 442, 539, 519, 520)  # (not present; see saved data)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.cy = (c0.compute_y(0) + c2.compute_y(0)) / 2.0
    return cont
natural = tmpfn()
def tmpfn():
    """Natural sign with an upward arrow on the left-hand stroke."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 519, 622, 519, 399)
    c1 = StraightLine(cont, 519, 399, 442, 418)
    c2 = StraightLine(cont, 442, 318, 442, 539)
    c3 = StraightLine(cont, 442, 539, 519, 520)
    c0.weld_to(1, c1, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    # Closed up-arrow, scaled 0.7, at the top of the left stroke.
    cont.extra = "gsave 442 318 translate 0.7 dup scale -500 dup 150 sub translate", closearrowup, "grestore"
    cont.cy = natural.cy
    return cont
naturalup = tmpfn()
def tmpfn():
    """Natural sign with a downward arrow on the right-hand stroke."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 519, 622, 519, 399)
    c1 = StraightLine(cont, 519, 399, 442, 418)
    c2 = StraightLine(cont, 442, 318, 442, 539)
    c3 = StraightLine(cont, 442, 539, 519, 520)
    c0.weld_to(1, c1, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    # Closed down-arrow, scaled 0.7, at the bottom of the right stroke.
    cont.extra = "gsave 519 622 translate 0.7 dup scale -500 dup 150 add translate", closearrowdown, "grestore"
    cont.cy = natural.cy
    return cont
naturaldn = tmpfn()
def tmpfn():
    """Natural sign with both up and down arrows."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 519, 622, 519, 399)
    c1 = StraightLine(cont, 519, 399, 442, 418)
    c2 = StraightLine(cont, 442, 318, 442, 539)
    c3 = StraightLine(cont, 442, 539, 519, 520)
    c0.weld_to(1, c1, 0, 1)
    c2.weld_to(1, c3, 0, 1)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    # Tuple concatenation: draw both arrow decorations.
    cont.extra = naturalup.extra + naturaldn.extra
    cont.cy = natural.cy
    return cont
naturalupdn = tmpfn()
def tmpfn():
    """Small natural: the natural glyph at half size about (580,280)."""
    cont = GlyphContext()
    halve = "gsave 580 280 translate 0.5 dup scale -580 -280 translate"
    cont.extra = (halve, natural, "grestore")
    return cont
smallnatural = tmpfn()
# ----------------------------------------------------------------------
# Sharp.
def tmpfn():
    """Sharp sign: two vertical strokes crossed by two slanted bars."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 652)
    c1 = StraightLine(cont, 493, 291, 493, 637)
    c2 = StraightLine(cont, 413, 419, 523, 392)
    c3 = StraightLine(cont, 413, 551, 523, 524)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.cy = (c2.compute_y(0) + c3.compute_y(1))/2.0
    return cont
sharp = tmpfn()
def tmpfn():
    """Sharp sign with an upward arrow; the right stroke is extended
    upward (271 rather than 291) for the arrow to sit on."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 652)
    c1 = StraightLine(cont, 493, 271, 493, 637)
    c2 = StraightLine(cont, 413, 419, 523, 392)
    c3 = StraightLine(cont, 413, 551, 523, 524)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.extra = "gsave 493 271 translate 0.7 dup scale -500 dup 150 sub translate", closearrowup, "grestore"
    cont.cy = sharp.cy
    return cont
sharpup = tmpfn()
def tmpfn():
    """Sharp sign with a downward arrow; the left stroke is extended
    downward (672 rather than 652) for the arrow to hang from."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 672)
    c1 = StraightLine(cont, 493, 291, 493, 637)
    c2 = StraightLine(cont, 413, 419, 523, 392)
    c3 = StraightLine(cont, 413, 551, 523, 524)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.extra = "gsave 442 672 translate 0.7 dup scale -500 dup 150 add translate", closearrowdown, "grestore"
    cont.cy = sharp.cy
    return cont
sharpdn = tmpfn()
def tmpfn():
    """Sharp sign with both up and down arrows (both strokes extended)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 672)
    c1 = StraightLine(cont, 493, 271, 493, 637)
    c2 = StraightLine(cont, 413, 419, 523, 392)
    c3 = StraightLine(cont, 413, 551, 523, 524)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    # Tuple concatenation: draw both arrow decorations.
    cont.extra = sharpup.extra + sharpdn.extra
    cont.cy = sharp.cy
    return cont
sharpupdn = tmpfn()
def tmpfn():
    """Small sharp: the sharp glyph at half size about (580,280)."""
    cont = GlyphContext()
    halve = "gsave 580 280 translate 0.5 dup scale -580 -280 translate"
    cont.extra = (halve, sharp, "grestore")
    return cont
smallsharp = tmpfn()
def tmpfn():
    """Semisharp (quarter-tone sharp): one vertical stroke, two short bars."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 652)
    c1 = StraightLine(cont, 413, 421, 472, 401.518)
    c2 = StraightLine(cont, 413, 555, 472, 533.981)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    return cont
semisharp = tmpfn()
def tmpfn():
    """Sesquisharp (three-quarter-tone sharp): three vertical strokes."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 300.351, 442, 646.351)
    c1 = StraightLine(cont, 493, 291, 493, 637)
    c2 = StraightLine(cont, 544, 281.649, 544, 627.649)
    c3 = StraightLine(cont, 413, 414, 574, 384.481)
    c4 = StraightLine(cont, 413, 547, 574, 517.481)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    return cont
sesquisharp = tmpfn()
def tmpfn():
    """Sharp with three horizontal bars (used for some microtonal systems)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 652)
    c1 = StraightLine(cont, 493, 291, 493, 637)
    c2 = StraightLine(cont, 413, 397, 523, 370)
    c3 = StraightLine(cont, 413, 573, 523, 546)
    c4 = StraightLine(cont, 401, 487.945, 535, 455.055)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.cy = (c2.compute_y(0) + c3.compute_y(1))/2.0
    return cont
sharp3 = tmpfn()
def tmpfn():
    """Semisharp with three horizontal bars: one stroke, three bars."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 442, 306, 442, 652)
    c1 = StraightLine(cont, 413, 399, 472, 379.518)
    c2 = StraightLine(cont, 413, 577, 472, 555.981)
    c3 = StraightLine(cont, 400.5, 492.703, 483.5, 465.297)
    # End saved data
    cont.default_nib = (8, pi/2, 16, 16)
    cont.cy = (c2.compute_y(0) + c3.compute_y(1))/2.0
    return cont
semisharp3 = tmpfn()
# ----------------------------------------------------------------------
# Double sharp.
def tmpfn():
    """Double sharp: a diagonal cross with a filled square at each tip."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 409, 426, 504, 521)
    c1 = StraightLine(cont, 409, 521, 504, 426)
    # End saved data
    cont.default_nib = 8
    # Blobs at the ends of the lines.
    # Defines a PostScript 'square' procedure (x y halfside square) and
    # stamps one filled square at each of the four cross tips.
    cont.extra = \
        "/square { gsave 3 1 roll translate newpath dup dup moveto dup neg dup neg lineto dup neg dup lineto dup neg lineto closepath fill grestore } def " + \
        "newpath 409 426 24 square " + \
        "newpath 409 521 24 square " + \
        "newpath 504 426 24 square " + \
        "newpath 504 521 24 square "
    return cont
doublesharp = tmpfn()
# ----------------------------------------------------------------------
# Arpeggio mark and friends.
def tmpfn():
    """Vertical arpeggio squiggle: four alternating Bezier bulges."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = Bezier(cont, 491, 334, 516, 359, 516, 378, 491, 403)
    c1 = Bezier(cont, 491, 403, 466, 428, 466, 447, 491, 472)
    c2 = Bezier(cont, 491, 472, 516, 497, 516, 516, 491, 541)
    c3 = Bezier(cont, 491, 541, 466, 566, 466, 585, 491, 610)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Calligraphic nib: thickness varies with stroke direction, from 4 up
    # to 18 depending on the pen angle relative to the -3*pi/4 axis.
    cont.default_nib = lambda c,x,y,t,theta: 4+14*abs(cos(theta + 3*pi/4))**1.5
    return cont
arpeggio = tmpfn()
def tmpfn():
    """One repeatable period of the arpeggio squiggle (two bulges),
    with tiling metrics (ty/oy) and side bearings (lx/rx)."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = Bezier(cont, 491, 334, 516, 359, 516, 378, 491, 403)
    c1 = Bezier(cont, 491, 403, 466, 428, 466, 447, 491, 472)
    c0.weld_to(1, c1, 0)
    # End saved data
    cont.default_nib = lambda c,x,y,t,theta: 4+14*abs(cos(theta + 3*pi/4))**1.5
    cont.ty = c0.compute_y(0)
    cont.oy = c1.compute_y(1)
    # Side bearings wide enough to also fit the arrowhead variants.
    cont.lx = c0.compute_x(0) - closearrowdown.extent
    cont.rx = c0.compute_x(0) + closearrowdown.extent
    return cont
arpeggioshort = tmpfn()
def tmpfn():
    """Terminating segment of a downward arpeggio: one bulge ending in a
    closed down-arrowhead."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = Bezier(cont, 491, 334, 516, 359, 491, 370, 491, 403)
    # End saved data
    # Nib swells in the middle of the stroke (max at t=0.5) and tapers
    # to 4 at both ends.
    cont.default_nib = lambda c,x,y,t,theta: 4+16*t*(1-t)
    cont.extra = "-9 0 translate", closearrowdown
    cont.lx = arpeggioshort.lx
    cont.rx = arpeggioshort.rx
    cont.ey = c0.compute_y(0)
    return cont
arpeggioarrowdown = tmpfn()
def tmpfn():
    """Terminating segment of an upward arpeggio: the downward one
    rotated 180 degrees about (500,500)."""
    cont = GlyphContext()
    cont.extra = ("1000 1000 translate -1 -1 scale", arpeggioarrowdown)
    # Metrics are the point-reflections of the source glyphs' metrics.
    cont.lx, cont.rx = 1000 - arpeggioshort.rx, 1000 - arpeggioshort.lx
    cont.ey = 1000 - arpeggioarrowdown.ey
    return cont
arpeggioarrowup = tmpfn()
def tmpfn():
    """Wavy line drawn after 'tr' for an extended trill: the short
    arpeggio mark rotated 90 degrees onto its side."""
    cont = GlyphContext()
    cont.extra = ("500 500 translate -90 rotate -500 -500 translate",
                  arpeggioshort)
    # After rotation, the source's vertical extent becomes horizontal.
    cont.lx = arpeggioshort.ty
    cont.rx = arpeggioshort.oy
    return cont
trillwiggle = tmpfn()
# ----------------------------------------------------------------------
# Downbow and upbow marks.
def tmpfn():
    """Downbow mark: a squared-off bracket open at the bottom."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 447, 430, 447, 330)
    c1 = StraightLine(cont, 447, 330, 608, 330)
    c2 = StraightLine(cont, 608, 330, 608, 430)
    c0.weld_to(1, c1, 0, 1)
    c1.weld_to(1, c2, 0, 1)
    # End saved data
    cont.default_nib = 8, pi/2, 35, 35
    return cont
bowdown = tmpfn()
def tmpfn():
    """Upbow mark: a V shape whose left stroke tapers near the point."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 475, 256, 535, 460)
    c1 = StraightLine(cont, 535, 460, 595, 256)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    # Left stroke widens away from the point, capped at 25.
    c0.nib = lambda c,x,y,t,theta: (6, 0, min(25, (1-t)*100), 0)
    c1.nib = 6
    return cont
bowup = tmpfn()
# ----------------------------------------------------------------------
# Sforzando / marcato is an inverted upbow mark.
def tmpfn():
    """Marcato (sforzando) mark: an inverted V, mirror of the upbow."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 475, 460, 535, 256)
    c1 = StraightLine(cont, 535, 256, 595, 460)
    c0.weld_to(1, c1, 0, 1)
    # End saved data
    c0.nib = 6
    # Right stroke widens away from the apex, capped at 25.
    c1.nib = lambda c,x,y,t,theta: (6, pi, min(25, t*100), 0)
    return cont
sforzando = tmpfn()
def tmpfn():
    """Downward marcato: sforzando rotated 180 degrees about (500,500)."""
    cont = GlyphContext()
    cont.extra = ("1000 1000 translate -1 -1 scale", sforzando)
    return cont
sforzandodn = tmpfn()
# ----------------------------------------------------------------------
# Repeat mark (just a pair of dots).
def tmpfn():
    """Repeat-sign dots: two filled circles, one per stave space."""
    cont = GlyphContext()
    dot = "newpath 561 %g 32 0 360 arc fill "
    cont.extra = dot % 401 + dot % 542
    return cont
repeatmarks = tmpfn()
# ----------------------------------------------------------------------
# Grace notes.
def tmpfn():
    """Appoggiatura (grace note): a quaver built from the crotchet head,
    a stem line, and the quaver tail, all at 0.45 scale."""
    cont = GlyphContext()
    cont.extra = [
        "gsave 495 472 translate 0.45 dup scale -527 -472 translate",
        # Position the tail at the top of the stem (stem top is 450 above
        # the head's attachment point at (602.346, 452.748)).
        "gsave 602.346 452.748 -450 add translate -535 -465 translate",
        tailquaverup,
        "grestore",
        headcrotchet,
        "newpath 602.346 452.748 moveto 0 -450 rlineto 16 setlinewidth stroke",
        "grestore",
        ]
    return cont
appoggiatura = tmpfn()
def tmpfn():
    """Small acciaccatura slash, drawn at the appoggiatura's scale."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = StraightLine(cont, 502, 394, 601, 327)
    # End saved data
    c0.nib = 6
    # Origin set manually to line up with the grace-note stem.
    cont.ox = 532
    cont.oy = 261
    return cont
accslashup = tmpfn()
def tmpfn():
    """Acciaccatura: the appoggiatura with a slash through its stem."""
    cont = GlyphContext()
    cont.extra = (appoggiatura, accslashup)
    return cont
acciaccatura = tmpfn()
def tmpfn():
    """Full-size grace-note slash (up): accslashup scaled back up by
    1/0.45 to undo the grace-note scale factor."""
    cont = GlyphContext()
    cont.extra = ("-500 0 translate 1 .45 div dup scale", accslashup)
    cont.oy = accslashup.oy / .45
    cont.ox = accslashup.ox / .45 - 500
    return cont
accslashbigup = tmpfn()
def tmpfn():
    """Full-size grace-note slash (down): the up version reflected
    vertically about y=500."""
    cont = GlyphContext()
    cont.extra = ('0 1000 translate 1 -1 scale', accslashbigup)
    cont.oy = 1000 - accslashbigup.oy
    cont.ox = accslashbigup.ox
    return cont
accslashbigdn = tmpfn()
# ----------------------------------------------------------------------
# Piano pedal marks.
def tmpfn():
    """The 'P' of the piano 'Ped.' marking, with its decorative swashes.

    c0-c3 form the bowl of the P; c4/c5 the main descender; c6/c7 the
    foot flourish.  The bowl strokes use angle-driven quintic nib
    profiles so the thick parts fall in calligraphically sensible places.
    """
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 340, 451, 0.039968, 0.999201, 293, 487, -0.664364, -0.747409)
    c1 = CircleInvolute(cont, 293, 487, -0.664364, -0.747409, 399, 373, 1, 0)
    c2 = CircleInvolute(cont, 399, 373, 1, 0, 472, 451, -0.485643, 0.874157)
    c3 = CircleInvolute(cont, 472, 451, -0.485643, 0.874157, 421, 441, -0.164399, -0.986394)
    c4 = Bezier(cont, 395, 376, 374.611, 410.556, 351.98, 449.02, 388.876, 485.371)
    c5 = Bezier(cont, 388.876, 485.371, 428.041, 523.958, 366, 586, 331.736, 624.799)
    c6 = CircleInvolute(cont, 331.736, 624.799, 0.225579, -0.974225, 440, 613.5, 0.464007, 0.885832)
    c7 = StraightLine(cont, 440, 613.5, 482, 580)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    c4.weld_to(1, c5, 0)
    c5.weld_to(1, c6, 0, 1)
    c6.weld_to(1, c7, 0, 1)
    # End saved data
    cont.default_nib = 6
    # Construct a quintic which is 0 with derivative 0 at both 0 and
    # 1, and 1 with derivative 0 at m. Second derivative at 0 is
    # non-negative iff m <= 0.6, so we require 0.4 <= m <= 0.6 for
    # the values on [0,1] to be contained within [0,1].
    def quintic(m,x):
        denom = m*m*m*(-1+m*(3+m*(-3+m)))
        a = (2-4*m)/denom
        b = (-4+m*(5+m*5))/denom
        c = (2+m*(2+m*-10))/denom
        d = (m*(-3+m*5))/denom
        return x*x*(d+x*(c+x*(b+x*a)))
    def shift(theta, phi):
        # Normalise theta into the 2*pi interval starting at phi+pi.
        return (theta-(phi+pi)) % (2*pi) + (phi+pi)
    shift01 = 3*pi/4
    end0 = shift(c0.compute_theta(0),shift01)
    end1 = shift(c1.compute_theta(1),shift01)
    # Nib swells from 6 to 16 across each bowl stroke, driven by the
    # quintic as a function of normalised pen angle.
    c0.nib = c1.nib = lambda c,x,y,t,theta: 6+10*quintic(0.4, (shift(theta, shift01)-end0)/(end1-end0))
    shift23 = -3*pi/4
    end2 = shift(c2.compute_theta(0),shift23)
    end3 = shift(c3.compute_theta(1),shift23)
    c2.nib = c3.nib = lambda c,x,y,t,theta: 6+10*quintic(0.6, (shift(theta, shift23)-end2)/(end3-end2))
    theta45 = (c4.compute_theta(0) + c5.compute_theta(1))/2
    c4.nib = c5.nib = lambda c,x,y,t,theta: 6 + 15*sin(theta-theta45)**2
    theta7 = c7.compute_theta(0)
    c6.nib = lambda c,x,y,t,theta: (6, theta7, 18*t**2, 18*t**2)
    cont.by = c5.compute_y(1) + c5.compute_nib(1)
    return cont
pedP = tmpfn()
def tmpfn():
    """The 'e' of the piano 'Ped.' marking."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 482, 580, 0.786318, -0.617822, 533, 541, 0.804176, -0.594391)
    c1 = CircleInvolute(cont, 533, 541, 0.804176, -0.594391, 520, 496, -1, 0)
    c2 = CircleInvolute(cont, 520, 496, -1, 0, 495, 604, 0.485643, 0.874157)
    c3 = CircleInvolute(cont, 495, 604, 0.485643, 0.874157, 571, 591, 0.581238, -0.813733)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    theta0 = c0.compute_theta(0)
    theta1 = c3.compute_theta(1)
    c0.nib = 6, theta0, 7, 0 # avoid running over left edge of e
    c1.nib = 6, theta0, 7, 7 # avoid running over left edge of e
    c2.nib = lambda c,x,y,t,theta: (6, theta0, 7+3*t, 7+3*t)
    # Exit stroke: nib angle rotates smoothly from theta0 to theta1.
    c3.nib = lambda c,x,y,t,theta: (6, (theta0+t*(theta1-theta0)), 10, 10)
    cont.by = pedP.by
    return cont
pede = tmpfn()
def tmpfn():
    """The 'd' of the piano 'Ped.' marking."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 638, 484, -0.91707, 0.398726, 580, 567, 0, 1)
    c1 = CircleInvolute(cont, 580, 567, 0, 1, 623, 625, 1, 0)
    c2 = CircleInvolute(cont, 623, 625, 1, -0, 664, 527, -0.304776, -0.952424)
    c3 = CircleInvolute(cont, 664, 527, -0.304776, -0.952424, 514, 410, -0.980581, -0.196116)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    c2.weld_to(1, c3, 0)
    # End saved data
    # Nib thickness modulated sinusoidally with pen angle across the
    # bowl (c0/c1) and the ascender (c2/c3).
    theta0 = -pi
    theta1 = 0
    theta2 = +pi
    c0.nib = c1.nib = lambda c,x,y,t,theta: 6+8*sin(pi*(theta-theta0)/(theta1-theta0))**2
    c2.nib = c3.nib = lambda c,x,y,t,theta: 6+12*sin(pi*(theta-theta1)/(theta2-theta1))**2
    cont.by = pedP.by
    return cont
pedd = tmpfn()
def tmpfn():
    """The full stop after 'Ped' in the pedal marking."""
    cont = GlyphContext()
    cont.by = pedP.by
    cont.extra = "newpath 708 611 20 0 360 arc fill "
    return cont
peddot = tmpfn()
def tmpfn():
    """Composite 'Ped' pedal mark: the P, e and d glyphs overlaid."""
    cont = GlyphContext()
    cont.by = pedP.by
    cont.extra = (pedP, pede, pedd)
    return cont
pedPed = tmpfn()
def tmpfn():
    """Composite 'Ped.' pedal mark: P, e, d plus the trailing dot."""
    cont = GlyphContext()
    cont.by = pedP.by
    cont.extra = (pedP, pede, pedd, peddot)
    return cont
pedPeddot = tmpfn()
# The pedal-up asterisk is drawn by drawing a single curved edge and
# repeating it around the circle eight times.
def tmpfn():
    """One 45-degree sector of the pedal-up asterisk.  Also computes the
    centre (cx,cy) and radius r about which the sector repeats."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = CircleInvolute(cont, 411, 448, 0.92388, -0.382683, 425, 425, 0, -1)
    c1 = CircleInvolute(cont, 425, 425, 0, -1, 413, 405, -0.747409, -0.664364)
    c2 = CircleInvolute(cont, 413, 405, -0.747409, -0.664364, 425, 373, 1, 0)
    c0.weld_to(1, c1, 0)
    c1.weld_to(1, c2, 0)
    # End saved data
    x0 = c0.compute_x(1)
    # Horizontal nib whose width doubles the curve's distance from the
    # sector's axis of symmetry at x0.
    cont.default_nib = lambda c,x,y,t,theta: (6, 0, 2*(x0-x), 0)
    cont.cx = x0
    # Centre lies where the sector's edge, rotated by pi/8, meets the axis.
    cont.cy = c0.compute_y(0) + (x0 - c0.compute_x(0)) / tan(pi/8)
    cont.r = sqrt((c0.compute_x(0) - cont.cx)**2 + (c0.compute_y(0) - cont.cy)**2)
    return cont
pedstarcomponent = tmpfn()
def tmpfn():
    """Pedal-up asterisk: the sector component stamped eight times around
    the circle, plus a stroked circular outline."""
    cont = GlyphContext()
    cx, cy, r = pedstarcomponent.cx, pedstarcomponent.cy, pedstarcomponent.r
    cont.extra = "8 {", pedstarcomponent, \
        "%g %g translate 45 rotate %g %g translate } repeat" % (cx,cy, -cx,-cy) + \
        " newpath %g %g %g 0 360 arc closepath 12 setlinewidth stroke" % (cx,cy, r-5)
    cont.by = pedP.by
    return cont
pedstar = tmpfn()
def tmpfn():
    """Short wavy dash used between repeated pedal markings."""
    cont = GlyphContext()
    # Saved data from gui.py
    c0 = Bezier(cont, 463, 538, 493, 518, 540, 544, 570, 524)
    # End saved data
    c0.nib = lambda c,x,y,t,theta: (4, pi/3, 18, 18)
    cont.by = pedP.by
    return cont
peddash = tmpfn()
# ----------------------------------------------------------------------
# Some note flags I don't really understand, but which Lilypond's
# font supports so I must too.
def tmpfn():
    """Organ pedal 'heel' mark, upward form: a U open at the top."""
    cont = GlyphContext()
    cont.cy = 500
    cont.extra = ("newpath 450 420 moveto 500 500 50 180 0 arcn 550 420 lineto "
                  "16 setlinewidth 1 setlinecap stroke")
    return cont
upedalheel = tmpfn()
def tmpfn():
    """Organ pedal 'heel' mark, downward form: a U open at the bottom."""
    cont = GlyphContext()
    cont.cy = 500
    cont.extra = ("newpath 450 580 moveto 500 500 50 180 0 arc 550 580 lineto "
                  "16 setlinewidth 1 setlinecap stroke")
    return cont
dpedalheel = tmpfn()
def tmpfn():
    """Organ pedal 'toe' mark, upward form: a V open at the top."""
    cont = GlyphContext()
    cont.cy = 500
    cont.extra = ("newpath 450 420 moveto 500 550 lineto 550 420 lineto "
                  "16 setlinewidth 1 setlinecap 1 setlinejoin stroke")
    return cont
upedaltoe = tmpfn()
def tmpfn():
    """Organ pedal 'toe' mark, downward form: a V open at the bottom."""
    cont = GlyphContext()
    cont.cy = 500
    cont.extra = ("newpath 450 580 moveto 500 450 lineto 550 580 lineto "
                  "16 setlinewidth 1 setlinecap 1 setlinejoin stroke")
    return cont
dpedaltoe = tmpfn()
# ----------------------------------------------------------------------
# Accordion-specific markings.
def tmpfn(n):
    """Accordion register glyph: a circle n stave spaces across, divided
    into n rows by horizontal chords.

    n -- number of rows (2, 3 or 4 for the instances below).
    """
    cont = GlyphContext()
    cont.scale = 1440 # make life easier: one stave space is now 100px
    r = 50*n
    # Build the PostScript path in pieces and join once, instead of
    # repeated quadratic string concatenation.
    pieces = ["newpath 500 500 %g 0 360 arc " % r]
    for i in range(1, n):
        y = 100*i - r           # signed offset of this chord from the centre
        x = sqrt(r*r - y*y)     # half-length of the chord at height y
        pieces.append("%g %g moveto %g %g lineto " % (500-x, 500+y, 500+x, 500+y))
    pieces.append("8 setlinewidth stroke")
    cont.extra = "".join(pieces)
    return cont
acc2 = tmpfn(2)
acc3 = tmpfn(3)
acc4 = tmpfn(4)
def tmpfn(w,h):
    """Accordion bass register glyph: a w-by-h stave-space rectangle
    divided into h rows by horizontal lines.

    w, h -- width and height in stave spaces.
    """
    cont = GlyphContext()
    cont.scale = 1440 # make life easier: one stave space is now 100px
    ww = 50*w
    hh = 50*h
    # Build the PostScript path in pieces and join once, instead of
    # repeated quadratic string concatenation.
    pieces = [("newpath %g %g moveto %g %g lineto " + \
               "%g %g lineto %g %g lineto closepath ") % \
              (500-ww,500-hh,500+ww,500-hh,500+ww,500+hh,500-ww,500+hh)]
    for i in range(1,h):
        y = 100*i - hh          # signed offset of this divider from the centre
        pieces.append("%g %g moveto %g %g lineto " % (500-ww, 500+y, 500+ww, 500+y))
    pieces.append("8 setlinewidth stroke")
    cont.extra = "".join(pieces)
    return cont
accr = tmpfn(2,3)
def tmpfn():
    """Accordion register dot (a small filled circle at the centre)."""
    cont = GlyphContext()
    cont.scale = 1440  # one stave space is 100px at this scale
    cont.extra = "newpath 500 500 25 0 360 arc fill "
    return cont
accdot = tmpfn()
def tmpfn():
    """Accordion 'old EE' register glyph: a circle with eight dot-and-spoke
    elements repeated at 45-degree intervals."""
    cont = GlyphContext()
    cont.scale = 1440 # make life easier: one stave space is now 100px
    cont.extra = "500 500 translate " + \
        "newpath 0 0 100 0 360 arc 8 setlinewidth stroke " + \
        "8 { " + \
        "  newpath 0 65 20 0 360 arc fill " + \
        "  newpath -9 65 moveto 9 65 lineto 4 0 lineto -4 0 lineto fill" + \
        "  " + \
        "  45 rotate" + \
        "} repeat"
    return cont
accstar = tmpfn()
# ----------------------------------------------------------------------
# A blank glyph!
def tmpfn():
    """A deliberately empty glyph with a nominal 100x100 bounding box."""
    cont = GlyphContext()
    cont.lx, cont.rx = 500, 600
    cont.by, cont.ty = 500, 600
    return cont
blank = tmpfn()
# ----------------------------------------------------------------------
# End of actual glyph definitions. Now for the output layer.
verstring = "version unavailable"
# Table mapping our glyph names to Lilypond font glyph names.  Each entry is
#   (ourname, lilyname, unicode, ox, oy, f6, f7 [, overrides])
# where ourname is a module-level glyph variable, lilyname the name used in
# the output font, unicode a codepoint (0 = unencoded), and ox/oy the glyph
# origin (numbers are bounding-box fractions, strings name attributes of the
# glyph object) as consumed by writesfd below.  The optional dict overrides
# individual metrics, e.g. {"x0":"lx"}.
# NOTE(review): the meaning of the sixth and seventh fields is not visible
# in this part of the file — confirm against the code consuming glyph[5:7].
lilyglyphlist = [
    ("accent", "scripts.sforzato", 0, 0.5,0.5, 1,0.5),
    ("espressivo", "scripts.espr", 0, 0.5,0.5, 1,0.5),
    ("accslashbigup", "flags.ugrace", 0, 'ox','oy', 1,'oy'),
    ("accslashbigdn", "flags.dgrace", 0, 'ox','oy', 1,'oy'),
    ("acclparen", "accidentals.leftparen", 0, 1,0.5, 1,0.5, {"x1":"rx"}),
    ("accrparen", "accidentals.rightparen", 0, 0,0.5, 1,0.5, {"x0":"lx"}),
    ("arpeggioshort", "scripts.arpeggio", 0, 0,'oy', 1,'oy', {"x0":"lx","x1":"rx","y0":"oy","y1":"ty"}),
    ("arpeggioarrowdown", "scripts.arpeggio.arrow.M1", 0, 0,0, 1,0, {"x0":"lx","x1":"rx","y1":"ey"}),
    ("arpeggioarrowup", "scripts.arpeggio.arrow.1", 0, 0,0, 1,0, {"x0":"lx","x1":"rx","y0":"ey"}),
    ("trillwiggle", "scripts.trill_element", 0, 'lx',0, 1,0, {"x0":"lx", "x1":"rx"}),
    # Irritatingly, we have to put the digits' baselines at the
    # glitch (see below) rather than at the real baseline.
    ("big0", "zero", 0x0030, 0,'gy', 1,'gy'),
    ("big1", "one", 0x0031, 0,'gy', 1,'gy'),
    ("big2", "two", 0x0032, 0,'gy', 1,'gy'),
    ("big3", "three", 0x0033, 0,'gy', 1,'gy'),
    ("big4", "four", 0x0034, 0,'gy', 1,'gy'),
    ("big5", "five", 0x0035, 0,'gy', 1,'gy'),
    ("big6", "six", 0x0036, 0,'gy', 1,'gy'),
    ("big7", "seven", 0x0037, 0,'gy', 1,'gy'),
    ("big8", "eight", 0x0038, 0,'gy', 1,'gy'),
    ("big9", "nine", 0x0039, 0,'gy', 1,'gy'),
    ("asciiperiod", "period", 0x002e, 0,'gy', 1,'gy'),
    ("asciicomma", "comma", 0x002c, 0,'gy', 1,'gy'),
    ("asciiplus", "plus", 0x002b, 0,'gy', 1,'gy'),
    ("asciiminus", "hyphen", 0x002d, 0,'gy', 1,'gy'),
    ("bowdown", "scripts.downbow", 0, 0.5,0, 1,0),
    ("bowup", "scripts.upbow", 0, 0.5,0, 1,0),
    ("bracketlowerlily", "brackettips.down", 0, 0,'hy', 1,'hy'),
    ("bracketupperlily", "brackettips.up", 0, 0,'hy', 1,'hy'),
    ("breath", "scripts.rcomma", 0, 0,0.5, 1,0.5),
    ("revbreath", "scripts.lcomma", 0, 0,0.5, 1,0.5),
    ("varbreath", "scripts.rvarcomma", 0, 0.5,0.5, 1,0.5),
    ("revvarbreath", "scripts.lvarcomma", 0, 0.5,0.5, 1,0.5),
    ("caesura", "scripts.caesura", 0, 0,0.4, 1,0.4),
    ("caesura", "scripts.caesura.straight", 0, 0,0.4, 1,0.4),
    ("caesuracurved", "scripts.caesura.curved", 0, 0,0.4, 1,0.4),
    ("breve", "noteheads.sM1", 0, 0,0.5, 1,0.5),
    ("clefC", "clefs.C", 0, 0,0.5, 1,0.5),
    ("clefF", "clefs.F", 0, 0,'hy', 1,'hy'),
    ("clefG", "clefs.G", 0, 0,'hy', 1,'hy'),
    ("clefTAB", "clefs.tab", 0, 0,'hy', 1,'hy'),
    ("clefperc", "clefs.percussion", 0, 0,0.5, 1,0.5),
    ("clefCsmall", "clefs.C_change", 0, 0,0.5, 1,0.5),
    ("clefFsmall", "clefs.F_change", 0, 0,'hy', 1,'hy'),
    ("clefGsmall", "clefs.G_change", 0, 0,'hy', 1,'hy'),
    ("clefTABsmall", "clefs.tab_change", 0, 0,'hy', 1,'hy'),
    ("clefpercsmall", "clefs.percussion_change", 0, 0,0.5, 1,0.5),
    ("coda", "scripts.coda", 0, 0.5,0.5, 1,0.5),
    ("varcoda", "scripts.varcoda", 0, 0.5,0.5, 1,0.5),
    ("dynamicf", "f", 0x0066, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("dynamicm", "m", 0x006d, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("dynamicp", "p", 0x0070, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("dynamicr", "r", 0x0072, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("dynamics", "s", 0x0073, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("dynamicz", "z", 0x007a, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "xw":"rx"}),
    ("blank", "space", 0x0020, 'lx','by', 'rx','by', {"x0":"lx", "x1":"rx", "y0":"by", "y1":"ty", "xw":"rx"}),
    ("fermata", "scripts.ufermata", 0, 0.5,0, 1,0),
    ("fermata0", "scripts.ushortfermata", 0, 0.5,0, 1,0),
    ("fermata2", "scripts.ulongfermata", 0, 0.5,0, 1,0),
    ("fermata3", "scripts.uverylongfermata", 0, 0.5,0, 1,0),
    ("fermataup", "scripts.dfermata", 0, 0.5,1, 1,1),
    ("fermata0up", "scripts.dshortfermata", 0, 0.5,1, 1,1),
    ("fermata2up", "scripts.dlongfermata", 0, 0.5,1, 1,1),
    ("fermata3up", "scripts.dverylongfermata", 0, 0.5,1, 1,1),
    ("semiflat", "accidentals.M1", 0, 0,'hy', 1,'hy'),
    ("semiflat", "accidentals.mirroredflat", 0, 0,'hy', 1,'hy'),
    ("semiflatslash", "accidentals.mirroredflat.backslash", 0, 0,'hy', 1,'hy'),
    ("flat", "accidentals.M2", 0, 'ox','hy', 1,'hy'),
    ("flat", "accidentals.flat", 0, 'ox','hy', 1,'hy'),
    ("flatup", "accidentals.flat.arrowup", 0, 'ox','hy', 1,'hy'),
    ("flatdn", "accidentals.flat.arrowdown", 0, 'ox','hy', 1,'hy'),
    ("flatupdn", "accidentals.flat.arrowboth", 0, 'ox','hy', 1,'hy'),
    ("flatslash", "accidentals.flat.slash", 0, 'ox','hy', 1,'hy'),
    ("flatslash2", "accidentals.flat.slashslash", 0, 'ox','hy', 1,'hy'),
    ("sesquiflat", "accidentals.M3", 0, 0,'hy', 1,'hy'),
    ("sesquiflat", "accidentals.mirroredflat.flat", 0, 0,'hy', 1,'hy'),
    ("doubleflat", "accidentals.M4", 0, 'ox','hy', 1,'hy'),
    ("doubleflat", "accidentals.flatflat", 0, 'ox','hy', 1,'hy'),
    ("doubleflatslash", "accidentals.flatflat.slash", 0, 'ox','hy', 1,'hy'),
    ("harmart", "noteheads.s0harmonic", 0, 0,0.5, 1,'ay'),
    ("harmartfilled", "noteheads.s2harmonic", 0, 0,0.5, 1,'ay'),
    ("harmnat", "scripts.flageolet", 0, 0.5,0.5, 1,0.5),
    ("flagopen", "scripts.open", 0, 0.5,'cy', 1,'cy'),
    ("flagthumb", "scripts.thumb", 0, 0.5,'cy', 1,'cy'),
    ("headcrotchet", "noteheads.s2", 0, 0,0.5, 1,'ay'),
    ("headminim", "noteheads.s1", 0, 0,0.5, 1,'ay'),
    ("legato", "scripts.tenuto", 0, 0.5,0.5, 1,0.5),
    ("portatoup", "scripts.uportato", 0, 0.5,'ly', 1,'ly'),
    ("portatodn", "scripts.dportato", 0, 0.5,'ly', 1,'ly'),
    ("mordentlower", "scripts.mordent", 0, 0.5,'cy', 1,'cy'),
    ("mordentupper", "scripts.prall", 0, 0.5,'cy', 1,'cy'),
    ("mordentupperlong", "scripts.prallprall", 0, 0.5,'cy', 1,'cy'),
    ("mordentupperlower", "scripts.prallmordent", 0, 0.5,'cy', 1,'cy'),
    ("upmordentupperlong", "scripts.upprall", 0, 0.5,'cy', 1,'cy'),
    ("upmordentupperlower", "scripts.upmordent", 0, 0.5,'cy', 1,'cy'),
    ("mordentupperlongdown", "scripts.pralldown", 0, 0.5,'cy', 1,'cy'),
    ("downmordentupperlong", "scripts.downprall", 0, 0.5,'cy', 1,'cy'),
    ("downmordentupperlower", "scripts.downmordent", 0, 0.5,'cy', 1,'cy'),
    ("mordentupperlongup", "scripts.prallup", 0, 0.5,'cy', 1,'cy'),
    ("straightmordentupperlong", "scripts.lineprall", 0, 0.5,'cy', 1,'cy'),
    ("natural", "accidentals.0", 0, 0,'cy', 1,'cy'),
    ("natural", "accidentals.natural", 0, 0,'cy', 1,'cy'),
    ("naturalup", "accidentals.natural.arrowup", 0, 0,'cy', 1,'cy'),
    ("naturaldn", "accidentals.natural.arrowdown", 0, 0,'cy', 1,'cy'),
    ("naturalupdn", "accidentals.natural.arrowboth", 0, 0,'cy', 1,'cy'),
    ("peddot", "pedal..", 0, 0,'by', 1,'by'),
    ("pedP", "pedal.P", 0, 0,'by', 1,'by'),
    ("pedd", "pedal.d", 0, 0,'by', 1,'by'),
    ("pede", "pedal.e", 0, 0,'by', 1,'by'),
    ("pedPed", "pedal.Ped", 0, 0,'by', 1,'by'),
    ("pedstar", "pedal.*", 0, 0,'by', 1,'by'),
    ("peddash", "pedal.M", 0, 0,'by', 1,'by'),
    ("restdbllonga", "rests.M3", 0, 0,0.5, 1,0.5),
    ("restlonga", "rests.M2", 0, 0,0.5, 1,0.5),
    ("restbreve", "rests.M1", 0, 0,0, 1,0),
    ("restcrotchet", "rests.2", 0, 0,0.5, 1,0.5),
    ("restcrotchetx", "rests.2classical", 0, 0,0.5, 1,0.5),
    ("restdemi", "rests.5", 0, 0,'cy', 1,'cy'),
    ("resthemi", "rests.6", 0, 0,'cy', 1,'cy'),
    ("restquasi", "rests.7", 0, 0,'cy', 1,'cy'),
    ("restminim", "rests.1", 0, 0,0, 1,0),
    ("restminimo", "rests.1o", 0, 0,'oy', 1,'oy'),
    ("restquaver", "rests.3", 0, 0,'cy', 1,'cy'),
    ("restsemi", "rests.4", 0, 0,'cy', 1,'cy'),
    ("restminim", "rests.0", 0, 0,1, 1,1), # reuse restminim as semibreve rest
    ("restsemibreveo", "rests.0o", 0, 0,'oy', 1,'oy'),
    ("segno", "scripts.segno", 0, 0.5,0.5, 1,0.5),
    ("semibreve", "noteheads.s0", 0, 0,0.5, 1,0.5),
    ("sforzando", "scripts.umarcato", 0, 0.5,0, 1,0),
    ("sforzandodn", "scripts.dmarcato", 0, 0.5,1, 1,1),
    ("semisharp", "accidentals.1", 0, 0,0.5, 1,0.5),
    ("semisharp", "accidentals.sharp.slashslash.stem", 0, 0,0.5, 1,0.5),
    ("semisharp3", "accidentals.sharp.slashslashslash.stem", 0, 0,0.5, 1,0.5),
    ("sharp", "accidentals.2", 0, 0,'cy', 1,'cy'),
    ("sharp", "accidentals.sharp", 0, 0,'cy', 1,'cy'),
    ("sharp3", "accidentals.sharp.slashslashslash.stemstem", 0, 0,'cy', 1,'cy'),
    ("sharpup", "accidentals.sharp.arrowup", 0, 0,'cy', 1,'cy'),
    ("sharpdn", "accidentals.sharp.arrowdown", 0, 0,'cy', 1,'cy'),
    ("sharpupdn", "accidentals.sharp.arrowboth", 0, 0,'cy', 1,'cy'),
    ("sesquisharp", "accidentals.3", 0, 0,0.5, 1,0.5),
    ("sesquisharp", "accidentals.sharp.slashslash.stemstemstem", 0, 0,0.5, 1,0.5),
    ("doublesharp", "accidentals.4", 0, 0,0.5, 1,0.5),
    ("doublesharp", "accidentals.doublesharp", 0, 0,0.5, 1,0.5),
    ("staccatissup", "scripts.dstaccatissimo", 0, 0.5,1, 1,1),
    ("staccatissdn", "scripts.ustaccatissimo", 0, 0.5,0, 1,0),
    ("staccato", "scripts.staccato", 0, 0.5,0.5, 1,0.5),
    ("staccato", "dots.dot", 0, 0,0.5, 1,0.5),
    ("snappizz", "scripts.snappizzicato", 0, 0.5,0.5, 1,0.5),
    ("stopping", "scripts.stopped", 0, 0.5,0.5, 1,0.5),
    ("tailquaverdn", "flags.d3", 0, 'ox','oy', 1,'oy'),
    ("tailquaverup", "flags.u3", 0, 'ox','oy', 1,'oy'),
    ("tailsemidn", "flags.d4", 0, 'ox','oy', 1,'oy'),
    ("tailsemiup", "flags.u4", 0, 'ox','oy', 1,'oy'),
    ("taildemidn", "flags.d5", 0, 'ox','oy', 1,'oy'),
    ("taildemiup", "flags.u5", 0, 'ox','oy', 1,'oy'),
    ("tailhemidn", "flags.d6", 0, 'ox','oy', 1,'oy'),
    ("tailhemiup", "flags.u6", 0, 'ox','oy', 1,'oy'),
    ("tailquasidn", "flags.d7", 0, 'ox','oy', 1,'oy'),
    ("tailquasiup", "flags.u7", 0, 'ox','oy', 1,'oy'),
    ("timeCbar", "timesig.C22", 0, 0,0.5, 1,0.5),
    ("timeC", "timesig.C44", 0, 0,0.5, 1,0.5),
    ("trill", "scripts.trill", 0, 0.5,0, 1,0),
    ("turn", "scripts.turn", 0, 0.5,0.5, 1,0.5),
    ("invturn", "scripts.reverseturn", 0, 0.5,0.5, 1,0.5),
    ("openarrowup", "arrowheads.open.11", 0, 'cx','cy', 1,0.5),
    ("openarrowdown", "arrowheads.open.1M1", 0, 'cx','cy', 1,0.5),
    ("openarrowleft", "arrowheads.open.0M1", 0, 'cx','cy', 1,'cy'),
    ("openarrowright", "arrowheads.open.01", 0, 'cx','cy', 1,'cy'),
    ("closearrowup", "arrowheads.close.11", 0, 'cx','cy', 1,0.5),
    ("closearrowdown", "arrowheads.close.1M1", 0, 'cx','cy', 1,0.5),
    ("closearrowleft", "arrowheads.close.0M1", 0, 'cx','cy', 1,'cy'),
    ("closearrowright", "arrowheads.close.01", 0, 'cx','cy', 1,'cy'),
    ("upedalheel", "scripts.upedalheel", 0, 0.5,'cy', 1,'cy'),
    ("dpedalheel", "scripts.dpedalheel", 0, 0.5,'cy', 1,'cy'),
    ("upedaltoe", "scripts.upedaltoe", 0, 0.5,0, 1,0),
    ("dpedaltoe", "scripts.dpedaltoe", 0, 0.5,1, 1,1),
    ("acc2", "accordion.accFreebase", 0, 0.5,0, 1,0),
    ("acc3", "accordion.accDiscant", 0, 0.5,0, 1,0),
    ("acc4", "accordion.accStdbase", 0, 0.5,0, 1,0),
    ("accr", "accordion.accBayanbase", 0, 0.5,0, 1,0),
    ("accdot", "accordion.accDot", 0, 0.5,0.5, 1,0.5),
    ("accstar", "accordion.accOldEE", 0, 0.5,0, 1,0),
    ("diamondsemi", "noteheads.s0diamond", 0, 0,0.5, 1,0.5),
    ("diamondminim", "noteheads.s1diamond", 0, 0,0.5, 1,0.5),
    ("diamondcrotchet", "noteheads.s2diamond", 0, 0,0.5, 1,0.5),
    ("trianglesemi", "noteheads.s0triangle", 0, 0,0.5, 1,0.5),
    ("triangleminim", "noteheads.d1triangle", 0, 0,0.5, 1,'iy'),
    ("triangleminim", "noteheads.u1triangle", 0, 0,0.5, 1,'ay'),
    ("trianglecrotchet", "noteheads.d2triangle", 0, 0,0.5, 1,'iy'),
    ("trianglecrotchet", "noteheads.u2triangle", 0, 0,0.5, 1,'ay'),
    ("crosssemi", "noteheads.s0cross", 0, 0,0.5, 1,0.5),
    ("crossminim", "noteheads.s1cross", 0, 0,0.5, 1,'ay'),
    ("crosscrotchet", "noteheads.s2cross", 0, 0,0.5, 1,'ay'),
    ("crosscircle", "noteheads.s2xcircle", 0, 0,0.5, 1,0.5),
    ("slashsemi", "noteheads.s0slash", 0, 0,0.5, 1,0.5),
    ("slashminim", "noteheads.s1slash", 0, 0,0.5, 1,'ay'),
    ("slashcrotchet", "noteheads.s2slash", 0, 0,0.5, 1,'ay'),
    ]
def writesfd(filepfx, fontname, encodingname, encodingsize, outlines, glyphlist):
    """Write a FontForge .sfd source file called <filepfx>.sfd.

    fontname     -- value for the FontName/FullName/FamilyName header fields.
    encodingname -- FontForge encoding name (e.g. "UnicodeBmp" or "Custom").
    encodingsize -- total number of slots in that encoding.
    outlines     -- dict mapping our glyph identifier to a (bbox, path)
                    pair as produced by get_ps_path().
    glyphlist    -- sequence of glyph tuples; slots [:5] are
                    (ourname, theirname, codepoint, originx, originy), and
                    an optional dict in slot 9 may supply an explicit
                    advance width under the key "xw".
    """
    fname = filepfx + ".sfd"
    f = open(fname, "w")
    # Fixed .sfd header boilerplate, plus the caller-supplied names.
    f.write("SplineFontDB: 3.0\n")
    f.write("FontName: %s\n" % fontname)
    f.write("FullName: %s\n" % fontname)
    f.write("FamilyName: %s\n" % fontname)
    f.write("Copyright: No copyright is claimed on this font file.\n")
    f.write("Version: %s\n" % verstring)
    f.write("ItalicAngle: 0\n")
    f.write("UnderlinePosition: -100\n")
    f.write("UnderlineWidth: 50\n")
    f.write("Ascent: 800\n")
    f.write("Descent: 200\n")
    f.write("LayerCount: 2\n")
    f.write("Layer: 0 0 \"Back\" 1\n")
    f.write("Layer: 1 0 \"Fore\" 0\n")
    f.write("UseXUID: 0\n")
    f.write("OS2Version: 0\n")
    f.write("OS2_WeightWidthSlopeOnly: 0\n")
    f.write("OS2_UseTypoMetrics: 1\n")
    f.write("CreationTime: 1252826347\n") # when I first wrote this prologue-writing code
    f.write("ModificationTime: %d\n" % time.time())
    f.write("OS2TypoAscent: 0\n")
    f.write("OS2TypoAOffset: 1\n")
    f.write("OS2TypoDescent: 0\n")
    f.write("OS2TypoDOffset: 1\n")
    f.write("OS2TypoLinegap: 0\n")
    f.write("OS2WinAscent: 0\n")
    f.write("OS2WinAOffset: 1\n")
    f.write("OS2WinDescent: 0\n")
    f.write("OS2WinDOffset: 1\n")
    f.write("HheadAscent: 0\n")
    f.write("HheadAOffset: 1\n")
    f.write("HheadDescent: 0\n")
    f.write("HheadDOffset: 1\n")
    f.write("OS2Vendor: 'PfEd'\n")
    f.write("DEI: 0\n")
    f.write("Encoding: %s\n" % encodingname)
    f.write("UnicodeInterp: none\n")
    f.write("NameList: Adobe Glyph List\n")
    f.write("DisplaySize: -96\n")
    f.write("AntiAlias: 1\n")
    f.write("FitToEm: 1\n")
    f.write("WinInfo: 64 8 2\n")
    f.write("BeginChars: %d %d\n" % (encodingsize, len(glyphlist)))
    i = 0
    for glyph in glyphlist:
        ourname, theirname, encoding, ox, oy = glyph[:5]
        bbox, path = outlines[ourname]
        # The glyph object itself lives in a module-level variable of the
        # same name, so eval() looks it up.
        char = eval(ourname)
        # Coordinate transforms from potrace's pixel space into font
        # units: scale, shift by the glyph's own origin, then shift so
        # the per-glyph origin (ox, oy) lands at (0, 0).
        xrt = lambda x: x * (3600.0 / (40*char.scale)) # potrace's factor of ten, ours of four
        yrt = lambda y: y * (3600.0 / (40*char.scale))
        xat = lambda x: xrt(x) - char.origin[0]
        yat = lambda y: yrt(y) - char.origin[1]
        xt = lambda x: xat(x) - xat(ox)
        yt = lambda y: yat(y) - yat(oy)
        if len(glyph) > 9 and glyph[9].has_key("xw"):
            width = xt(glyph[9]["xw"]) # explicitly specified width
        else:
            width = xt(bbox[2]) # mostly default to RHS of bounding box
        f.write("\nStartChar: %s\n" % theirname)
        f.write("Encoding: %d %d %d\n" % (encoding, encoding, i))
        f.write("Width: %g\n" % width)
        f.write("Flags: W\n")
        f.write("LayerCount: 2\n")
        f.write("Fore\n")
        f.write("SplineSet\n")
        # Path elements: ('m', x, y) moveto; ('l', x0, y0, x1, y1)
        # lineto; ('c', x0, y0, cx1, cy1, cx2, cy2, x1, y1) curveto.
        for c in path:
            if c[0] == 'm':
                f.write("%g %g m 1\n" % (xt(c[1]), yt(c[2])))
            elif c[0] == 'l':
                f.write(" %g %g l 1\n" % (xt(c[3]), yt(c[4])))
            elif c[0] == 'c':
                f.write(" %g %g %g %g %g %g c 0\n" % (xt(c[3]), yt(c[4]), xt(c[5]), yt(c[6]), xt(c[7]), yt(c[8])))
            # closepath is not given explicitly
        f.write("EndSplineSet\n")
        f.write("EndChar\n")
        i = i + 1
    f.write("\nEndChars\n")
    f.write("EndSplineFont\n")
    f.close()
# Command-line preamble: an optional leading "--ver=<string>" switch
# overrides the version string embedded in the generated fonts.
args = sys.argv[1:]
if args and args[0].startswith("--ver="):
    verstring = args[0][len("--ver="):]
    del args[0]
if len(args) == 2 and args[0] == "-test":
    # Debug mode: draw one glyph, named on the command line, via the
    # glyph object's testdraw() method.
    # example usage:
    # ./glyphs.py -test braceupper | gs -sDEVICE=pngmono -sOutputFile=out.png -r72 -g1000x1000 -dBATCH -dNOPAUSE -q -
    # and then to view that in gui for correction:
    # convert -recolor '.25 0 0 0 0 .25 0 0 0 0 .25 0 .75 .75 .75 1' out.png zout.gif && ./gui.py zout.gif
    glyph = eval(args[1])    # glyphs are module-level variables, so eval() looks one up by name
    glyph.testdraw()
elif len(args) == 2 and (args[0] == "-testps" or args[0] == "-testpsunscaled"):
    # Debug mode: dump one glyph's traced outline as raw PostScript path
    # operators on stdout. "-testps" converts into the glyph's design
    # coordinate system; "-testpsunscaled" leaves potrace's raw
    # coordinates untouched.
    char = eval(args[1])
    bbox, path = get_ps_path(char)
    if args[0] == "-testps":
        xrt = lambda x: x * (3600.0 / (40*char.scale)) # potrace's factor of ten, ours of four
        yrt = lambda y: y * (3600.0 / (40*char.scale))
        xat = lambda x: xrt(x) - char.origin[0]
        yat = lambda y: yrt(y) - char.origin[1]
    else:
        xat = yat = lambda x: x    # identity transform: raw potrace coordinates
    print "%% bbox: %g %g %g %g" % (xat(bbox[0]), yat(bbox[1]), xat(bbox[2]), yat(bbox[3]))
    # Path elements carry the current point in slots [1:3]; line and
    # curve elements carry their remaining points in slots [3:].
    for c in path:
        if c[0] == 'm':
            print "%g %g moveto" % (xat(c[1]), yat(c[2]))
        elif c[0] == 'l':
            print " %g %g lineto" % (xat(c[3]), yat(c[4]))
        elif c[0] == 'c':
            print " %g %g %g %g %g %g curveto" % (xat(c[3]), yat(c[4]), xat(c[5]), yat(c[6]), xat(c[7]), yat(c[8]))
        elif c[0] == 'cp':
            print "closepath"
elif len(args) == 1 and args[0] == "-mus":
# Generate a Postscript prologue suitable for use with 'mus' in
glyphlist = [
"accent",
"acciaccatura",
"appoggiatura",
"arpeggio",
"big0",
"big1",
"big2",
"big3",
"big4",
"big5",
"big6",
"big7",
"big8",
"big9",
"bowdown",
"bowup",
"bracelower",
"braceupper",
"bracketlower",
"bracketupper",
"breath",
"breve",
"clefC",
"clefF",
"clefG",
"coda",
"ditto",
"doubleflat",
"doublesharp",
"dynamicf",
"dynamicm",
"dynamicp",
"dynamics",
"dynamicz",
"fermata",
"flat",
"harmart",
"harmnat",
"headcrotchet",
"headminim",
"legato",
"mordentlower",
"mordentupper",
"natural",
"repeatmarks",
"restbreve",
"restcrotchet",
"restdemi",
"resthemi",
"restminim",
"restquaver",
"restsemi",
"segno",
"semibreve",
"sforzando",
"sharp",
"small0",
"small1",
"small2",
"small3",
"small4",
"small5",
"small6",
"small7",
"small8",
"small9",
"smallflat",
"smallnatural",
"smallsharp",
"staccatissdn",
"staccatissup",
"staccato",
"stopping",
"taildemidn",
"taildemiup",
"tailhemidn",
"tailhemiup",
"tailquaverdn",
"tailquaverup",
"tailsemidn",
"tailsemiup",
"timeCbar",
"timeC",
"trill",
"turn",
]
encoding = [(i+33, glyphlist[i]) for i in range(len(glyphlist))]
# the parent directory.
f = open("prologue.ps", "w")
g = open("abspaths.txt", "w")
f.write("save /m /rmoveto load def /l /rlineto load def\n")
f.write("/hm {0 m} def /vm {0 exch m} def /hl {0 l} def /vl {0 exch l} def\n")
f.write("/mm /moveto load def\n")
f.write("/c {4 -1 roll 5 index add 4 -1 roll 4 index add 4 2 roll\n")
f.write(" exch 3 index add exch 2 index add rcurveto} def\n")
f.write("/vhc {0 5 1 roll 0 c} def /hvc {0 4 1 roll 0 exch c} def\n")
f.write("/f /fill load def\n")
f.write("/cp {currentpoint closepath moveto} def\n")
f.write("/ip {0.02 dup scale 2 setlinecap 0 setlinejoin 0 setgray} def\n")
f.write("/beam {newpath 50 sub moveto\n")
f.write(" 0 100 rlineto 50 add lineto 0 -100 rlineto closepath fill} def\n")
f.write("/line {newpath moveto lineto setlinewidth stroke} def\n")
f.write("/tdict 5 dict def\n")
f.write("/tie {tdict begin\n")
f.write(" /x2 exch def /yp exch def /x1 exch def /y exch def newpath\n")
f.write(" x1 yp moveto\n")
f.write(" x1 y abs add yp y add\n")
f.write(" x2 y abs sub yp y add\n")
f.write(" x2 yp curveto\n")
f.write(" 30 setlinewidth stroke\n")
f.write("end} def\n")
f.write("10 dict dup begin\n")
f.write("/FontType 3 def /FontMatrix [1 0 0 1 0 0] def\n")
f.write("/Encoding 256 array def 0 1 255 {Encoding exch /.notdef put} for\n")
for code, name in encoding:
f.write("Encoding %d /.%s put\n" % (code, name))
f.write("/BBox %d dict def\n" % len(encoding))
f.write("/CharacterDefs %d dict def\n" % len(encoding))
fontbbox = (None,)*4
for code, name in encoding:
char = eval(name)
xrt = lambda x: x * (3600.0 / (40*char.scale)) # potrace's factor of ten, ours of four
yrt = lambda y: y * (3600.0 / (40*char.scale))
xat = lambda x: round(xrt(x) - char.origin[0])
yat = lambda y: round(yrt(y) - char.origin[1])
bbox, path = get_ps_path(char)
f.write("CharacterDefs /.%s {\n" % name)
g.write("# %s\n" % name)
output = "newpath"
currentpoint = (None, None)
for c in path:
if c[0] == 'm':
x1, y1 = xat(c[1]), yat(c[2])
x0, y0 = currentpoint
if x0 == None:
output = output + " %g %g mm" % (x1,y1)
elif x0 == x1:
output = output + " %g vm" % (y1-y0)
elif y0 == y1:
output = output + " %g hm" % (x1-x0)
else:
output = output + " %g %g m" % (x1-x0, y1-y0)
g.write(" %g %g moveto\n" % (x1,y1))
currentpoint = x1,y1
elif c[0] == 'l':
x0, y0 = xat(c[1]), yat(c[2])
x1, y1 = xat(c[3]), yat(c[4])
if x0 == x1:
output = output + " %g vl" % (y1-y0)
elif y0 == y1:
output = output + " %g hl" % (x1-x0)
else:
output = output + " %g %g l" % (x1-x0, y1-y0)
g.write(" %g %g lineto\n" % (x1,y1))
currentpoint = x1,y1
elif c[0] == 'c':
x0, y0 = xat(c[1]), yat(c[2])
x1, y1 = xat(c[3]), yat(c[4])
x2, y2 = xat(c[5]), yat(c[6])
x3, y3 = xat(c[7]), yat(c[8])
if x0 == x1 and y2 == y3:
output = output + " %g %g %g %g vhc" % (y1-y0, x2-x1, y2-y1, x3-x2)
elif y0 == y1 and x2 == x3:
output = output + " %g %g %g %g hvc" % (x1-x0, x2-x1, y2-y1, y3-y2)
else:
output = output + " %g %g %g %g %g %g c" % (x1-x0, y1-y0, x2-x1, y2-y1, x3-x2, y3-y2)
g.write(" %g %g %g %g %g %g curveto\n" % (x1,y1,x2,y2,x3,y3))
currentpoint = x3,y3
elif c[0] == 'cp':
output = output + " cp"
g.write(" closepath\n")
currentpoint = None, None
f.write(" " + output + " f\n")
x0, y0 = xat(bbox[0]), yat(bbox[1])
x1, y1 = xat(bbox[2]), yat(bbox[3])
f.write("} put BBox /.%s [%g %g %g %g] put\n" % ((name, x0, y0, x1, y1)))
g.write(" # bbox: %g %g %g %g\n" % (x0, y0, x1, y1))
g.write(" # w,h: %g %g\n" % (x1-x0, y1-y0))
fontbbox = update_bbox(fontbbox, x0,y0)
fontbbox = update_bbox(fontbbox, x1,y1)
x0,y0,x1,y1 = fontbbox
f.write("/FontBBox [%g %g %g %g] def\n" % (x0, y0, x1, y1))
f.write("/BuildChar {0 begin /char exch def /fontdict exch def\n")
f.write(" /charname fontdict /Encoding get char get def\n")
f.write(" 0 0 fontdict /BBox get charname get aload pop setcachedevice\n")
f.write(" fontdict /CharacterDefs get charname get exec\n")
f.write("end} def\n")
f.write("/BuildChar load 0 3 dict put\n")
f.write("/UniqueID 1 def\n")
f.write("end /MusicGfx exch definefont /MGfx exch def\n")
f.write("/ss 1 string def\n")
for code, name in encoding:
f.write("/.%s {moveto MGfx setfont ss 0 %d put ss show} def\n" % (name, code))
f.write("/.tdot {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 51 150 .staccato\n")
f.write(" grestore 67 hm} def\n")
f.write("/.tbreve {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 333 150 .breve\n")
f.write(" grestore 351 hm} def\n")
f.write("/.tsemibreve {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 207 150 .semibreve\n")
f.write(" grestore 247 hm} def\n")
f.write("/.tminim {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headminim\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" grestore 178 hm} def\n")
f.write("/.tcrotchet {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headcrotchet\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" grestore 178 hm} def\n")
f.write("/.tquaver {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headcrotchet\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" 304 1050 .tailquaverup\n")
f.write(" grestore 293 hm} def\n")
f.write("/.tsemiquaver {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headcrotchet\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" 304 1050 .tailquaverup\n")
f.write(" 304 900 .tailquaverup\n")
f.write(" grestore 293 hm} def\n")
f.write("/.tdemisemiquaver {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headcrotchet\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" 304 1050 .tailquaverup\n")
f.write(" 304 900 .tailquaverup\n")
f.write(" 304 750 .tailquaverup\n")
f.write(" grestore 293 hm} def\n")
f.write("/.themidemisemiquaver {gsave\n")
f.write(" currentpoint translate 0.5 dup scale\n")
f.write(" 160 150 .headcrotchet\n")
f.write(" 24 setlinewidth newpath 304 186 moveto 850 vl stroke\n")
f.write(" 304 1050 .tailquaverup\n")
f.write(" 304 900 .tailquaverup\n")
f.write(" 304 750 .tailquaverup\n")
f.write(" 304 600 .tailquaverup\n")
f.write(" grestore 293 hm} def\n")
f.write("/.df {\n")
f.write(" currentfont currentpoint currentpoint .dynamicf moveto 216 hm setfont\n")
f.write("} def\n")
f.write("/.dm {\n")
f.write(" currentfont currentpoint currentpoint .dynamicm moveto 460 hm setfont\n")
f.write("} def\n")
f.write("/.dp {\n")
f.write(" currentfont currentpoint currentpoint .dynamicp moveto 365 hm setfont\n")
f.write("} def\n")
f.write("/.ds {\n")
f.write(" currentfont currentpoint currentpoint .dynamics moveto 225 hm setfont\n")
f.write("} def\n")
f.write("/.dz {\n")
f.write(" currentfont currentpoint currentpoint .dynamicz moveto 299 hm setfont\n")
f.write("} def\n")
f.close()
g.close()
# Now generate prologue.c.
f = open("prologue.ps", "r")
g = open("../prologue.c", "w")
g.write("/* This file is automatically generated from the Mus glyph\n")
g.write(" * descriptions. Do not expect changes made directly to this\n")
g.write(" * file to be permanent. */\n")
g.write("\n")
g.write("#include <stdio.h>\n")
g.write("\n")
g.write("static char *prologue[] = {\n")
wrapbuf = ""
while 1:
s = f.readline()
if s == "": break
ws = s.split()
for w in ws:
if len(wrapbuf) + 1 + len(w) <= 69:
wrapbuf = wrapbuf + " " + w
else:
g.write(" \"%s\\n\",\n" % wrapbuf)
wrapbuf = w
g.write(" \"%s\\n\",\n" % wrapbuf)
g.write(" NULL\n")
g.write("};\n")
g.write("\n")
g.write("void write_prologue(FILE *fp) {\n")
g.write(" char **p;\n")
g.write("\n")
g.write(" for (p=prologue; *p; p++)\n")
g.write(" fputs(*p, fp);\n")
g.write("}\n")
elif len(args) == 1 and args[0][:5] == "-lily":
    # Generate .sfd files and supporting metadata which we then
    # process with FontForge into a replacement system font set for
    # GNU LilyPond. ("-lilymain" skips the brace font; "-lilybrace"
    # builds only the brace font -- see the branches further down.)
    def writetables(filepfx, size, subids, subnames, outlines, glyphlist, bracesonly=0):
        """Write the LilyPond metadata tables <filepfx>.LILF/.LILY/.LILC.

        size      -- design size of the font in points; also scales the
                     coordinates written into the LILC table.
        subids    -- number of subfonts (currently unused; the subfont
                     list comes from subnames).
        subnames  -- names of the Type 1 subfonts, written to the LILF
                     table and referenced per-glyph in LILC.
        glyphlist -- glyph tuples whose slots [:9] are (ourname,
                     theirname, codepoint, ox, oy, ax, ay, subid, subcode).
        bracesonly -- if true, write only design_size into the LILY table.
        """
        fname = filepfx + ".LILF"
        f = open(fname, "w")
        f.write(" ".join(subnames))
        f.close()
        fname = filepfx + ".LILY"
        f = open(fname, "w")
        if not bracesonly:
            f.write("(staffsize . %.6f)\n" % size)
            f.write("(stafflinethickness . %.6f)\n" % (size/40.))
            f.write("(staff_space . %.6f)\n" % (size/4.))
            f.write("(linethickness . %.6f)\n" % (size/40.))
            # Note: reads the module-level headcrotchet glyph for its scale.
            bbbox = outlines["headcrotchet"][0]
            bwidth = bbbox[2] - bbbox[0]
            f.write("(black_notehead_width . %.6f)\n" % (bwidth * 3600.0 / (40*headcrotchet.scale) * (size/1000.)))
            f.write("(ledgerlinethickness . %.6f)\n" % (size/40.))
        f.write("(design_size . %.6f)\n" % size)
        if not bracesonly:
            f.write("(blot_diameter . 0.4)\n")
        f.close()
        fname = filepfx + ".LILC"
        f = open(fname, "w")
        # One s-expression per glyph: bounding box, owning subfont and
        # code point within it, and the attachment point (ax, ay).
        for glyph in glyphlist:
            ourname, theirname, encoding, ox, oy, ax, ay, subid, subcode = glyph[:9]
            char = eval(ourname)
            bbox, path = outlines[ourname]
            # Same potrace-to-font-units transform as writesfd, with an
            # extra factor scaling 1000 design units to `size` points.
            xrt = lambda x: x * (3600.0 / (40*char.scale)) # potrace's factor of ten, ours of four
            yrt = lambda y: y * (3600.0 / (40*char.scale))
            xat = lambda x: xrt(x) - char.origin[0]
            yat = lambda y: yrt(y) - char.origin[1]
            xt = lambda x: (xat(x) - xat(ox)) * (size/1000.)
            yt = lambda y: (yat(y) - yat(oy)) * (size/1000.)
            f.write("(%s .\n" % theirname)
            f.write("((bbox . (%.6f %.6f %.6f %.6f))\n" % (xt(bbox[0]), yt(bbox[1]), xt(bbox[2]), yt(bbox[3])))
            f.write("(subfont . \"%s\")\n" % subnames[subid])
            f.write("(subfont-index . %d)\n" % subcode)
            f.write("(attachment . (%.6f . %.6f))))\n" % (xt(ax), yt(ay)))
        f.close()
if args[0] != "-lilybrace":
# Allocate sequential Unicode code points in the private use
# area for all the glyphs that don't already have a specific
# ASCII code point where they need to live.
code = 0xe100
for i in range(len(lilyglyphlist)):
g = lilyglyphlist[i]
if g[2] == 0:
lilyglyphlist[i] = g[:2] + (code,) + g[3:]
code = code + 1
# Construct the PS outlines via potrace, once for each glyph
# we're actually using.
outlines = {}
for g in lilyglyphlist:
gid = g[0]
char = eval(gid)
if not outlines.has_key(gid):
outlines[gid] = get_ps_path(char)
# PAINFUL HACK! Add invisible droppings above and below the
# digits. This is because LP draws time signatures by
# mushing the digits up against the centre line of the
# stave, in the assumption that they'll be big enough to
# overlap the top and bottom lines too. Personally I like
# time signatures to _not quite_ collide with the stave
# lines (except the 2nd and 4th, of course, which they can't
# avoid), and that means I need LP to consider the digits'
# bounding boxes to be just a bit wider.
#
# The pathlets appended here are of zero thickness, so they
# shouldn't ever actually show up.
digits = ["big%d" % i for i in range(10)]
ymid = (outlines["big4"][0][1] + outlines["big4"][0][3]) / 2.0
for d in digits:
char = eval(d)
d250 = 250.0 * (40*char.scale) / 3600.0
u250 = 236.0 * (40*char.scale) / 3600.0 # empirically chosen
one = 1.0 * (40*char.scale) / 3600.0
yone = 0 # set to one to make the droppings visible for debugging
bbox, path = outlines[d]
xmid = (bbox[0] + bbox[2]) / 2.0
path.append(('m', xmid, ymid-d250+yone))
path.append(('l', xmid, ymid-d250+yone, xmid-one, ymid-d250))
path.append(('l', xmid-one, ymid-d250, xmid+one, ymid-d250))
path.append(('l', xmid+one, ymid-d250, xmid, ymid-d250+yone))
path.append(('m', xmid, ymid+u250-yone))
path.append(('l', xmid, ymid+u250-yone, xmid-one, ymid+u250))
path.append(('l', xmid-one, ymid+u250, xmid+one, ymid+u250))
path.append(('l', xmid+one, ymid+u250, xmid, ymid+u250-yone))
bbox = (bbox[0], min(bbox[1], ymid-d250), \
bbox[2], max(bbox[3], ymid+u250))
outlines[d] = bbox, path
# Go through the main glyph list and transform the
# origin/attachment/width specifications into coordinates in
# the potrace coordinate system.
for i in range(len(lilyglyphlist)):
g = list(lilyglyphlist[i])
gid = g[0]
if len(g) > 7:
dict = g[7]
for k, v in dict.items():
if k[0] == "x":
v = eval(gid+"."+v) * 40
elif k[0] == "y":
v = (1000 - eval(gid+"."+v)) * 40
else:
raise "Error!"
dict[k] = v
else:
dict = {}
x0, y0, x1, y1 = outlines[gid][0]
# Allow manual overriding of the glyph's logical
# bounding box as written into the LILC table (used to
# make arpeggio and trill elements line up right, and
# also - for some reason - used for dynamics glyph
# kerning in place of the perfectly good system in the
# font format proper). If this happens, the attachment
# points are given in terms of the overridden bounding
# box.
x0 = dict.get("x0", x0)
x1 = dict.get("x1", x1)
y0 = dict.get("y0", y0)
y1 = dict.get("y1", y1)
outlines[gid] = ((x0,y0,x1,y1),outlines[gid][1])
xo = g[3]
if type(xo) == types.StringType:
xo = eval(gid+"."+xo) * 40
else:
xo = x0 + (x1-x0) * xo
g[3] = xo
yo = g[4]
if type(yo) == types.StringType:
yo = (1000 - eval(gid+"."+yo)) * 40
else:
yo = y0 + (y1-y0) * yo
g[4] = yo
xa = g[5]
if type(xa) == types.StringType:
xa = eval(gid+"."+xa) * 40
else:
xa = x0 + (x1-x0) * xa
g[5] = xa
ya = g[6]
if type(ya) == types.StringType:
ya = (1000 - eval(gid+"."+ya)) * 40
else:
ya = y0 + (y1-y0) * ya
g[6] = ya
lilyglyphlist[i] = tuple(g)
# Split up the glyph list into appropriately sized chunks
# for the custom-encoded .pfas.
subid = 0
subcode = 256
subglyphlists = [[]]
for i in range(len(lilyglyphlist)):
if lilyglyphlist[i][2] < 256:
thissubid = 0
thissubcode = lilyglyphlist[i][2]
else:
if subcode >= 256:
subid = subid + 1
subcode = 33
subglyphlists.append([])
thissubid = subid
thissubcode = subcode
subcode = subcode + 1
subglyphlists[thissubid].append(lilyglyphlist[i][:2] + (thissubcode,) + lilyglyphlist[i][3:])
lilyglyphlist[i] = lilyglyphlist[i][:7] + (thissubid, thissubcode) + lilyglyphlist[i][7:]
subids = subid + 1
sizes = 11, 13, 14, 16, 18, 20, 23, 26
for size in sizes:
writesfd("gonville-%d" % size, "Gonville-%d" % size, "UnicodeBmp", 65537, outlines, lilyglyphlist)
subnames = ["gonvillealpha%d" % size] + ["gonvillepart%d" % subid for subid in range(1,subids)]
writetables("gonville-%d" % size, size, subids, subnames, outlines, lilyglyphlist)
writesfd("gonvillealpha%d" % size, subnames[0], "Custom", 256, outlines, subglyphlists[0])
for subid in range(1,subids):
writesfd("gonvillepart%d" % subid, "Gonville-Part%d" % subid, "Custom", 256, outlines, subglyphlists[subid])
for dir in "lilyfonts", "lilyfonts/type1", "lilyfonts/otf", "lilyfonts/svg":
try:
os.mkdir(dir)
except OSError, e:
pass # probably already existed, which we don't mind
for size in sizes:
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"LoadTableFromFile(\"LILC\", \"gonville-%d.LILC\"); " + \
"LoadTableFromFile(\"LILF\", \"gonville-%d.LILF\"); " + \
"LoadTableFromFile(\"LILY\", \"gonville-%d.LILY\"); " + \
"Generate($2)' gonville-%d.sfd lilyfonts/otf/gonville-%d.otf") % ((size,)*5))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"LoadTableFromFile(\"LILC\", \"gonville-%d.LILC\"); " + \
"LoadTableFromFile(\"LILF\", \"gonville-%d.LILF\"); " + \
"LoadTableFromFile(\"LILY\", \"gonville-%d.LILY\"); " + \
"SetFontNames(\"Emmentaler-%d\",\"Emmentaler-%d\",\"Emmentaler-%d\"); " + \
"Generate($2)' gonville-%d.sfd lilyfonts/otf/emmentaler-%d.otf") % ((size,)*8))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"SetFontNames(\"Emmentaler-%d\",\"Emmentaler-%d\",\"Emmentaler-%d\"); " + \
"Generate($2)' gonville-%d.sfd lilyfonts/svg/emmentaler-%d.svg") % ((size,)*5))
for size in sizes:
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"SetFontNames(\"feta-alphabet%d\",\"feta-alphabet%d\",\"feta-alphabet%d\"); " + \
"Generate($2)' gonvillealpha%d.sfd lilyfonts/type1/gonvillealpha%d.pfa") % ((size,)*5))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"SetFontNames(\"feta-alphabet%d\",\"feta-alphabet%d\",\"feta-alphabet%d\"); " + \
"Generate($2)' gonvillealpha%d.sfd lilyfonts/type1/gonvillealpha%d.pfb") % ((size,)*5))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"SetFontNames(\"feta-alphabet%d\",\"feta-alphabet%d\",\"feta-alphabet%d\"); " + \
"Generate($2)' gonvillealpha%d.sfd lilyfonts/svg/gonvillealpha%d.svg") % ((size,)*5))
try:
os.symlink("gonvillealpha%d.pfa" % size, "lilyfonts/type1/feta-alphabet%d.pfa" % size)
except OSError, e:
pass # probably already existed, which we don't mind
try:
os.symlink("gonvillealpha%d.pfb" % size, "lilyfonts/type1/feta-alphabet%d.pfb" % size)
except OSError, e:
pass # probably already existed, which we don't mind
try:
os.symlink("gonvillealpha%d.svg" % size, "lilyfonts/svg/feta-alphabet%d.svg" % size)
except OSError, e:
pass # probably already existed, which we don't mind
for subid in range(1,subids):
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonvillepart%d.sfd lilyfonts/type1/gonvillepart%d.pfa") % ((subid,)*2))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonvillepart%d.sfd lilyfonts/svg/gonvillepart%d.svg") % ((subid,)*2))
# Now do most of that all over again for the specialist brace
# font, if we're doing that. (The "-lilymain" option doesn't
# regenerate the braces, because they're large and slow and it's
# nice to be able to debug just the interesting bits.) Construct
# the PS outlines via potrace, once for each glyph we're
# actually using.
if args[0] != "-lilymain":
outlines = {}
bracelist = []
for i in range(576):
char = scaledbrace(525 * (151./150)**i)
gid = "brace%d" % i
exec "%s = char" % gid
outlines[gid] = get_ps_path(char)
x0, y0, x1, y1 = outlines[gid][0]
yh = (y0+y1)/2.0
bracelist.append((gid, gid, 0xe100+i, x1, yh, x1, yh))
# Split up the glyph list into appropriately sized chunks
# for the custom-encoded .pfas.
subid = -1
subcode = 256
subbracelists = [[]]
for i in range(len(bracelist)):
if subcode >= 256:
subid = subid + 1
subcode = 33
subbracelists.append([])
thissubid = subid
thissubcode = subcode
subcode = subcode + 1
subbracelists[thissubid].append(bracelist[i][:2] + (thissubcode,) + bracelist[i][3:])
bracelist[i] = bracelist[i] + (thissubid, thissubcode)
subids = subid + 1
writesfd("gonville-brace", "Gonville-Brace", "UnicodeBmp", 65537, outlines, bracelist)
subnames = ["gonville-bracepart%d" % subid for subid in range(subids)]
writetables("gonville-brace", 20, subids, subnames, outlines, bracelist, 1)
for subid in range(subids):
writesfd("gonville-bracepart%d" % subid, "Gonville-Brace-Part%d" % subid, "Custom", 256, outlines, subbracelists[subid])
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"LoadTableFromFile(\"LILC\", \"gonville-brace.LILC\"); " + \
"LoadTableFromFile(\"LILF\", \"gonville-brace.LILF\"); " + \
"LoadTableFromFile(\"LILY\", \"gonville-brace.LILY\"); " + \
"Generate($2)' gonville-brace.sfd lilyfonts/otf/gonville-brace.otf"))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonville-brace.sfd lilyfonts/svg/gonville-brace.svg"))
try:
os.symlink("gonville-brace.otf", "lilyfonts/otf/aybabtu.otf")
os.symlink("gonville-brace.svg", "lilyfonts/svg/aybabtu.svg")
os.symlink("gonville-brace.otf", "lilyfonts/otf/emmentaler-brace.otf")
os.symlink("gonville-brace.svg", "lilyfonts/svg/emmentaler-brace.svg")
except OSError, e:
pass # probably already existed, which we don't mind
for subid in range(subids):
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonville-bracepart%d.sfd lilyfonts/type1/gonville-bracepart%d.pfa") % ((subid,)*2))
system(("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonville-bracepart%d.sfd lilyfonts/svg/gonville-bracepart%d.svg") % ((subid,)*2))
elif len(args) == 1 and args[0] == "-simple":
# Generate an .sfd file which can be compiled into a really
# simple binary font in which all the glyphs are in the bottom
# 256 code points.
#
# Future glyphs should be added to the end of this list, so that
# the existing code point values stay the same.
glyphlist = [
("big0", 0x30),
("big1", 0x31),
("big2", 0x32),
("big3", 0x33),
("big4", 0x34),
("big5", 0x35),
("big6", 0x36),
("big7", 0x37),
("big8", 0x38),
("big9", 0x39),
("dynamicf", 0x66),
("dynamicm", 0x6d),
("dynamicp", 0x70),
("dynamicr", 0x72),
("dynamics", 0x73),
("dynamicz", 0x7a),
("asciiplus", 0x2b),
("asciicomma", 0x2c),
("asciiminus", 0x2d),
("asciiperiod", 0x2e),
("accent", 0x3e),
("acclparen", 0x28),
("accrparen", 0x29),
("fixedbrace", 0x7b),
"espressivo",
"accslashbigup",
"accslashbigdn",
"acciaccatura",
"appoggiatura",
"arpeggioshort",
"arpeggioarrowdown",
"arpeggioarrowup",
"trillwiggle",
"bowdown",
"bowup",
"bracketlowerlily",
"bracketupperlily",
"breath",
"revbreath",
"varbreath",
"revvarbreath",
"caesura",
"caesuracurved",
"breve",
"clefC",
"clefF",
"clefG",
"clefTAB",
"clefperc",
"clefCsmall",
"clefFsmall",
"clefGsmall",
"clefTABsmall",
"clefpercsmall",
"coda",
"varcoda",
"ditto",
"fermata",
"fermata0",
"fermata2",
"fermata3",
"fermataup",
"fermata0up",
"fermata2up",
"fermata3up",
"semiflat",
"semiflatslash",
"flat",
"flatup",
"flatdn",
"flatupdn",
"flatslash",
"flatslash2",
"sesquiflat",
"doubleflat",
"doubleflatslash",
"harmart",
"harmartfilled",
"harmnat",
"flagopen",
"flagthumb",
"headcrotchet",
"headminim",
"legato",
"portatoup",
"portatodn",
"mordentlower",
"mordentupper",
"mordentupperlong",
"mordentupperlower",
"upmordentupperlong",
"upmordentupperlower",
"mordentupperlongdown",
"downmordentupperlong",
"downmordentupperlower",
"mordentupperlongup",
"straightmordentupperlong",
"natural",
"naturalup",
"naturaldn",
"naturalupdn",
"peddot",
"pedP",
"pedd",
"pede",
"pedPed",
"pedPeddot",
"pedstar",
"peddash",
"repeatmarks",
"restdbllonga",
"restlonga",
"restbreve",
"restcrotchet",
"restcrotchetx",
"restdemi",
"resthemi",
"restquasi",
"restminimo",
"restquaver",
"restsemi",
"restsemibreveo",
"segno",
"semibreve",
"sforzando",
"sforzandodn",
"semisharp",
"semisharp3",
"sharp",
"sharp3",
"sharpup",
"sharpdn",
"sharpupdn",
"sesquisharp",
"doublesharp",
"staccatissup",
"staccatissdn",
"staccato",
"snappizz",
"stopping",
"tailquaverdn",
"tailquaverup",
"tailsemidn",
"tailsemiup",
"taildemidn",
"taildemiup",
"tailhemidn",
"tailhemiup",
"tailquasidn",
"tailquasiup",
"timeCbar",
"timeC",
"trill",
"turn",
"invturn",
"openarrowup",
"openarrowdown",
"openarrowleft",
"openarrowright",
"closearrowup",
"closearrowdown",
"closearrowleft",
"closearrowright",
"upedalheel",
"dpedalheel",
"upedaltoe",
"dpedaltoe",
"acc2",
"acc3",
"acc4",
"accr",
"accdot",
"accstar",
"diamondsemi",
"diamondminim",
"diamondcrotchet",
"trianglesemi",
"triangleminim",
"trianglecrotchet",
"crosssemi",
"crossminim",
"crosscrotchet",
"crosscircle",
"slashsemi",
"slashminim",
"slashcrotchet",
]
code = 0x21 # use sequential code points for anything not explicitly given
codes = {}
for i in range(0x7f, 0xa1):
codes[i] = None # avoid these code points
outlines = {}
for i in range(len(glyphlist)):
gid = glyphlist[i]
if type(gid) == types.TupleType:
# Allocate a specific code.
gid, thiscode = gid
else:
while codes.has_key(code):
code = code + 1
assert code < 0x100
thiscode = code
codes[thiscode] = gid
char = eval(gid)
if not outlines.has_key(gid):
outlines[gid] = get_ps_path(char)
xo, yo = char.origin
if (xo,yo) == (1000,1000):
# Hack: that particular origin is taken to indicate that
# the origin was not set to anything more sensible by
# GlyphContext.__init__, and so we instead use the
# centre of the glyph's bounding box.
x0, y0, x1, y1 = outlines[gid][0]
xo = (x0+x1)/2
yo = (y0+y1)/2
else:
xo = xo * char.scale / 3600. * 40
yo = yo * char.scale / 3600. * 40
if char.__dict__.has_key("hy"):
yo = (1000 - char.hy) * 40
if char.__dict__.has_key("hx"):
xo = char.hx * 40
dict = {}
if char.__dict__.has_key("width"):
dict["xw"] = char.width * 40 + xo
glyphlist[i] = (gid, gid, thiscode, xo, yo, None, None, None, None, dict)
writesfd("gonville-simple", "Gonville-Simple", "UnicodeBmp", 65537, outlines, glyphlist)
system("fontforge -lang=ff -c 'Open($1); CorrectDirection(); " + \
"Generate($2)' gonville-simple.sfd gonville-simple.otf")
elif len(args) == 2 and args[0] == "-lilycheck":
    # Run over the list of glyph names in another font file and list
    # the ones not known to this file. Expects one additional
    # argument which is the name of a font file.
    known = {}
    for g in lilyglyphlist:
        known[g[1]] = 1
    # Glyph names we deliberately do not provide: either exact strings
    # or compiled regexps matching whole families of names.
    import re
    ignored = [
        ".notdef",
        # I wasn't able to get LP to generate this glyph name at all; my
        # guess is that it's a legacy version of trill_element used in
        # older versions.
        "scripts.trilelement",
        # Longa notes are not supported.
        re.compile(r'noteheads\.[ud]M2'),
        # Solfa note heads are not supported.
        re.compile(r'noteheads\..*(do|re|mi|fa|so|la|ti)'),
        # Ancient music is not supported.
        re.compile(r'.*vaticana.*'),
        re.compile(r'.*mensural.*'),
        re.compile(r'.*petrucci.*'),
        re.compile(r'.*medicaea.*'),
        re.compile(r'.*solesmes.*'),
        re.compile(r'.*hufnagel.*'),
        "scripts.ictus",
        "scripts.uaccentus",
        "scripts.daccentus",
        "scripts.usemicirculus",
        "scripts.dsemicirculus",
        "scripts.circulus",
        "scripts.augmentum",
        "scripts.usignumcongruentiae",
        "scripts.dsignumcongruentiae",
    ]
    # Shell-quote the font filename, then have FontForge convert it to
    # .sfd so we can scan the glyph names as plain text.
    s = string.replace(args[1], "'", "'\\''")
    system("fontforge -lang=ff -c 'Open($1); Save($2)' '%s' temp.sfd >&/dev/null" % s)
    f = open("temp.sfd", "r")
    while 1:
        s = f.readline()
        if s == "": break
        ss = s.split()
        if len(ss) >= 2 and ss[0] == "StartChar:":
            name = ss[1]
            ok = known.get(name, 0)
            if not ok:
                # Not one of ours: see whether it is deliberately ignored.
                for r in ignored:
                    if type(r) == types.StringType:
                        match = (r == name)
                    else:
                        match = r.match(name)
                    if match:
                        ok = 1
                        break
            if not ok:
                print name
    f.close()
| [
"[email protected]"
] | |
5f8ebf56801116b7b154e43dd062074d9efadc4d | 683876019cad0b0d562ac7f9da8c679cb310cfb2 | /2022/day02/part1.py | dc2e419fe437d20ff49a1058524daffcb5a7b817 | [] | no_license | CoachEd/advent-of-code | d028bc8c21235361ad31ea55922625adf743b5c8 | 10850d5d477c0946ef73756bfeb3a6db241cc4b2 | refs/heads/master | 2023-05-11T05:20:26.951224 | 2023-05-09T18:54:16 | 2023-05-09T18:54:16 | 160,375,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | """
AoC
"""
import time
import sys
from copy import copy, deepcopy
# --- Advent of Code 2022, day 2, part 1 (rock / paper / scissors) ---
# Each input line is "<theirs> <ours>": the opponent plays A/B/C and we
# play X/Y/Z, both meaning Rock/Paper/Scissors.
start_secs = time.time()
print('')

# part 1
print()

# Read the strategy guide, one round per line. Use a context manager so
# the file handle is closed (it was previously left open).
with open("inp.txt", "r", encoding='utf-8') as my_file:
    rounds = [line.strip() for line in my_file]

# Score contributed by the shape we chose: Rock=1, Paper=2, Scissors=3.
shape = {'X': 1, 'Y': 2, 'Z': 3}
# Translate our column into the opponent's alphabet so shapes compare directly.
cvt = {'X': 'A', 'Y': 'B', 'Z': 'C'}
# For each (translated) shape, the opponent shape it defeats.
beats = {'A': 'C', 'B': 'A', 'C': 'B'}

score = 0
for s in rounds:
    theirs = s[0]
    ours = cvt[s[2]]
    if ours == theirs:
        outcome = 3   # draw
    elif beats[ours] == theirs:
        outcome = 6   # win
    else:
        outcome = 0   # loss
    score += outcome + shape[s[2]]
print(score)

print('')
end_secs = time.time()
print('--- ' + str(end_secs-start_secs) + ' secs ---')
| [
"[email protected]"
] | |
51b385655a8b99c13c1189ed0c0ab90b8fae6fac | 90656f5ce4efdf07ea0a61ad425673887e517371 | /ch6/glossary.py | f55aabbaac093e2af6ce6347e3a205192c1a1938 | [] | no_license | fjluartes/pcc | bec2e15e8b70aaee11f562cdf78c39e3a9844269 | cda4510937f5772d3a0a0b51609ab9554206aa06 | refs/heads/master | 2021-06-04T14:37:03.953253 | 2020-05-03T13:09:10 | 2020-05-03T13:09:10 | 144,001,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #!/usr/bin/env python
# glossary.py: Exercise 6-3, 6-4
# 18 Aug 2018 | fjgl
# 6-3, 6-4. Glossary
# print, list, for, if, dictionary
# Glossary for Exercises 6-3 and 6-4: each key is a Python term, each
# value its one-line definition.
glossary = {
    'print': 'Python keyword used to display data. (usually strings)',
    'list': 'A collection of items in a particular order.',
    'for': 'Python keyword used to loop into a list, tuple, or dictionary.',
    'if': 'Python keyword for conditional statements.',
    'dictionary': 'A collection of key-value pairs.',
    'tuple': 'A list whose items cannot be modified.',
    'del': 'Python keyword for removing an item in a list permanently.',
    'and': 'Python keyword for two statements that must be both true.',
    'or': 'Python keyword for two statements that at least one must be true.',
    'not': 'Python keyword for negating a conditional statement.'
}

# Display every term with its definition, one per line.
for term in glossary:
    print(term + ": " + glossary[term])
| [
"[email protected]"
] | |
6855bf744c24460e3fe3656f47a4255c1b27a487 | 3e94edf846cf379733f498f715b20e3fda064624 | /Common/UIMap/Register_Image_Frame_UIMap.py | 2633221f0381cfbc9111c7fd11e337a960369929 | [] | no_license | xhan-shannon/WebAutotest | eff01da40f079a42ff81b7507f17239a6687b96b | 46d79be3ba59bcc2b6dc99b2313e454b2ea7e68a | refs/heads/master | 2020-03-30T06:59:19.025699 | 2014-10-28T07:38:12 | 2014-10-28T07:38:12 | 25,856,122 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,745 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年7月9日
@author: stm
'''
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from Common.Utils.PD_DebugLog import PD_DEBUG_LOG, DEBUG_LOG_PRINT
from Common.Utils import utils_misc
class Register_Image_Frame_UIMap(object):
    """
    Map the PowerDirector "Register Image" dialog elements to a convenience
    interface: labels, input editors, buttons and frames.

    The constructor switches the driver into the dialog's iframe
    ("_medium_frame"), so every element lookup below is relative to that
    frame.
    """

    # OS Category ToolKit
    OS_CATG_TOOLKIT = "ToolKit"
    OS_CATEGORY = ("ToolKit", "SUSE", "Red Hat", "Other ISO", "AIX")

    # OS Descriptions; OS_DESC entries parallel the OS_CATEGORY entries.
    OS_DESC_TOOLKIT_V52 = "IBM ToolKit v52"
    OS_DESC_TOOLKIT_V53 = "IBM ToolKit v53"
    OS_DESC_TOOLKIT_V54 = "IBM ToolKit v54"
    OS_DESC_TOOLKIT_OTH = "IBM ToolKit other version"
    # NOTE(review): the "Other ISO" entry below is a plain string, not a
    # 1-tuple, so OS_DESC[3][0] yields a single character instead of a
    # description.  Kept as-is in case callers depend on it -- confirm
    # before changing.  ("unkonwn" typo is also preserved: it is runtime
    # data that may be compared elsewhere.)
    OS_DESC = (("IBM ToolKit v52", "IBM ToolKit v53", "IBM ToolKit v54",
                "IBM ToolKit other version"
                ),
               ("SUSE Linux Enterprise Server 10 for IBM Power",
                "SUSE Linux Enterprise Server 11 for IBM Power",
                "SUSE Linux Enterprise Server 12 for IBM Power",
                "SUSE Linux Enterprise Server other version"
                ),
               ("Red Hat Enterprise Linux Server release 5.7 for IBM Power",
                "Red Hat Enterprise Linux Server release 5.8 for IBM Power",
                "Red Hat Enterprise Linux Server release 6.0 for IBM Power",
                "Red Hat Enterprise Linux Server release 6.1 for IBM Power",
                "Red Hat Enterprise Linux Server release 6.2 for IBM Power",
                "Red Hat Enterprise Linux Server release 6.3 for IBM Power",
                "Red Hat Enterprise Linux Server release 6.4 for IBM Power",
                "Red Hat Enterprise Linux Server release 7.0 for IBM Power",
                "Red Hat Enterprise Linux Server other version"
                ),
               ("Other ISO, unkonwn version"
                ),
               ("AIX 5L 5.3",
                "AIX 6.1",
                "AIX other version")
               )

    def __init__(self, driver):
        self.driver = driver
        # Whether close_alert_and_get_its_text() accepts (True) or
        # dismisses (False) the next alert.
        self.accept_next_alert = True
        # All further lookups happen inside the dialog's iframe.
        self.driver.switch_to.frame("_medium_frame")

    def _is_element_present(self, by, locator):
        """Return True if an element matching (by, locator) exists.

        Uses the shortened script timeout so a missing element fails fast.
        Unlike the previous revision, the timeout is restored even when the
        lookup raises (the old code leaked the shortened timeout on failure).
        """
        utils_misc.set_script_timeout(self.driver)
        try:
            self.driver.find_element(by, locator)
            return True
        except Exception:
            return False
        finally:
            utils_misc.restore_script_timeout(self.driver)

    def get_image_alias_input(self):
        """Return the image alias input box element."""
        return self.driver.find_element(By.ID, "imageAlias")

    def get_image_file_location_input(self):
        """Return the image file location (OS path) input box element."""
        return self.driver.find_element(By.ID, "ospath")

    def get_submit_btn(self):
        """Return the submit button element.

        The element is at the bottom of the dialog form; the dialog must be
        fully visible (e.g. a large enough screen resolution) for the button
        to be interactable.
        """
        return self.driver.find_element(By.ID, 'btn_submit')

    def is_notify_msg_present(self):
        """Return True if a notification ("noty") message is displayed."""
        return self._is_element_present(By.CLASS_NAME, 'noty_message')

    def get_notify_msg(self):
        """Return the text of the displayed notification message.

        Raises the usual selenium NoSuchElementException if no message is
        present; the script timeout is restored either way.
        """
        utils_misc.set_script_timeout(self.driver)
        try:
            xpath = '//div/div[@class="noty_message"]/span'
            elem = self.driver.find_element(By.XPATH, xpath)
        finally:
            # The previous revision hard-coded set_script_timeout(30) here;
            # use the shared restore helper for consistency with the other
            # timeout-wrapped lookups.
            utils_misc.restore_script_timeout(self.driver)
        return elem.text

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently open."""
        # "except X, e" was Python-2-only syntax; the bound exception was
        # unused anyway.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Close the open alert and return its text.

        Accepts or dismisses according to accept_next_alert, which is reset
        to True afterwards regardless of outcome.
        """
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def get_file_type_img_radiobtn(self):
        """Return the IMG file-type radio button."""
        return self.get_file_type_radiobtn('rdo_img')

    def get_file_type_iso_radiobtn(self):
        """Return the ISO file-type radio button."""
        return self.get_file_type_radiobtn('rdo_iso')

    def get_file_type_radiobtn(self, id_name):
        """Return a file-type radio button (ISO or IMG) by element id."""
        return self.driver.find_element(By.ID, id_name)

    def select_osname_by_name(self, name):
        """Click the OS description option whose visible text equals name."""
        osname_list = self.driver.find_element(By.ID, 'osname')
        items = osname_list.find_elements_by_xpath('//option')
        for item in items:
            if item.text == name:
                item.click()
                break

    def select_ostype_by_value(self, value):
        """Click the OS category option with the given value attribute."""
        xpath = '//option[@value="%s"]' % value
        ostype_list = self.driver.find_element(By.ID, 'ostype')
        ostype_list.find_element_by_xpath(xpath).click()

    def select_osname_by_value(self, value):
        """Click the OS description option with the given value attribute."""
        xpath = '//*[@id="osname"]/option[@value="%s"]' % value
        osname_list = self.driver.find_element(By.ID, 'osname')
        osname_list.find_element_by_xpath(xpath).click()

    def get_alert_text(self):
        """Switch to the open alert and return its text."""
        return self.driver.switch_to_alert().text

    def is_loading_getvminfo(self):
        """Return True if the 'loading' overlay is displayed."""
        return self._is_element_present(By.XPATH,
                                        '//div[@class="loading_overlay"]/p')
| [
"[email protected]"
] | |
aa043fd4161fecee4583d0d5fb0ac8daefb634e1 | 1e25c0070126e6a95112e7ae6d9591e48d1693b4 | /api_test.py | 7884d33b1dfad842a1203a72458aa2ff97f83ef6 | [] | no_license | tomhigginsuom/datavault-api-test | 09b33432513caad416ed951482ac8c40f2b43e37 | 49edaec01ba4ca3069d1645011dc00f14b930b78 | refs/heads/master | 2021-01-10T02:43:29.279223 | 2016-03-08T10:49:11 | 2016-03-08T10:49:11 | 44,376,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,959 | py | import os
import shutil
import requests
import json
import time
# When True, each broker helper prints the call it is about to make.
verbose = False
# System details
# Broker endpoint and on-disk paths used by the test run; all paths are
# absolute and specific to the test machine.
server = "http://127.0.0.1:8080"
client_key = "datavault-webapp"
datapath = "/home/ubuntu/data/api-test-data"
archivepath = "/home/ubuntu/data/api-test-archive"
restoreDir = "restore"
# Test user details
# Identity and vault metadata sent with every broker request.
username = "user1"
vault_policy = "UNIVERSITY"
vault_dataset = "MOCK-DATASET-1"
vault_group = "CHSS"
# Utility functions
def create_filestore(storageClass, label, path):
    """Register a new filestore with the broker; return the parsed JSON reply."""
    if verbose:
        print("create_filestore : " + label)
    body = json.dumps({
        "storageClass": storageClass,
        "label": label,
        "properties": {"rootPath": path},
    })
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    return requests.post(server + '/datavault-broker/filestores',
                         data=body, headers=hdrs).json()
def list_filestores():
    """Return the broker's list of filestores as parsed JSON."""
    if verbose:
        print("list_filestores")
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    return requests.get(server + '/datavault-broker/filestores',
                        headers=hdrs).json()
def list_files(filestoreId):
    """Return the file listing of one filestore as parsed JSON."""
    if verbose:
        print("list_files : " + filestoreId)
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    url = server + '/datavault-broker/files/' + filestoreId
    return requests.get(url, headers=hdrs).json()
def create_archivestore(storageClass, label, path):
    """Register a new archive store with the broker; return the JSON reply."""
    if verbose:
        print("create_archivestore : " + label)
    body = json.dumps({
        "storageClass": storageClass,
        "label": label,
        "properties": {"rootPath": path},
    })
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    return requests.post(server + '/datavault-broker/archivestores',
                         data=body, headers=hdrs).json()
def create_vault(name, description, policyID, groupID, datasetID):
    """Create a vault on the broker; return the parsed JSON reply."""
    if verbose:
        print("create_vault : " + name)
    body = json.dumps({
        "name": name,
        "description": description,
        "policyID": policyID,
        "groupID": groupID,
        "datasetID": datasetID,
    })
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    return requests.post(server + '/datavault-broker/vaults',
                         data=body, headers=hdrs).json()
def list_vaults():
    """Return the broker's list of vaults as parsed JSON."""
    if verbose:
        print("list_vaults")
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    return requests.get(server + '/datavault-broker/vaults',
                        headers=hdrs).json()
def create_deposit(vaultId, note, filePath):
    """Start a deposit of filePath into the given vault; return the JSON reply."""
    if verbose:
        print("create_deposit : " + note)
    body = json.dumps({"note": note, "filePath": filePath})
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    url = server + '/datavault-broker/vaults/' + vaultId + "/deposits"
    return requests.post(url, data=body, headers=hdrs).json()
def list_vault_deposits(vaultId):
    """Return all deposits of one vault as parsed JSON."""
    if verbose:
        print("list_vault_deposits : " + vaultId)
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    url = server + '/datavault-broker/vaults/' + vaultId + "/deposits"
    return requests.get(url, headers=hdrs).json()
def get_deposit(vaultId, depositId):
    """Return one deposit record (including its status) as parsed JSON."""
    if verbose:
        print("get_deposit : " + vaultId + "/" + depositId)
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    url = (server + '/datavault-broker/vaults/' + vaultId +
           "/deposits/" + depositId)
    return requests.get(url, headers=hdrs).json()
def create_restore(vaultId, depositId, note, restorePath):
    """Ask the broker to restore a completed deposit; return the JSON reply."""
    if verbose:
        print("create_restore : " + note)
    body = json.dumps({"note": note, "restorePath": restorePath})
    hdrs = {
        'Content-type': 'application/json',
        'X-UserID': username,
        'X-Client-Key': client_key,
    }
    url = (server + '/datavault-broker/vaults/' + vaultId +
           "/deposits/" + depositId + "/restore")
    return requests.post(url, data=body, headers=hdrs).json()
# Init the test environment
def setup():
    """Reset the on-disk test environment: wipe old data, regenerate the
    fixture files, and register the archive store with the broker."""
    clear_data()
    generate_test_data()
    create_archivestore("org.datavaultplatform.common.storage.impl.LocalFileSystem",
                        "Test archive", archivepath)
def clear_data():
    """Empty both the source-data and the archive directories."""
    for target in (datapath, archivepath):
        clear_directory(target)
def clear_directory(path):
    """Remove everything inside *path*, leaving *path* itself in place.

    Files are unlinked and top-level subdirectories removed recursively;
    each removal is logged to stdout.
    """
    for root, dirs, files in os.walk(path):
        for entry in files:
            print("unlink: " + entry)
            os.unlink(os.path.join(root, entry))
        for entry in dirs:
            print("rmtree: " + entry)
            shutil.rmtree(os.path.join(root, entry))
def generate_test_data():
    """Create the restore directory plus a set of fixed-size binary fixtures
    (250M down to 2M) inside the data directory."""
    os.mkdir(datapath + "/" + "restore")
    for size in ("250M", "100M", "50M", "25M", "5M", "2M"):
        create_file(size, datapath + "/" + "test_data_" + size + "B.bin")
def create_file(size, path):
    """Allocate a file of the given size (fallocate syntax, e.g. "250M")
    at *path*; logs the path to stdout."""
    print("create_file: " + path)
    os.system("fallocate -l " + size + " " + path)
def dump_info():
    """Print a human-readable summary of all filestores, vaults and their
    deposits as currently known to the broker."""
    print("")
    print("System state")
    print("------------")
    print("")

    print("Filestores")
    print("----------")
    stores = list_filestores()
    print("Count: " + str(len(stores)))
    for store in stores:
        print("Filestore: " + store['id'] + " Label: " + store['label'])
    print("")

    print("Vaults")
    print("------")
    vaults = list_vaults()
    print("Count: " + str(len(vaults)))
    for v in vaults:
        print("Vault: " + v['id'] + " Name: " + v['name'])
        for dep in list_vault_deposits(v['id']):
            print("Deposit: " + dep['id'] + " Status: " + dep['status'] + " Note: " + dep['note'])
        print("")
# Test script body
print("API test : " + username)
setup()

# Register the local test-data directory as a filestore.
filestore = create_filestore("org.datavaultplatform.common.storage.impl.LocalFileSystem", "Test data source", datapath)
filestoreId = filestore['id']
print("Created file store: " + filestoreId)

# Carry out some deposits and then restore the test data:
# four vaults, each receiving one deposit per regular file in the store.
tracked_deposits = []
for x in range(0,4):
    vault = create_vault("Test vault " + str(x), "Automatically created vault", vault_policy, vault_group, vault_dataset)
    vaultId = vault['id']
    print("Created vault with ID: " + vaultId)
    files = list_files(filestoreId)
    for file in files:
        if not file['isDirectory']:
            print("File: " + file['key'] + " Name: " + file['name'])
            deposit = create_deposit(vaultId, "Test deposit - " + file['name'], file['key'])
            tracked_deposits.append((vaultId, deposit['id']))

# Poll the broker until every tracked deposit completes; as soon as one
# reaches COMPLETE, stop tracking it and kick off a restore for it.
while(len(tracked_deposits) > 0):
    print("")
    print("Tracking " + str(len(tracked_deposits)) + " deposits:")
    # Iterate over a snapshot: removing from the list being iterated would
    # silently skip the element that follows each removal, delaying its
    # status check by a whole polling round.
    for tracked_deposit in list(tracked_deposits):
        vaultId = tracked_deposit[0]
        depostId = tracked_deposit[1]
        deposit = get_deposit(vaultId, depostId)
        print("Deposit: " + vaultId + "/" + depostId + " - " + deposit['status'])
        if deposit['status'] == "COMPLETE":
            tracked_deposits.remove(tracked_deposit)
            create_restore(vaultId, depostId, "Test restore", filestoreId + "/" + restoreDir)
    time.sleep(5)

# Carry out a large number of small deposits: 400 vaults, each with one
# deposit of the 2MB fixture file only.
for x in range(0,400):
    vault = create_vault("Test small vault " + str(x), "Automatically created small vault", vault_policy, vault_group, vault_dataset)
    vaultId = vault['id']
    print("Created vault with ID: " + vaultId)
    files = list_files(filestoreId)
    for file in files:
        if not file['isDirectory']:
            if file['name'] == "test_data_2MB.bin":
                print("File: " + file['key'] + " Name: " + file['name'])
                deposit = create_deposit(vaultId, "Test small deposit - " + file['name'], file['key'])

dump_info()
| [
"[email protected]"
] | |
8d906a41e12288e837f0ae0d69434c997d81bfbd | 2640a522e5d9bacc7597ede14cb13fb928e779be | /source/ops/normalTool.py | 19e01ed86cc8c42f14464e2aabc405d9b28cf46a | [
"Apache-2.0"
] | permissive | miximixim/blenderNormalBrush | b17f9087d41d8d760fe35e148cbdeba80b27cbf4 | c02180ec63e49006589e90be63a3e02dc773d32c | refs/heads/main | 2023-02-26T12:39:36.473230 | 2021-02-03T20:04:09 | 2021-02-03T20:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,713 | py | #Copyright 2021 Mark McKay
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import bpy
import bpy.utils.previews
import os
import bgl
import blf
import gpu
import mathutils
import math
from gpu_extras.batch import batch_for_shader
from bpy_extras import view3d_utils
class NormalToolSettings(bpy.types.PropertyGroup):
    """Scene-level settings for the normal brush.

    Registered on bpy.types.Scene as ``normal_brush_props`` (see register()).
    """
    # How the brush chooses the target normal direction.
    brush_type : bpy.props.EnumProperty(
        items=(
            ('FIXED', "Fixed", "Normals are in a fixed direction"),
            ('ATTRACT', "Attract", "Normals point toward target object"),
            ('REPEL', "Repel", "Normals point away from target object"),
            ('VERTEX', "Vertex", "Get normal values from mesh vertices")
        ),
        default='FIXED'
    )
    # Brush falloff radius in world units.
    radius : bpy.props.FloatProperty(
        name="Radius", description="Radius of brush", default = 1, min=0, soft_max = 4
    )
    # Blend factor applied per dab (0 = no effect, 1 = full rotation).
    strength : bpy.props.FloatProperty(
        name="Strength", description="Amount to adjust mesh normal", default = 1, min=0, max = 1
    )
    # Display-only length of the drawn normal lines.
    normal_length : bpy.props.FloatProperty(
        name="Normal Length", description="Display length of normal", default = 1, min=0, soft_max = 1
    )
    # NOTE(review): currently unused by dab_brush/draw_callback (the reads
    # are commented out there) -- confirm before removing.
    selected_only : bpy.props.BoolProperty(
        name="Selected Only", description="If true, affect only selected vertices", default = True
    )
    # World-space direction used in FIXED mode; set by the picker operator.
    normal : bpy.props.FloatVectorProperty(
        name="Normal",
        description="Direction of normal in Fixed mode",
        default = (1, 0, 0),
        subtype="DIRECTION"
#        update=normal_update
    )
    # UI toggle: show the normal as three numeric fields vs. a direction ball.
    normal_exact : bpy.props.BoolProperty(
        name="Exact normal", description="Display normal as exact coordinates", default = True
    )
    # Skip faces whose normal points away from the viewer.
    front_faces_only : bpy.props.BoolProperty(
        name="Front Faces Only", description="Only affect normals on front facing faces", default = True
    )
    # Reference object for ATTRACT/REPEL modes.
    target : bpy.props.PointerProperty(name="Target", description="Object Attract and Repel mode reference", type=bpy.types.Object)
#---------------------------
# Number of segments used to tessellate the brush-cursor circle.
circleSegs = 64

# Closed unit circle in the XY plane (first point repeated at the end),
# fed to the LINE_STRIP batch in draw_callback.
coordsCircle = []
for _i in range(circleSegs + 1):
    _theta = (2 * math.pi * _i) / circleSegs
    coordsCircle.append((math.sin(_theta), math.cos(_theta), 0))

# Unit segment along +Z, used to draw a single normal line.
coordsNormal = [(0, 0, 0), (0, 0, 1)]

# Cached axis vectors.
vecZ = mathutils.Vector((0, 0, 1))
vecX = mathutils.Vector((1, 0, 0))
#Find matrix that will rotate Z axis to point along normal
#coord - point in world space
#normal - normal in world space
#Find matrix that will rotate Z axis to point along normal
#coord - point in world space
#normal - normal in world space
def calc_vertex_transform_world(pos, norm):
    """Return a 4x4 world matrix located at *pos* whose +Z axis points
    along *norm*.

    pos -- point in world space
    norm -- unit normal in world space (assumed normalized -- TODO confirm)
    """
    # Rotation axis: perpendicular to both Z and the normal.  When the
    # normal is (anti)parallel to Z the cross product degenerates, so fall
    # back to the X axis.
    axis = norm.cross(vecZ)
    if axis.length_squared < .0001:
        axis = mathutils.Vector(vecX)
    else:
        axis.normalize()

    # Clamp the dot product: floating point error can push it slightly
    # outside [-1, 1], which would make math.acos raise ValueError.
    dot = max(-1.0, min(1.0, norm.dot(vecZ)))
    angle = -math.acos(dot)

    quat = mathutils.Quaternion(axis, angle)
    mR = quat.to_matrix()
    mR.resize_4x4()
    mT = mathutils.Matrix.Translation(pos)
    return mT @ mR
#Calc matrix that maps from world space to a particular vertex on mesh
#coord - vertex position in local space
#normal - vertex normal in local space
#Calc matrix that maps from world space to a particular vertex on mesh
#coord - vertex position in local space
#normal - vertex normal in local space
def calc_vertex_transform(obj, coord, normal):
    """Return the world transform of a local-space vertex (position plus
    normal), with +Z following the vertex normal."""
    world_pos = obj.matrix_world @ coord

    # Normals transform by the inverted-then-transposed local-to-world
    # matrix; normalize afterwards to discard any scaling.
    norm_mtx = obj.matrix_world.copy()
    norm_mtx.invert()
    norm_mtx.transpose()

    world_norm = normal.copy()
    world_norm.resize_4d()
    world_norm.w = 0
    world_norm = norm_mtx @ world_norm
    world_norm.resize_3d()
    world_norm.normalize()

    return calc_vertex_transform_world(world_pos, world_norm)
def calc_gizmo_transform(obj, coord, normal, ray_origin):
    """Like calc_vertex_transform, but scaled so the gizmo keeps a constant
    apparent size: 1/5 of the distance from the eye (ray_origin)."""
    world_pos = obj.matrix_world @ coord
    scale = (world_pos - ray_origin).length / 5
    vertex_mtx = calc_vertex_transform(obj, coord, normal)
    return vertex_mtx @ mathutils.Matrix.Scale(scale, 4)
def draw_callback(self, context):
    """Viewport draw handler: renders the brush cursor (circle + brush
    direction) and a yellow line for every split normal of each selected
    mesh.  *self* is the operator that registered the handler."""
    ctx = bpy.context
    region = context.region
    rv3d = context.region_data
    viewport_center = (region.x + region.width / 2, region.y + region.height / 2)
    view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, viewport_center)
    ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, viewport_center)
    # One shader shared by both batches; color is set per draw below.
    shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
    batchLine = batch_for_shader(shader, 'LINES', {"pos": coordsNormal})
    batchCircle = batch_for_shader(shader, 'LINE_STRIP', {"pos": coordsCircle})
    shader.bind();
    bgl.glEnable(bgl.GL_DEPTH_TEST)
    #Draw cursor
    if self.show_cursor:
        brush_radius = context.scene.normal_brush_props.radius
        # Circle tangent to the surface under the cursor, scaled to the
        # brush radius.
        m = calc_vertex_transform_world(self.cursor_pos, self.cursor_normal);
        mS = mathutils.Matrix.Scale(brush_radius, 4)
        m = m @ mS
        #Tangent to mesh
        gpu.matrix.push()
        gpu.matrix.multiply_matrix(m)
#        shader.uniform_float("color", (1, 0, 1, 1))
#        batchLine.draw(shader)
        shader.uniform_float("color", (1, 0, 1, 1))
        batchCircle.draw(shader)
        gpu.matrix.pop()
        #Brush normal direction
        gpu.matrix.push()
        brush_normal = context.scene.normal_brush_props.normal
        m = calc_vertex_transform_world(self.cursor_pos, brush_normal);
        gpu.matrix.multiply_matrix(m)
        shader.uniform_float("color", (0, 1, 1, 1))
        batchLine.draw(shader)
        gpu.matrix.pop()
    #Draw editable normals
    shader.uniform_float("color", (1, 1, 0, 1))
    selOnly = context.scene.normal_brush_props.selected_only
    normLength = context.scene.normal_brush_props.normal_length
    mS = mathutils.Matrix.Scale(normLength, 4)
    for obj in ctx.selected_objects:
        if obj.type == 'MESH':
            success = obj.update_from_editmode()
            mesh = obj.data
            mesh.calc_normals_split()
            # One line per loop, oriented along the loop's split normal.
            for l in mesh.loops:
#                if not (selOnly and not v.select):
                v = mesh.vertices[l.vertex_index]
                m = calc_vertex_transform(obj, v.co, l.normal)
                m = m @ mS
                gpu.matrix.push()
                gpu.matrix.multiply_matrix(m)
                shader.uniform_float("color", (1, 1, 0, 1))
                batchLine.draw(shader)
                gpu.matrix.pop()
    bgl.glDisable(bgl.GL_DEPTH_TEST)
#---------------------------
class ModalDrawOperator(bpy.types.Operator):
    """Modal operator implementing the normal brush: while running, left-drag
    paints new split normals onto selected meshes; Enter finishes and
    Esc/right-click cancels."""
    bl_idname = "kitfox.normal_tool"
    bl_label = "Normal Tool Kitfox"
    bl_options = {"REGISTER", "UNDO"}
    # True while the left mouse button is held down.
    dragging = False
    # Last raycast hit under the cursor (world space), used by draw_callback.
    cursor_pos = None
    show_cursor = False
    bm = None
    def dab_brush(self, context, event):
        """Apply one brush dab: raycast under the mouse and, for every loop
        of every selected mesh inside the brush radius, rotate its split
        normal toward the brush direction by strength * falloff."""
        mouse_pos = (event.mouse_region_x, event.mouse_region_y)
        targetObj = context.scene.normal_brush_props.target
        # if targetObj != None:
        # print("^^^Tool property target: " + targetObj.name)
        # else:
        # print("^^^Tool property target: None")
        ctx = bpy.context
        region = context.region
        rv3d = context.region_data
        # coord = event.mouse_region_x, event.mouse_region_y
        # viewport_center = (region.x + region.width / 2, region.y + region.height / 2)
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, mouse_pos)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, mouse_pos)
        viewlayer = bpy.context.view_layer
        result, location, normal, index, object, matrix = context.scene.ray_cast(viewlayer.depsgraph, ray_origin, view_vector)
        # print("location " + str(location))
        # NOTE(review): center/center_count/selOnly are currently unused.
        center = None
        center_count = 0
        selOnly = context.scene.normal_brush_props.selected_only
        radius = context.scene.normal_brush_props.radius
        strength = context.scene.normal_brush_props.strength
        brush_type = context.scene.normal_brush_props.brush_type
        brush_normal = context.scene.normal_brush_props.normal
        target = context.scene.normal_brush_props.target
        front_faces_only = context.scene.normal_brush_props.front_faces_only
        if result:
            for obj in ctx.selected_objects:
                if obj.type == 'MESH':
                    # print("Updating mesh " + obj.name)
                    mesh = obj.data
                    mesh.use_auto_smooth = True
                    mesh.calc_normals_split()
                    # print("num mesh loops: " + str(len(mesh.loops))
                    # New normal per loop, in mesh order, for
                    # normals_split_custom_set below.
                    normals = []
                    for p in mesh.polygons:
                        for loop_idx in p.loop_indices:
                            l = mesh.loops[loop_idx]
                            # normals.append(brush_normal)
                            v = mesh.vertices[l.vertex_index]
                            pos = mathutils.Vector(v.co)
                            wpos = obj.matrix_world @ pos
                            # print ("---")
                            # print ("mtx wrld " + str(obj.matrix_world))
                            # print ("pos " + str(pos))
                            # print ("wpos " + str(wpos))
                            #Normal transform is (l2w ^ -1) ^ -1 ^ T
                            # World->local for normals: transpose of the
                            # local-to-world matrix (result normalized below).
                            w2ln = obj.matrix_world.copy()
                            w2ln.transpose()
                            # Desired normal direction in local space, or
                            # None when the mode has no valid target.
                            nLocal = None
                            if brush_type == "FIXED":
                                nLocal = brush_normal.to_4d()
                                nLocal.w = 0
                                nLocal = w2ln @ nLocal
                                nLocal = nLocal.to_3d()
                                nLocal.normalize()
                            elif brush_type == "ATTRACT":
                                if target != None:
                                    m = obj.matrix_world.copy()
                                    m.invert()
                                    targetLoc = m @ target.matrix_world.translation
                                    nLocal = targetLoc - pos
                                    nLocal.normalize()
                            elif brush_type == "REPEL":
                                if target != None:
                                    m = obj.matrix_world.copy()
                                    m.invert()
                                    targetLoc = m @ target.matrix_world.translation
                                    nLocal = pos - targetLoc
                                    nLocal.normalize()
                                    # print("Setting nLocal")
                                    # nLocal = mathutils.Vector(v.normal)
                            elif brush_type == "VERTEX":
                                nLocal = mathutils.Vector(v.normal)
                            # print("brush norm local " + str(nLocal))
                            # print("l2w " + str(obj.matrix_world))
                            # print("w2ln " + str(w2ln))
                            # print("nLocal " + str(nLocal))
                            offset = location - wpos
                            # print ("offset " + str(offset))
                            # offset.length_squared / radius * radius
                            # Linear falloff: 1 at the hit point, 0 at the
                            # brush radius.
                            t = 1 - offset.length / radius
                            # print ("t " + str(t))
                            view_local = w2ln @ view_vector
                            # if p.normal.dot(view_local) < 0 && front_faces_only:
                            #     pass
                            # print("loop norm " + str(l.normal))
                            # Keep the old normal when outside the radius,
                            # when no brush direction exists, or when the
                            # face is back-facing and front_faces_only is on.
                            if t <= 0 or nLocal == None or (p.normal.dot(view_local) > 0 and front_faces_only):
                                normals.append(l.normal)
                            else:
                                # Rotate the old normal toward nLocal by a
                                # fraction of the angle between them.
                                axis = l.normal.cross(nLocal)
                                angle = nLocal.angle(l.normal)
                                # print("->axis " + str(axis))
                                # print("->angle " + str(math.degrees(angle)))
                                q = mathutils.Quaternion(axis, angle * t * strength)
                                m = q.to_matrix()
                                newNorm = m @ l.normal
                                # print("->new norm " + str(newNorm))
                                normals.append(newNorm)
                    mesh.normals_split_custom_set(normals)
    def mouse_move(self, context, event):
        """Update the brush cursor from a raycast under the mouse and, while
        dragging, apply a dab."""
        mouse_pos = (event.mouse_region_x, event.mouse_region_y)
        ctx = bpy.context
        region = context.region
        rv3d = context.region_data
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, mouse_pos)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, mouse_pos)
        viewlayer = bpy.context.view_layer
        result, location, normal, index, object, matrix = context.scene.ray_cast(viewlayer.depsgraph, ray_origin, view_vector)
        #Brush cursor display
        if result:
            self.show_cursor = True
            self.cursor_pos = location
            self.cursor_normal = normal
            self.cursor_object = object
            self.cursor_matrix = matrix
        else:
            self.show_cursor = False
        # print ("dragging: " + str(self.dragging));
        if self.dragging:
            self.dab_brush(context, event)
    def mouse_down(self, context, event):
        """Handle left-button press/release: start dragging (and dab once)
        when pressing over a selected object; stop dragging on release."""
        if event.value == "PRESS":
            mouse_pos = (event.mouse_region_x, event.mouse_region_y)
            region = context.region
            rv3d = context.region_data
            view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, mouse_pos)
            ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, mouse_pos)
            viewlayer = bpy.context.view_layer
            result, location, normal, index, object, matrix = context.scene.ray_cast(viewlayer.depsgraph, ray_origin, view_vector)
            # Clicks that miss, or hit an unselected object, fall through so
            # normal viewport selection still works.
            if result == False or object.select_get() == False:
                return {'PASS_THROUGH'}
            # print ("m DOWN")
            self.dragging = True
            self.dab_brush(context, event)
        elif event.value == "RELEASE":
            # print ("m UP")
            self.dragging = False
        return {'RUNNING_MODAL'}
    def modal(self, context, event):
        """Modal event loop: paint on mouse events, resize the brush with
        PageUp/PageDown or the bracket keys, finish on Enter, cancel on
        Esc/right-click; navigation events pass through."""
        #We are not receiving a mouse up event after editing the normal,
        # so check for it here
        # print ("modal normal_changed: " + str(context.scene.normal_brush_props.normal_changed))
        # if context.scene.normal_brush_props.normal_changed:
        # print ("reactng to normal chagne!!!: ")
        # self.dragging = False
        # context.scene.normal_brush_props.normal_changed = False;
        #
        context.area.tag_redraw()
        if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
            # allow navigation
            return {'PASS_THROUGH'}
        elif event.type == 'MOUSEMOVE':
            self.mouse_move(context, event)
            if self.dragging:
                return {'RUNNING_MODAL'}
            else:
                return {'PASS_THROUGH'}
        elif event.type == 'LEFTMOUSE':
            return self.mouse_down(context, event)
        # return {'PASS_THROUGH'}
        # return {'RUNNING_MODAL'}
        # elif event.type in {'Z'}:
        # #Kludge to get around FloatVectorProperty(subtype='DIRECTION') error
        # self.dragging = False
        # return {'RUNNING_MODAL'}
        #
        elif event.type in {'RET'}:
            # Finish: remove the draw handler and keep the painted normals.
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            return {'FINISHED'}
        elif event.type in {'PAGE_UP', 'RIGHT_BRACKET'}:
            if event.value == "PRESS":
                brush_radius = context.scene.normal_brush_props.radius
                brush_radius = brush_radius + .1
                context.scene.normal_brush_props.radius = brush_radius
            return {'RUNNING_MODAL'}
        elif event.type in {'PAGE_DOWN', 'LEFT_BRACKET'}:
            if event.value == "PRESS":
                brush_radius = context.scene.normal_brush_props.radius
                # Never shrink below 0.1 to keep the falloff well-defined.
                brush_radius = max(brush_radius - .1, .1)
                context.scene.normal_brush_props.radius = brush_radius
            return {'RUNNING_MODAL'}
        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            # print("norm tool cancelled")
            return {'CANCELLED'}
        return {'PASS_THROUGH'}
        # return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        """Start the modal session: install the viewport draw handler and
        register this operator as a modal handler.  Only valid in a 3D view."""
        if context.area.type == 'VIEW_3D':
            # the arguments we pass the the callback
            args = (self, context)
            # Add the region OpenGL drawing callback
            # draw in view space with 'POST_VIEW' and 'PRE_VIEW'
            self._context = context
            self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback, args, 'WINDOW', 'POST_VIEW')
            context.area.tag_redraw()
            context.window_manager.modal_handler_add(self)
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "View3D not found, cannot run operator")
            return {'CANCELLED'}
#---------------------------
class NormalPickerOperator(bpy.types.Operator):
    """Eyedropper-style modal operator: left-click a surface to copy its
    normal into the brush's FIXED-mode direction."""
    bl_idname = "kitfox.nt_pick_normal"
    bl_label = "Pick Normal"
    # True while the eyedropper is armed (between invoke and the click).
    picking = False
    def mouse_down(self, context, event):
        """Raycast under the mouse; on a hit, store the surface normal in
        the brush settings and redraw the viewport."""
        mouse_pos = (event.mouse_region_x, event.mouse_region_y)
        ctx = bpy.context
        region = context.region
        rv3d = context.region_data
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, mouse_pos)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, mouse_pos)
        viewlayer = bpy.context.view_layer
        result, location, normal, index, object, matrix = context.scene.ray_cast(viewlayer.depsgraph, ray_origin, view_vector)
        if result:
            # print("--picked " + str(normal))
            context.scene.normal_brush_props.normal = normal
            context.area.tag_redraw()
    def modal(self, context, event):
        """Modal loop: show the eyedropper cursor while armed, pick on
        left-click, cancel on Esc/right-click."""
        if event.type == 'MOUSEMOVE':
            if self.picking:
                context.window.cursor_set("EYEDROPPER")
            else:
                context.window.cursor_set("DEFAULT")
            return {'PASS_THROUGH'}
        elif event.type == 'LEFTMOUSE':
            self.picking = False
            self.mouse_down(context, event)
            # Clean up the draw handler installed by invoke().
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            context.window.cursor_set("DEFAULT")
            return {'FINISHED'}
        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            self.picking = False
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            print("pick target object cancelled")
            context.window.cursor_set("DEFAULT")
            return {'CANCELLED'}
        else:
            return {'PASS_THROUGH'}
    def invoke(self, context, event):
        """Arm the picker: install the draw handler, switch to the
        eyedropper cursor and go modal.  Only valid in a 3D view."""
        if context.area.type == 'VIEW_3D':
            args = (self, context)
            # Add the region OpenGL drawing callback
            # draw in view space with 'POST_VIEW' and 'PRE_VIEW'
            self._context = context
            self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback, args, 'WINDOW', 'POST_VIEW')
            context.window_manager.modal_handler_add(self)
            context.window.cursor_set("EYEDROPPER")
            self.picking = True
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "View3D not found, cannot run operator")
            return {'CANCELLED'}
#---------------------------
class NormalToolPanel(bpy.types.Panel):
    """Tool-shelf panel exposing a single button that starts the normal
    tool.  (Registration of this panel is currently disabled in the
    original register() call.)"""
    bl_label = "Normal Tool Panel"
    bl_idname = "OBJECT_PT_normal_tool"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = "objectmode"

    def draw(self, context):
        """Draw one button that invokes the modal normal-tool operator."""
        # Removed an unused local that read context.object for no purpose.
        row = self.layout.row()
        row.operator("kitfox.normal_tool")
#---------------------------
#class NormalToolTool(bpy.types.WorkSpaceTool):
# bl_space_type = 'VIEW_3D'
# bl_context_mode = 'OBJECT'
# # The prefix of the idname should be your add-on name.
# bl_idname = "kitfox.normal_tool_tool"
# bl_label = "Normal Tool"
# bl_description = (
# "Adjust the normals of the selected object"
# )
# bl_icon = "ops.generic.select_circle"
# bl_widget = None
## bl_keymap = (
## ("view3d.select_circle", {"type": 'LEFTMOUSE', "value": 'PRESS'},
## {"properties": [("wait_for_input", False)]}),
## ("view3d.select_circle", {"type": 'LEFTMOUSE', "value": 'PRESS', "ctrl": True},
## {"properties": [("mode", 'SUB'), ("wait_for_input", False)]}),
## )
# bl_keymap = (
# ("kitfox.normal_tool", {"type": 'LEFTMOUSE', "value": 'PRESS'},
# {"properties": [("wait_for_input", False)]}),
# )
# def draw_settings(context, layout, tool):
# props = tool.operator_properties("kitfox.normal_tool")
## layout.prop(props, "mode")
#
#---------------------------
class NormalToolPropsPanel(bpy.types.Panel):
    """N-panel (sidebar, "Kitfox" tab) with the normal-brush launcher,
    the brush sliders, and the per-mode options."""
    bl_label = "Normal Brush"
    bl_idname = "OBJECT_PT_normal_tool_props"
#    bl_region_type = 'TOOL_PROPS'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_context = "objectmode"
    bl_category = "Kitfox"

    def draw(self, context):
        """Draw the launcher button, common brush settings, the mode
        selector, then the options specific to the selected mode."""
        settings = context.scene.normal_brush_props
        layout = self.layout
        icons = preview_collections["main"]

        col = layout.column()
        col.operator("kitfox.normal_tool",
                     text="Start Normal Tool",
                     icon_value=icons["normalTool"].icon_id)
        col.prop(settings, "strength")
        col.prop(settings, "normal_length")
        col.prop(settings, "radius")
        col.prop(settings, "front_faces_only")

        layout.row().prop(settings, "brush_type", expand=True)

        col = layout.column()
        mode = settings.brush_type
        if mode == "FIXED":
            # FIXED mode: edit the normal either as exact XYZ fields or as
            # a direction widget, plus the eyedropper picker.
            if settings.normal_exact:
                col.prop(settings, "normal", expand=True)
            else:
                col.label(text="Normal:")
                col.prop(settings, "normal", text="")
            col.prop(settings, "normal_exact")
            col.operator("kitfox.nt_pick_normal", icon="EYEDROPPER")
        elif mode in ("ATTRACT", "REPEL"):
            # Both modes need a reference object to point toward/away from.
            col.prop(settings, "target")
#---------------------------
# Icon preview collections loaded in register(), keyed by collection name
# ("main" holds the toolbar icon).
preview_collections = {}
def register():
    """Register the add-on classes, attach the settings PointerProperty to
    the Scene, and load the icon previews.

    NormalToolPanel and the workspace tool remain unregistered (disabled in
    a previous revision)."""
    for cls in (NormalToolSettings,
                NormalPickerOperator,
                ModalDrawOperator,
                NormalToolPropsPanel):
        bpy.utils.register_class(cls)

    bpy.types.Scene.normal_brush_props = bpy.props.PointerProperty(type=NormalToolSettings)

    # Load icons: running this file directly (script development) uses the
    # source-tree icon path, an installed add-on uses the relative one.
    rel_icons = "../../source/icons" if __name__ == "__main__" else "../icons"
    icons_dir = os.path.join(os.path.dirname(__file__), rel_icons)
    print("icons dir: " + str(icons_dir))
    pcoll = bpy.utils.previews.new()
    pcoll.load("normalTool", os.path.join(icons_dir, "normalTool.png"), 'IMAGE')
    preview_collections["main"] = pcoll
def unregister():
    """Undo everything register() set up: classes, scene property, icons."""
    for cls in (
        NormalToolSettings,
        NormalPickerOperator,
        ModalDrawOperator,
        # NormalToolPanel and NormalToolTool were never registered.
        NormalToolPropsPanel,
    ):
        bpy.utils.unregister_class(cls)

    del bpy.types.Scene.normal_brush_props

    # Unload icons, leaving the collection dict empty.
    while preview_collections:
        _, pcoll = preview_collections.popitem()
        bpy.utils.previews.remove(pcoll)
if __name__ == "__main__":
register()
| [
"[email protected]"
] | |
03cdc0781de9e62e42210b4bea3bb9593385ff63 | ac216a2cc36f91625e440247986ead2cd8cce350 | /appengine/cr-buildbucket/test/swarmingcfg_test.py | d3c1936a13e099c1ab42df70cdfe8144b4ebce74 | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 20,193 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from components import utils
utils.fix_protobuf_package()
from google import protobuf
from parameterized import parameterized
from components import config as config_component
from testing_utils import testing
from proto import project_config_pb2
from proto import service_config_pb2
from test import config_test
import errors
import swarmingcfg
class ProjectCfgTest(testing.AppengineTestCase):
def cfg_test(self, swarming_text, mixins_text, expected_errors):
swarming_cfg = project_config_pb2.Swarming()
protobuf.text_format.Merge(swarming_text, swarming_cfg)
buildbucket_cfg = project_config_pb2.BuildbucketCfg()
protobuf.text_format.Merge(mixins_text, buildbucket_cfg)
mixins = {m.name: m for m in buildbucket_cfg.builder_mixins}
ctx = config_component.validation.Context()
swarmingcfg.validate_project_cfg(swarming_cfg, mixins, True, ctx)
self.assert_errors(ctx, expected_errors)
def assert_errors(self, ctx, expected_errors):
self.assertEqual(
map(config_test.errmsg, expected_errors),
ctx.result().messages
)
def test_valid(self):
self.cfg_test(
'''
builder_defaults {
swarming_host: "example.com"
swarming_tags: "master:master.a"
dimensions: "cores:8"
dimensions: "60:cores:64"
dimensions: "pool:default"
dimensions: "cpu:x86-64"
service_account: "bot"
}
builders {
name: "release"
swarming_tags: "a:b'"
dimensions: "os:Linux"
dimensions: "cpu:"
service_account: "[email protected]"
caches {
name: "git_chromium"
path: "git_cache"
}
recipe {
name: "foo"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
properties: "a:b'"
properties_j: "x:true"
}
}
builders {
name: "release cipd"
recipe {
cipd_package: "some/package"
name: "foo"
}
}
''', '', []
)
def test_valid_global_swarming_hostname(self):
self.cfg_test(
'''
hostname: "example.com"
builders {
name: "release"
recipe {
name: "foo"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
}
}
''', '', []
)
def test_validate_recipe_properties(self):
def test(properties, properties_j, expected_errors):
ctx = config_component.validation.Context()
swarmingcfg.validate_recipe_properties(properties, properties_j, ctx)
self.assertEqual(
map(config_test.errmsg, expected_errors),
ctx.result().messages
)
test([], [], [])
runtime = '$recipe_engine/runtime:' + json.dumps({
'is_luci': False,
'is_experimental': True,
})
test(
properties=[
'',
':',
'buildbucket:foobar',
'x:y',
],
properties_j=[
'x:"y"',
'y:b',
'z',
runtime,
],
expected_errors=[
'properties \'\': does not have a colon',
'properties \':\': key not specified',
'properties \'buildbucket:foobar\': reserved property',
'properties_j \'x:"y"\': duplicate property',
'properties_j \'y:b\': No JSON object could be decoded',
'properties_j \'z\': does not have a colon',
'properties_j %r: key \'is_luci\': reserved key' % runtime,
'properties_j %r: key \'is_experimental\': reserved key' % runtime,
]
)
test([], ['$recipe_engine/runtime:1'], [
('properties_j \'$recipe_engine/runtime:1\': '
'not a JSON object'),
])
test([], ['$recipe_engine/runtime:{"unrecognized_is_fine": 1}'], [])
def test_bad(self):
self.cfg_test(
'''
builders {}
''',
'',
[
'builder #1: name: unspecified',
'builder #1: swarming_host: unspecified',
'builder #1: recipe: name: unspecified',
'builder #1: recipe: cipd_package: unspecified',
],
)
self.cfg_test(
'''
builder_defaults {
swarming_host: "swarming.example.com"
recipe {
name: "meeper"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
}
}
builders {
name: "meep"
}
builders {
name: "meep"
}
''',
'',
[
'builder meep: name: duplicate',
],
)
self.cfg_test(
'''
builders {
name: ":/:"
swarming_host: "swarming.example.com"
}
''',
'',
[
('builder :/:: name: invalid char(s) u\'/:\'. '
'Alphabet: "%s"') % errors.BUILDER_NAME_VALID_CHARS,
],
)
self.cfg_test(
'''
builders {
name: "veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery"
"looooooooooooooooooooooooooooooooooooooooooooooooooooooooong"
"naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaame"
swarming_host: "swarming.example.com"
}
''',
'',
[(
'builder '
'veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery'
'looooooooooooooooooooooooooooooooooooooooooooooooooooooooong'
'naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaame: '
'name: length is > 128'
)],
)
self.cfg_test(
'''
builder_defaults {name: "x"}
builders {
name: "release"
swarming_host: "swarming.example.com"
dimensions: "pool:a"
recipe {
name: "foo"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
}
}
''',
'',
[
'builder_defaults: name: not allowed',
],
)
self.cfg_test(
'''
task_template_canary_percentage { value: 102 }
builder_defaults {
swarming_host: "https://swarming.example.com"
swarming_tags: "wrong"
}
builders {
swarming_tags: "wrong2"
}
builders {
name: "b2"
swarming_tags: "builder:b2"
caches {}
caches { name: "a/b" path: "a" }
caches { name: "b" path: "a\\c" }
caches { name: "c" path: "a/.." }
caches { name: "d" path: "/a" }
priority: 300
}
''',
'',
[
'task_template_canary_percentage.value must must be in [0, 100]',
'builder_defaults: swarming_host: must not contain "://"',
'builder_defaults: tag #1: does not have ":": wrong',
'builder #1: tag #1: does not have ":": wrong2',
(
'builder b2: tag #1: do not specify builder tag; '
'it is added by swarmbucket automatically'
),
'builder b2: cache #1: name: required',
'builder b2: cache #1: path: required',
(
'builder b2: cache #2: '
'name: "a/b" does not match ^[a-z0-9_]{1,4096}$'
),
(
'builder b2: cache #3: path: cannot contain \\. '
'On Windows forward-slashes will be replaced with back-slashes.'
),
'builder b2: cache #4: path: cannot contain ".."',
'builder b2: cache #5: path: cannot start with "/"',
'builder b2: priority: must be in [20, 255] range; got 300',
],
)
self.cfg_test(
'''
builders {
name: "rel"
swarming_host: "swarming.example.com"
caches { path: "a" name: "a" }
caches { path: "a" name: "a" }
}
''',
'',
[
'builder rel: cache #2: duplicate name',
'builder rel: cache #2: duplicate path',
],
)
self.cfg_test(
'''
builders {
name: "rel"
swarming_host: "swarming.example.com"
caches { path: "a" name: "a" wait_for_warm_cache_secs: 61 }
}
''',
'',
[
'builder rel: cache #1: wait_for_warm_cache_secs: must be rounded '
'on 60 seconds',
],
)
self.cfg_test(
'''
builders {
name: "rel"
swarming_host: "swarming.example.com"
caches { path: "a" name: "a" wait_for_warm_cache_secs: 59 }
}
''',
'',
[
'builder rel: cache #1: wait_for_warm_cache_secs: must be at least '
'60 seconds'
],
)
self.cfg_test(
'''
builders {
name: "rel"
swarming_host: "swarming.example.com"
caches { path: "a" name: "a" wait_for_warm_cache_secs: 60 }
caches { path: "b" name: "b" wait_for_warm_cache_secs: 120 }
caches { path: "c" name: "c" wait_for_warm_cache_secs: 180 }
caches { path: "d" name: "d" wait_for_warm_cache_secs: 240 }
caches { path: "e" name: "e" wait_for_warm_cache_secs: 300 }
caches { path: "f" name: "f" wait_for_warm_cache_secs: 360 }
caches { path: "g" name: "g" wait_for_warm_cache_secs: 420 }
caches { path: "h" name: "h" wait_for_warm_cache_secs: 480 }
}
''',
'',
[
'builder rel: too many different (8) wait_for_warm_cache_secs '
'values; max 7',
],
)
self.cfg_test(
'''
builders {
name: "b"
swarming_host: "swarming.example.com"
service_account: "not an email"
}
''',
'',
[
'builder b: service_account: value "not an email" does not match '
'^[0-9a-zA-Z_\\-\\.\\+\\%]+@[0-9a-zA-Z_\\-\\.]+$',
],
)
self.cfg_test(
'''
builders {
name: "b"
swarming_host: "swarming.example.com"
expiration_secs: 158400 # 44h
execution_timeout_secs: 14400 # 4h
}
''',
'',
[
'builder b: expiration_secs + execution_timeout_secs '
'must be at most 47h'
],
)
self.cfg_test(
'''
builders {
name: "bad"
luci_migration_host: "hi"
recipe: {
name: "fake"
cipd_package: "also/fake"
}
}
''',
'',
['builder bad: deprecated luci_migration_host field must be removed.'],
)
@parameterized.expand([
(['a:b'], ''),
([''], 'dimension "": does not have ":"'),
(
['caches:a'],
(
'dimension "caches:a": dimension key must not be "caches"; '
'caches must be declared via caches field'
),
),
(
['a:b', 'a:c'],
(
'dimension "a:c": '
'multiple values for dimension key "a" and expiration 0s'
),
),
([':'], 'dimension ":": no key'),
(
['a.b:c'],
(
'dimension "a.b:c": '
r'key "a.b" does not match pattern "^[a-zA-Z\_\-]+$"'
),
),
(['0:'], 'dimension "0:": has expiration_secs but missing value'),
(['a:', '60:a:b'], 'dimension "60:a:b": mutually exclusive with "a:"'),
(
['-1:a:1'],
(
'dimension "-1:a:1": '
'expiration_secs is outside valid range; up to 21 days'
),
),
(
['1:a:b'],
'dimension "1:a:b": expiration_secs must be a multiple of 60 seconds',
),
(
['1814400:a:1'], # 21*24*60*6
'',
),
(
['1814401:a:1'], # 21*24*60*60+
(
'dimension "1814401:a:1": '
'expiration_secs is outside valid range; up to 21 days'
),
),
(
[
'60:a:1',
'120:a:1',
'180:a:1',
'240:a:1',
'300:a:1',
'360:a:1',
'420:a:1',
],
'at most 6 different expiration_secs values can be used',
),
])
def test_validate_dimensions(self, dimensions, expected_error):
ctx = config_component.validation.Context()
swarmingcfg._validate_dimensions('dimension', dimensions, ctx)
self.assert_errors(ctx, [expected_error] if expected_error else [])
def test_default_recipe(self):
self.cfg_test(
'''
builder_defaults {
dimensions: "pool:default"
swarming_host: "swarming.example.com"
recipe {
name: "foo"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
properties: "a:b"
properties: "x:y"
}
}
builders { name: "debug" }
builders {
name: "release"
recipe {
properties: "a:c"
properties_j: "x:null"
}
}
''', '', []
)
def test_default_recipe_bad(self):
self.cfg_test(
'''
builder_defaults {
dimensions: "pool:default"
swarming_host: "swarming.example.com"
recipe {
name: "foo"
properties: "a"
}
}
builders { name: "debug" }
''',
'',
['builder_defaults: recipe: properties u\'a\': does not have a colon'],
)
def test_validate_builder_mixins(self):
def test(cfg_text, expected_errors):
ctx = config_component.validation.Context()
cfg = project_config_pb2.BuildbucketCfg()
protobuf.text_format.Merge(cfg_text, cfg)
swarmingcfg.validate_builder_mixins(cfg.builder_mixins, ctx)
self.assertEqual(
map(config_test.errmsg, expected_errors),
ctx.result().messages
)
test(
'''
builder_mixins {
name: "a"
dimensions: "a:b"
dimensions: "60:a:c"
}
builder_mixins {
name: "b"
mixins: "a"
dimensions: "a:b"
}
''', []
)
test(
'''
builder_mixins {
name: "b"
mixins: "a"
}
builder_mixins {
name: "a"
}
''', []
)
test(
'''
builder_mixins {}
''', ['builder_mixin #1: name: unspecified']
)
test(
'''
builder_mixins { name: "a" }
builder_mixins { name: "a" }
''', ['builder_mixin a: name: duplicate']
)
test(
'''
builder_mixins {
name: "a"
mixins: ""
}
''', ['builder_mixin a: referenced mixin name is empty']
)
test(
'''
builder_mixins {
name: "a"
mixins: "b"
}
''', ['builder_mixin a: mixin "b" is not defined']
)
test(
'''
builder_mixins {
name: "a"
mixins: "a"
}
''', [
'circular mixin chain: a -> a',
]
)
test(
'''
builder_mixins {
name: "a"
mixins: "b"
}
builder_mixins {
name: "b"
mixins: "c"
}
builder_mixins {
name: "c"
mixins: "a"
}
''', [
'circular mixin chain: a -> b -> c -> a',
]
)
def test_builder_with_mixins(self):
def test(cfg_text, expected_errors):
ctx = config_component.validation.Context()
cfg = project_config_pb2.BuildbucketCfg()
protobuf.text_format.Merge(cfg_text, cfg)
swarmingcfg.validate_builder_mixins(cfg.builder_mixins, ctx)
self.assertEqual([], ctx.result().messages)
mixins = {m.name: m for m in cfg.builder_mixins}
swarmingcfg.validate_project_cfg(
cfg.buckets[0].swarming, mixins, True, ctx
)
self.assertEqual(
map(config_test.errmsg, expected_errors),
ctx.result().messages
)
test(
'''
builder_mixins {
name: "a"
dimensions: "cores:8"
dimensions: "cpu:x86-64"
dimensions: "os:Linux"
dimensions: "pool:default"
caches {
name: "git"
path: "git"
}
recipe {
name: "foo"
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
properties: "a:b'"
properties_j: "x:true"
}
}
builder_mixins {
name: "b"
mixins: "a"
}
builder_mixins {
name: "c"
mixins: "a"
mixins: "b"
}
buckets {
name: "a"
swarming {
builders {
name: "release"
swarming_host: "swarming.example.com"
mixins: "b"
mixins: "c"
}
}
}
''', []
)
class ServiceCfgTest(testing.AppengineTestCase):
  """Validation tests for service-level SwarmingSettings (swarmingcfg)."""

  def setUp(self):
    super(ServiceCfgTest, self).setUp()
    # Fresh validation context shared by the helpers below.
    self.ctx = config_component.validation.Context()

  def assertErrors(self, expected_errors):
    """Asserts self.ctx collected exactly the expected error messages."""
    # Python 2: map() returns a list here, so assertEqual compares lists.
    self.assertEqual(
        map(config_test.errmsg, expected_errors),
        self.ctx.result().messages
    )

  def cfg_test(self, swarming_text, expected_errors):
    """Parses a text-format SwarmingSettings, validates, checks errors."""
    settings = service_config_pb2.SwarmingSettings()
    protobuf.text_format.Merge(swarming_text, settings)
    swarmingcfg.validate_service_cfg(settings, self.ctx)
    self.assertErrors(expected_errors)

  def test_valid(self):
    """A fully-populated settings proto passes validation."""
    self.cfg_test(
        '''
          milo_hostname: "ci.example.com"

          bbagent_package {
            package_name: "infra/bbagent"
            version: "stable"
            version_canary: "canary"
            builders {
              regex: "infra/.+"
            }
          }

          kitchen_package {
            package_name: "infra/kitchen"
            version: "stable"
            version_canary: "canary"
          }

          user_packages {
            package_name: "git"
            version: "stable"
            version_canary: "canary"
          }
        ''',
        [],
    )

  def test_hostname(self):
    """Hostnames must not include a URL scheme."""
    swarmingcfg._validate_hostname('https://milo.example.com', self.ctx)
    self.assertErrors(['must not contain "://"'])

  def test_package_name(self):
    """A package without a name is rejected."""
    pkg = service_config_pb2.SwarmingSettings.Package(version='latest')
    swarmingcfg._validate_package(pkg, self.ctx)
    self.assertErrors(['package_name is required'])

  def test_package_version(self):
    """A package without a version is rejected."""
    pkg = service_config_pb2.SwarmingSettings.Package(package_name='infra/tool')
    swarmingcfg._validate_package(pkg, self.ctx)
    self.assertErrors(['version is required'])

  def test_predicate(self):
    """Invalid regexes in a builder predicate are reported individually."""
    predicate = service_config_pb2.BuilderPredicate(
        regex=['a', ')'],
        regex_exclude=['b', '('],
    )
    swarmingcfg._validate_builder_predicate(predicate, self.ctx)
    self.assertErrors([
        'regex u\')\': invalid: unbalanced parenthesis',
        'regex_exclude u\'(\': invalid: unbalanced parenthesis',
    ])
| [
"[email protected]"
] | |
67c377b9df8ec2472444611369f5af5a82e50c5b | 4a52511aead102089c6c6a79658242922ae77e83 | /Algorithms/lcs.py | 542d111cf725b860e1590f9fa24b6e02aec0df28 | [
"MIT"
] | permissive | pybae/etc | bb7c5fa42b914771319c81652b20ad003a8cff2f | ba3d6291ed5dd8e6b6ee18b186a09600def56505 | refs/heads/master | 2016-09-10T01:25:17.456170 | 2015-08-21T18:31:19 | 2015-08-21T18:31:19 | 23,380,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # An implementation of the Longest Common Subsequence Problem
# Takes two command line arguments, the two subsequences and prints out the
# longest common subsequence and its size
from copy import deepcopy
import sys
def lcs (prev, current, a, b):
rows, columns = len(a), len(b)
print(columns, rows)
print(current)
for i in range(1, rows + 1):
for j in range(1, columns + 1): # skip the first column
if a[i-1] == b[j-1]: # decrement since we start at 1
current[j] = prev[j - 1] + 1
else:
current[j] = max(prev[j], current[j - 1])
print(current)
prev = deepcopy(current)
print("Max value is", current[columns])
return current[columns]
# Parse the command line arguments into an array
if len(sys.argv) != 3:
print("There can only be two arguments")
quit() # may be unsafe
# Initialize two rows of the array, our previous, and the one we are using
# n are the rows in the array and m are the columns
# We increment the length for our "perimeter"
prev = [0 for i in range(len(sys.argv[2]) + 1)]
current = [0 for i in range(len(sys.argv[2]) + 1)]
lcs (prev, current, sys.argv[1], sys.argv[2])
| [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.