__all__ = [
"CobaltStrikeCampaignsContentHandler",
"CobaltStrikeTokensContentHandler",
"CobaltStrikeSentEmailsContentHandler",
"CobaltStrikeWebHitsContentHandler",
"CobaltStrikeApplicationsContentHandler",
]
from xml.sax import ContentHandler, parse, SAXNotRecognizedException
from xml.parsers.expat import ExpatError
from pca.util import copy_attrs
import datetime
import hashlib
import ipaddress
class CobaltStrikeCampaignsContentHandler(ContentHandler):
def __init__(self, campaign_callback, end_callback):
ContentHandler.__init__(self)
self.campaign_callback = campaign_callback
self.end_callback = end_callback
self.is_campaignsFile = False
self.currentCampaign = None
self.chars = ""
def startElement(self, name, attrs):
# clear characters buffer
self.chars = ""
if not self.is_campaignsFile:
if name == "campaigns":
self.is_campaignsFile = True
else:
raise SAXNotRecognizedException(
"XML does not look like Cobalt Strike campaigns.xml data."
)
elif name == "entry":
self.currentCampaign = dict()
def endElement(self, name):
if name == "entry":
self.campaign_callback(self.currentCampaign)
elif name == "cid":
self.currentCampaign["_id"] = self.chars
elif name == "when":
self.currentCampaign["start_time"] = datetime.datetime.utcfromtimestamp(
int(self.chars) / 1000.0
)
elif name == "url":
self.currentCampaign["url"] = self.chars
# We now ignore the template name that Cobalt Strike provides; instead we use the data in TemplateDoc
# elif name == 'template':
elif name == "subject":
self.currentCampaign["subject"] = self.chars
elif name == "campaigns":
self.end_callback()
def characters(self, content):
self.chars += content
class CobaltStrikeTokensContentHandler(ContentHandler):
def __init__(self, token_callback, end_callback):
ContentHandler.__init__(self)
self.token_callback = token_callback
self.end_callback = end_callback
self.is_tokensFile = False
self.currentToken = None
self.chars = ""
def startElement(self, name, attrs):
# clear characters buffer
self.chars = ""
if not self.is_tokensFile:
if name == "tokens":
self.is_tokensFile = True
else:
raise SAXNotRecognizedException(
"XML does not look like Cobalt Strike tokens.xml data."
)
elif name == "entry":
self.currentToken = dict()
def endElement(self, name):
if name == "entry":
self.token_callback(self.currentToken)
elif name == "token":
self.currentToken["token"] = self.chars
elif name == "email":
self.currentToken["email_hash"] = hashlib.sha256(
(self.chars).encode("utf-8")
).hexdigest()
elif name == "cid":
self.currentToken["campaign"] = self.chars
elif name == "tokens":
self.end_callback()
def characters(self, content):
self.chars += content
class CobaltStrikeSentEmailsContentHandler(ContentHandler):
def __init__(self, email_callback, end_callback):
ContentHandler.__init__(self)
self.email_callback = email_callback
self.end_callback = end_callback
self.is_sentemailsFile = False
self.currentEmail = None
self.chars = ""
def startElement(self, name, attrs):
# clear characters buffer
self.chars = ""
if not self.is_sentemailsFile:
if name == "sentemails":
self.is_sentemailsFile = True
else:
raise SAXNotRecognizedException(
"XML does not look like Cobalt Strike sentemails.xml data."
)
elif name == "entry":
self.currentEmail = dict()
def endElement(self, name):
if name == "entry":
self.email_callback(self.currentEmail)
elif name == "token":
self.currentEmail["token"] = self.chars
elif name == "cid":
self.currentEmail["campaign"] = self.chars
elif name == "when":
self.currentEmail["time"] = datetime.datetime.utcfromtimestamp(
int(self.chars) / 1000.0
)
elif name == "status":
self.currentEmail["status"] = self.chars
elif name == "sentemails":
self.end_callback()
def characters(self, content):
self.chars += content
class CobaltStrikeWebHitsContentHandler(ContentHandler):
def __init__(self, webhits_callback, end_callback):
ContentHandler.__init__(self)
self.webhits_callback = webhits_callback
self.end_callback = end_callback
self.is_webhitsFile = False
self.currentWebhit = None
self.chars = ""
def startElement(self, name, attrs):
# clear characters buffer
self.chars = ""
if not self.is_webhitsFile:
if name == "webhits":
self.is_webhitsFile = True
else:
raise SAXNotRecognizedException(
"XML does not look like Cobalt Strike webhits.xml data."
)
elif name == "entry":
self.currentWebhit = dict()
def endElement(self, name):
if name == "entry":
self.webhits_callback(self.currentWebhit)
elif name == "token":
self.currentWebhit["token"] = self.chars
elif name == "when":
self.currentWebhit["time"] = datetime.datetime.utcfromtimestamp(
int(self.chars) / 1000.0
)
elif name == "data":
# Currently expects source_ip to be last item in data
# TODO make searching for source_ip more bulletproof (regex?)
self.currentWebhit["source_ip"] = self.chars.split(" ")[-1]
elif name == "webhits":
self.end_callback()
def characters(self, content):
self.chars += content
class CobaltStrikeApplicationsContentHandler(ContentHandler):
def __init__(self, applications_callback, end_callback):
ContentHandler.__init__(self)
self.applications_callback = applications_callback
self.end_callback = end_callback
self.is_applicationsFile = False
self.currentApplication = None
self.chars = ""
def startElement(self, name, attrs):
# clear characters buffer
self.chars = ""
if not self.is_applicationsFile:
if name == "applications":
self.is_applicationsFile = True
else:
raise SAXNotRecognizedException(
"XML does not look like Cobalt Strike applications.xml data."
)
elif name == "entry":
self.currentApplication = dict()
def endElement(self, name):
if name == "entry":
self.applications_callback(self.currentApplication)
elif name == "id":
self.currentApplication["token"] = self.chars
elif name == "date":
self.currentApplication["time"] = datetime.datetime.utcfromtimestamp(
int(self.chars) / 1000.0
)
elif name == "application":
self.currentApplication["name"] = self.chars
elif name == "version":
self.currentApplication["version"] = self.chars
elif name == "external":
self.currentApplication["external_ip"] = self.chars
elif name == "internal":
            try:  # internal_ip is not guaranteed to be present, or may be 'unknown'/'null'
                ipaddress.ip_address(self.chars)  # validate only; keep the original string
                self.currentApplication["internal_ip"] = self.chars
            except ValueError:
                self.currentApplication["internal_ip"] = None
elif name == "applications":
self.end_callback()
def characters(self, content):
self.chars += content
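# A minimal usage sketch (not part of the original module): the handlers above are plain
# SAX ContentHandlers, so they can be driven by xml.sax.parse, which is already imported
# at the top of this file. The file name and callbacks below are assumptions for
# illustration only.
if __name__ == "__main__":
    def _print_campaign(campaign):
        # Each parsed <entry> arrives here as a dict
        print(campaign)

    def _finished():
        print("done parsing campaigns.xml")

    # "campaigns.xml" is a hypothetical path to a Cobalt Strike campaigns export.
    parse("campaigns.xml", CobaltStrikeCampaignsContentHandler(_print_campaign, _finished))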
|
import sys
# Uses the pedigree (.ped) file to create files that will be used by PLINK.
ped_filepath = sys.argv[1]
# Gets the stem of the pedigree file (retaining only the base filename and removing the file extension).
from pathlib import Path
ped_stem = Path(ped_filepath).stem
upid = "{}.{}".format(ped_stem,"update.ids")
uppa = "{}.{}".format(ped_stem,"update.parents")
upsx = "{}.{}".format(ped_stem,"update.sex")
phen = "{}.{}".format(ped_stem,"update.tdt_all_case")
fout_upid = open(upid,"w")
fout_uppa = open(uppa,"w")
fout_upsx = open(upsx,"w")
fout_phen = open(phen,"w")
f = open(ped_filepath,"r")
for line in f:
linesplit = line.rstrip().split("\t")
    fid, iid, father_iid, mother_iid, sex, phenotype = linesplit[0], linesplit[1], linesplit[2], linesplit[3], linesplit[4], linesplit[5]  # phenotype column is read but unused; renamed so it no longer shadows the phen output filename
fout_upid.write("{} {} {} {}\n".format(iid,iid,fid,iid))
fout_uppa.write("{} {} {} {}\n".format(fid,iid,father_iid,mother_iid))
fout_upsx.write("{} {} {}\n".format(fid,iid,sex))
# Generate phen file
if not (father_iid == "0" and mother_iid == "0"): # Skip parents
fout_phen.write("{} {} 2\n".format(fid,iid)) # In order to obtain transmission counts from the controls, we treat controls as cases (the PLINK TDT seems to require this)
f.close()
fout_upid.close()
fout_uppa.close()
fout_upsx.close()
fout_phen.close()
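# Example invocation (the script and .ped file names here are hypothetical):
#   python make_plink_update_files.py trios.ped
# This would write trios.update.ids, trios.update.parents, trios.update.sex and
# trios.update.tdt_all_case, presumably for PLINK's --update-ids, --update-parents and
# --update-sex options plus a phenotype file.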
|
__all__ = ['acb']
|
"""
Simple Git clone tool
Code readability: cmd, exists, current_dir, change_dir reused from DJANGULAR settings
"""
import os
import shutil
from djangular_cli.terminal.prompt import prompt
from distlib._backport import shutil # noqa F405
from djangular_cli.terminal.custom_prompts import prompt_overwrite, prompt_rename
class Repo:
"""
Gather repository data to clone.
"""
def __init__(self):
self.git_url = "https://" + input("▸ [github.com][other]: https://")
self.user = input("▸ Author: ")
self.package_name = input("▸ Package name: ")
self.result = "{}/{}/{}.git".format(self.git_url, self.user, self.package_name)
self.to_clone = "git clone "
self.command = self.to_clone + self.result
self.absolute_path = os.getcwd() + "/" + self.package_name
def djangular_boilerplate():
"""
Clone any repository into your project.
"""
git = Repo()
path = git.absolute_path
package_name = git.package_name
clone = git.command
if not os.path.exists(path):
os.system(clone)
rename = prompt(prompt_rename)
if rename.get("rename", True):
os.rename(package_name, input("Rename directory: "))
else:
pass
elif os.path.exists(path):
ow = prompt(prompt_overwrite)
if ow.get("overwrite", True):
shutil.rmtree(package_name)
os.system(clone)
rename = prompt(prompt_rename)
if rename.get("rename", True):
os.rename(package_name, input("Rename directory: "))
else:
exit("You have chosen not to overwrite. Session ended.")
|
import string
import random
def generate_random_string(length: int = 16) -> str:
return ''.join(
[random.choice(string.ascii_lowercase + string.digits) for n in range(length)]
)
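# Usage sketch: generate_random_string() returns a 16-character lowercase alphanumeric
# string, e.g. for throwaway identifiers. Note that random is not cryptographically
# secure; the stdlib secrets module would be the usual choice when the string must be
# unguessable.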
|
#!/usr/bin/env python3
# coding:utf-8
f = open("yankeedoodle.csv")
nums = [num.strip() for num in f.read().split(',')]
f.close()
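# Each group of three numbers hides one character: the 6th digit of the first two values
# and the 7th digit of the third are concatenated into an ASCII code.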
res = [int(x[0][5] + x[1][5] + x[2][6]) for x in zip(nums[0::3], nums[1::3], nums[2::3])]
print(''.join([chr(e) for e in res]))
|
# FeatherS2 Neo Helper Library
# 2021 Seon Rozenblum, Unexpected Maker
#
# Project home:
# https://feathers2neo.io
#
# Import required libraries
import time
import neopixel
import board
from os import statvfs
from micropython import const
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn
class FeatherS2NeoHelper:
def __init__(self):
# pin 13 and on-board RGB
self._led13 = DigitalInOut(board.LED)
self._led13.direction = Direction.OUTPUT
# Setup the NeoPixel power pins
self._pixel_power = DigitalInOut(board.NEOPIXEL_POWER)
self._pixel_power.direction = Direction.OUTPUT
self._pixel_power.value = True
self._pixel_matrix_power = DigitalInOut(board.NEOPIXEL_MATRIX_POWER)
self._pixel_matrix_power.direction = Direction.OUTPUT
# Setup the BATTERY voltage sense pin
self._vbat_voltage = AnalogIn(board.BATTERY)
# Setup the VBUS sense pin
self._vbus_sense = DigitalInOut(board.VBUS_SENSE)
self._vbus_sense.direction = Direction.INPUT
# Create a NeoPixel reference
self._pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.3, auto_write=True, pixel_order=neopixel.RGB)
# Create a NeoPixel matrix reference
self._matrix = neopixel.NeoPixel(board.NEOPIXEL_MATRIX, 25, brightness=0.5, auto_write=True, pixel_order=neopixel.RGB)
# Initially set the matrix power off
self._pixel_matrix_power.value = False
    def set_pixel_matrix_power(self, state):
        """Enable or disable power to the onboard NeoPixel matrix, either to show colour or to reduce power for deep sleep"""
self._pixel_matrix_power.value = state
def get_battery_voltage(self):
"""Get the approximate battery voltage"""
        # I don't really understand what CP is doing under the hood here for the ADC range & calibration,
        # but the onboard voltage divider for VBAT sense is set up to deliver 1.1V to the ADC based on its
        # default factory configuration.
        # This formula should show the nominal 4.2V max capacity (approximately) when 5V is present and
        # VBAT is in the charge state for a 1S LiPo battery with a max capacity of 4.2V
return round(self._vbat_voltage.value / 5370,2)
def get_vbus_present(self):
"""Detect if VBUS (5V) power source is present"""
return self._vbus_sense.value
def get_flash_info(self):
flash = statvfs('/')
flash_size = flash[0] * flash[2]
flash_free = flash[0] * flash[3]
return flash_size, flash_free
@staticmethod
def rgb_color_wheel(wheel_pos):
"""Color wheel to allow for cycling through the rainbow of RGB colors."""
wheel_pos = wheel_pos % 255
if wheel_pos < 85:
return 255 - wheel_pos * 3, 0, wheel_pos * 3
elif wheel_pos < 170:
wheel_pos -= 85
return 0, wheel_pos * 3, 255 - wheel_pos * 3
else:
wheel_pos -= 170
return wheel_pos * 3, 255 - wheel_pos * 3, 0
@property
def battery_voltage(self):
return self.get_battery_voltage()
@property
def vbus_present(self):
return self.get_vbus_present()
@property
def pixel(self):
return self._pixel
@property
def matrix(self):
return self._matrix
@property
def flash_info(self):
return self.get_flash_info()
@property
def blue_led(self):
return self._led13.value
@blue_led.setter
def blue_led(self,value):
# Turn the Blue LED on or off
self._led13.value = value
class MatrixMessage:
STATIC = const(0)
LEFT = const(1)
RIGHT = const(2)
def __init__(self, matrix):
self.matrix = matrix
self._message_width = 0
self._message_index = 0
self._pixel_data = []
self._scroll_direction = MatrixMessage.LEFT
self.current_rotation = 0
self._scroll_delay = 0.15
self._pixel_data_length = 0
self._next_tick = 0
self.glyphs = {
" ": [0,0,0,0,0],
"!": [0,29,0,0,0],
"\"": [0,24,0,24,0],
"#": [10,31,10,31,10],
"$": [10,29,21,23,10],
"%": [25,18,4,9,19],
"&": [10,21,21,10,1],
"'": [0,24,0,0,0],
"(": [0,14,17,0,0],
")": [0,17,14,0,0],
"*": [0,10,4,10,0],
"+": [0,4,14,4,0],
",": [0,1,2,0,0],
"-": [0,4,4,4,0],
".": [0,2,0,0,0],
"/": [1,2,4,8,16],
"0": [14,17,17,14,0],
"1": [0,9,31,1,0],
"2": [19,21,21,9,0],
"3": [18,17,21,26,0],
"4": [6,10,18,31,2],
"5": [29,21,21,21,18],
"6": [2,5,13,21,2],
"7": [17,18,20,24,16],
"8": [10,21,21,21,10],
"9": [8,21,22,20,8],
":": [0,10,0,0,0],
";": [0,1,10,0,0],
"<": [0,4,10,17,0],
"=": [0,10,10,10,0],
">": [0,17,10,4,0],
"?": [8,16,21,20,8],
"@": [14,17,21,18,14],
"A": [15,20,20,15,0],
"B": [31,21,21,10,0],
"C": [14,17,17,17,0],
"D": [31,17,17,14,0],
"E": [31,21,21,17,0],
"F": [31,20,20,16,0],
"G": [14,17,17,21,6],
"H": [31,4,4,31,0],
"I": [17,31,17,0,0],
"J": [18,17,17,30,16],
"K": [31,4,10,17,0],
"L": [31,1,1,1,0],
"M": [31,8,4,8,31],
"N": [31,8,4,2,31],
"O": [14,17,17,14,0],
"P": [31,20,20,8,0],
"Q": [12,18,19,13,0],
"R": [31,20,20,10,1],
"S": [9,21,21,18,0],
"T": [16,16,31,16,16],
"U": [30,1,1,30,0],
"V": [28,2,1,2,28],
"W": [31,2,4,2,31],
"X": [27,4,4,27,0],
"Y": [16,8,7,8,16],
"Z": [19,21,25,17,0],
"[": [0,31,17,17,0],
"\\": [16,8,4,2,1],
"]": [0,17,17,31,0],
"^": [0,8,16,8,0],
"_": [1,1,1,1,1],
"`": [0,16,8,0,0],
"a": [6,9,9,15,1],
"b": [31,5,5,2,0],
"c": [6,9,9,9,0],
"d": [2,5,5,31,0],
"e": [14,21,21,9,0],
"f": [4,15,20,16,0],
"g": [8,21,21,30,0],
"h": [31,4,4,3,0],
"i": [0,23,0,0,0],
"j": [0,1,1,22,0],
"k": [31,4,10,1,0],
"l": [0,30,1,1,0],
"m": [15,8,4,8,15],
"n": [15,8,8,7,0],
"o": [6,9,9,6,0],
"p": [15,10,10,4,0],
"q": [4,10,10,15,0],
"r": [7,8,8,8,0],
"s": [1,5,10,8,0],
"t": [0,30,5,5,1],
"u": [14,1,1,15,1],
"v": [12,2,1,2,12],
"w": [15,1,2,1,15],
"x": [9,6,6,9,0],
"y": [9,5,2,4,8],
"z": [9,11,13,9,0],
"{": [0,4,31,17,0],
"|": [0,31,0,0,0],
"}": [17,31,4,0,0],
"~": [0,4,4,2,2],
'↑': [4,8,31,8,4],
'→': [4,4,21,14,4],
'↓': [4,2,31,2,4],
'←': [4,14,21,4,4],
'▲': [2, 6, 14, 6, 2],
'►': [0, 31, 14, 4, 0],
'▼': [8, 12, 14, 12, 8],
'◄': [0, 4, 14, 31, 0],
"☐": [0, 14, 10, 14, 0],
"□": [31, 17, 17, 17, 31],
"℃": [24, 24, 7, 5, 5],
"℉": [24, 24, 7, 6, 4],
'π': [16, 31, 16, 31, 17],
'å': [6,9,27,15,1],
}
self.wifi_anim = [
[1, 0, 0, 0, 0],
[5, 4, 3, 0, 0],
[21, 20, 19, 8, 7]
]
def get_characters(self):
return f"{''.join(sorted(self.glyphs.keys()))} "
def get_character(self, c):
if c not in self.glyphs:
print(f"{c} not in font glyphs, sorry!")
return None
glyph_data = self.glyphs[c]
bits = [0] * 25
bit = 0
for x in range(5):
for y in range(5):
v = (glyph_data[x] >> (4-y)) & 1
bits[bit] = v
bit+=1
return bits
def get_message_width(self, txt, use_padding = True):
total_width = 0
for i, c in enumerate(txt):
# Special case for space
width = 0
if c == " ":
width = 2
elif c in self.glyphs:
glyph_data = self.glyphs[c]
for x in range(5):
width += 1 if glyph_data[x] > 0 else 0
            # Extra 1 to ensure 1 column of padding for every character in the string
total_width += (width + 1) if use_padding else width
return total_width
def get_message(self, txt, use_padding = True):
width = self.get_message_width(txt, use_padding)
# print(f"width: {width}")
bits = [0] * (width * 5)
# print(f"len bits {len(bits)}")
bit = 0
for i, c in enumerate(txt):
# Special case for space
if c == " ":
bit+= 10
elif c in self.glyphs:
glyph_data = self.glyphs[c]
for x in range(5):
if glyph_data[x] > 0:
for y in range(5):
v = (glyph_data[x] >> (4-y)) & 1
bits[bit] = v
bit+= 1
if use_padding:
bit+= 5
return width, bits
def setup_message(self, message, delay=0.2, use_padding=True):
""" setup the message
message: The message to display
        delay: The scroll step delay in seconds
use_padding: If there should be padding between each character
"""
self._scroll_delay = delay
self._message = message
self._message_width, self._pixel_data = self.get_message(message, use_padding)
self._pixel_data_length = len(self._pixel_data )
self._next_tick = time.monotonic()
self._fade = 1
# Set the current index to the start or end depending on direction
self._message_index = self._message_width if self._scroll_direction == MatrixMessage.RIGHT else 0
def show_message(self, color, brightness = 0.33, fade_out=0.2):
""" show the message on the matrix
color: The r,g,b colour each visible LED should be this update
brightness: Multiplier for the color as the neopixel lib doesn't have a brightness setting
        fade_out: fade step for each character being shown. Only used when displaying messages in STATIC movement mode
                  to help reduce transition shock and to separate consecutive identical characters
"""
if self._scroll_direction == MatrixMessage.LEFT and self._message_index >= self._message_width:
return
elif self._scroll_direction == MatrixMessage.RIGHT and self._message_index <= 0:
return
elif self._scroll_direction == MatrixMessage.STATIC and self._message_index >= len(self._message)-1:
return
if time.monotonic() > self._next_tick + self._scroll_delay:
self._next_tick = time.monotonic()
# Adjust index based on scroll direction
self._message_index += -1 if self._scroll_direction == MatrixMessage.RIGHT else 1
self._fade = 1
if self._scroll_direction == MatrixMessage.STATIC:
brightness *= self._fade
self._fade = max(self._fade - fade_out, 0)
col_on = [c * brightness for c in color ]
col_off = [0,0,0]
if self._scroll_direction == MatrixMessage.STATIC:
for led, p in enumerate(self.get_character(self._message[self._message_index])):
self.matrix[led] = col_on if p else col_off
return
for led in range(25):
index = led + 5 * self._message_index
if index < self._pixel_data_length:
self.matrix[led] = col_on if self._pixel_data[index] else col_off
@property
def scroll_direction(self):
        return self._scroll_direction
@scroll_direction.setter
def scroll_direction(self,value):
# Set the scroll direction
self._scroll_direction = value
@property
def display_rotation(self):
return self.current_rotation
@display_rotation.setter
def display_rotation(self,value):
        # Set the display rotation
self.current_rotation = value
class MatrixAnimation:
def __init__(self, matrix, anim_type, trail_length):
# List of animation shapes by pixel index
# Pixel 0 is Top Left, pixels increase vertically by row
# Feel free to make your own shapes!
self.matrix_display_shapes = {
"square": [0,1,2,3,4,9,14,19,24,23,22,21,20,15,10,5],
"circle": [1,2,3,9,14,19,23,22,21,15,10,5],
"diamond": [2,8,14,18,22,16,10,6],
"plus": [2,7,12,17,22,10,11,12,13,14],
"cross": [0,6,12,18,24,4,8,12,16,20],
"spiral": [12,13,18,17,16,11,6,7,8,9,14,19,24,23,22,21,20,15,10,5,0,1,2,3,4,9,14,19,24,23,22,21,20,15,10,5,6,7,8,13,18,17,16,11,12,-1,-1,-1,-1,-1,-1,-1]
}
        # Initialisation error status
self.error = False
if anim_type not in self.matrix_display_shapes:
print(f"** '{anim_type}' not found in list of shapes!\n** Animation halted!")
self.error = True
elif trail_length < 1 or trail_length > 20:
print(f"** trail_length cannot be {trail_length}. Please pick a value between 1 and 20!\n** Animation halted!")
self.error = True
if not self.error:
self.matrix = matrix
self.anim_type = anim_type
self.trail_length = trail_length + 1
            # Create the trail list based on the length of the trail
self.anim_trail = [x for x in range(0, -self.trail_length,-1)]
# Create a reference to the selected animation list
self.current_anim = self.matrix_display_shapes[self.anim_type]
def get_alpha(self):
return 0.2 * (self.trail_length-1)
def inc_anim_index(self, index):
self.anim_trail[index] += 1
if self.anim_trail[index] == len(self.current_anim):
self.anim_trail[index] = 0
def get_anim_index(self, index ):
return self.current_anim[self.anim_trail[index]]
def animate(self, r, g, b):
if not self.error:
alpha = self.get_alpha()
for index in range(self.trail_length):
if self.anim_trail[index] > -1:
(r2, g2, b2) = r * alpha, g * alpha, b * alpha
if self.get_anim_index(index) > -1:
self.matrix[ self.get_anim_index(index) ] = (r2, g2, b2)
alpha = alpha - 0.2 if alpha > 0.2 else 0
self.inc_anim_index(index)
class MatrixDigitalScope:
def __init__(self, pin):
self.pin = pin
def get_pin(self, col):
# print(self.pin.value)
if self.pin.value:
return 0
else:
return 4
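# A minimal usage sketch (an assumption, not part of the library): scroll a message
# across the 5x5 matrix using the helper and MatrixMessage classes defined above.
if __name__ == "__main__":
    helper = FeatherS2NeoHelper()
    helper.set_pixel_matrix_power(True)
    message = MatrixMessage(helper.matrix)
    message.setup_message("HELLO ", delay=0.15)
    while True:
        # show_message() must be called repeatedly; it advances one column per delay step
        message.show_message((255, 0, 255), brightness=0.3)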
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns =[
# path('',views.index,name='index'),
# path('<int:question_id>/',views.detail, name='detail'),
# path('<int:question_id>/results/', views.result, name='results'),
    path('', views.IndexView.as_view(), name='index'),
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    path('<int:pk>/results/', views.ResultView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
# cli:commands:create:presets:installable_apps
INSTALLABLE_APPS = {
'allauth': {
'apps': [
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.linkedin_oauth2',
'allauth.socialaccount.providers.twitter',
],
'middleware': [],
},
'auditlog': {
'apps': [
'auditlog',
],
'middleware': [],
},
'celery': {
'apps': [
'django_celery_beat',
],
'middleware': [],
},
'cors': {
'apps': [
'corsheaders',
],
'middleware': [
'corsheaders.middleware.CorsMiddleware',
],
},
'django-extensions': {
'apps': [
            'django_extensions',
],
'middleware': [],
},
'django-hosts': {
'apps': [
'django_hosts',
],
'middleware': [],
},
'redis': {
'apps': [
'django_redis',
],
'middleware': [],
},
'restframework': {
'apps': [
'rest_framework',
'rest_framework.authtoken',
'rest_framework_httpsignature',
'rest_framework_swagger',
],
'middleware': [],
},
'storages': {
'apps': [
'storages',
],
'middleware': [],
},
'polymorphic': {
'apps': [
'polymorphic',
],
'middleware': [],
},
'debug_toolbar': {
'apps': [
'debug_toolbar',
],
'middleware': [
'debug_toolbar.middleware.DebugToolbarMiddleware',
],
},
'django_otp': {
'apps': [
'django_otp',
'django_otp.plugins.otp_static',
'django_otp.plugins.otp_totp',
],
'middleware': [
'django_otp.middleware.OTPMiddleware',
]
},
}
DEFAULTS = ['allauth', 'cors', 'redis', 'restframework']
APPS = [app for application in INSTALLABLE_APPS for app in INSTALLABLE_APPS[application]['apps']]
MIDDLEWARE = [middleware for application in INSTALLABLE_APPS for middleware in INSTALLABLE_APPS[application]['middleware']]
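# A small sketch of how these presets might be consumed (hypothetical helper, not part
# of the CLI itself): restrict the generated lists to the DEFAULTS selection.
DEFAULT_APPS = [
    app for name in DEFAULTS for app in INSTALLABLE_APPS[name]['apps']
]
DEFAULT_MIDDLEWARE = [
    mw for name in DEFAULTS for mw in INSTALLABLE_APPS[name]['middleware']
]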
|
# coding=utf-8
import _mysql
from common import db_name_config
from common.mylog import logger
from support.db.mysql_db import doctor_conn, doctor_user_conn, doctor_question_conn
class BaseDao(object):
db_name = ""
table_name = ""
    escape_list = []  # fields whose values need escaping
    quot_list = []  # fields whose values need quoting
    not_append_list = []  # int fields that never get an increment operation, e.g. img_id
    append_list = []  # int fields that may get an increment operation, e.g. add_cnt, view_cnt
@classmethod
def get_by_id(cls, _id):
"""
        Fetch a row by id.
:param _id:
:return:
"""
sql = "select * from {db}.{tbl} where id = {_id}". \
format(db=cls.db_name,
tbl=cls.table_name,
_id=_id,
)
if cls.db_name == "doctor":
item = doctor_conn.fetchone(sql)
elif cls.db_name == "doctor_user":
item = doctor_user_conn.fetchone(sql)
elif cls.db_name == "doctor_question":
item = doctor_question_conn.fetchone(sql)
else:
logger.error("get_by_id() find no db to exec.")
item = None
return item
@classmethod
def update(cls, dic, where_col='id', where_col_str=False):
"""
        Update a row.
        :param dic: dict of column -> value (must include the where column's value)
:return:
"""
key_value_lst = []
for key, value in dic.items():
logger.info("%s=%s" % (key, value))
if key == where_col:
continue
            # plain string value
if type(value) == str or type(value) == unicode:
value = _mysql.escape_string(value)
item = "%s='%s'" % (key, value)
            # int to be incremented, e.g. like_num: (1, True) produces like_num = like_num + 1
elif type(value) == tuple and len(value) == 2:
if value[1]:
item = "%s=%s+%s" % (key, key, value[0])
else:
item = "%s=%s" % (key, value[0])
            # plain int, e.g. del_flag: 1 produces del_flag = 1
else:
item = "%s=%s" % (key, value)
key_value_lst.append(item)
sql = "update {db}.{tbl} set ".format(db=cls.db_name, tbl=cls.table_name)
sql += ",".join(key_value_lst)
        # the where column defaults to id
where_value = dic[where_col]
if where_col_str:
sql += " where %s = '%s'" % (where_col, where_value)
else:
sql += ' where %s = %s' % (where_col, where_value)
logger.info("base_update: %s" % sql)
if cls.db_name == db_name_config.DOCTOR_DB:
ret = doctor_conn.execute_with_exception(sql)
elif cls.db_name == db_name_config.DOCTOR_USER_DB:
ret = doctor_user_conn.execute_with_exception(sql)
elif cls.db_name == db_name_config.DOCTOR_QUESTION_DB:
ret = doctor_question_conn.execute_with_exception(sql)
else:
logger.error("error db...")
ret = None
return ret
@classmethod
def insert(cls, _dic):
"""
        Insert a row.
        :param _dic: dict of column -> value for the new row
:return:
"""
key_value_lst = []
for key, value in _dic.items():
            # plain string value
if type(value) == str or type(value) == unicode:
value = _mysql.escape_string(value)
item = "'%s'" % value
else:
item = "%s" % value
key_value_lst.append(item)
sql = "insert into {db}.{tbl}({column_list}) values ({value_list})". \
format(db=cls.db_name,
tbl=cls.table_name,
column_list=','.join(["`%s`" % v for v in _dic.keys()]),
value_list=','.join(key_value_lst))
logger.info("base_insert===> %s" % sql)
if cls.db_name == db_name_config.DOCTOR_DB:
ret = doctor_conn.execute_with_exception(sql)
elif cls.db_name == db_name_config.DOCTOR_USER_DB:
ret = doctor_user_conn.execute_with_exception(sql)
elif cls.db_name == db_name_config.DOCTOR_QUESTION_DB:
ret = doctor_question_conn.execute_with_exception(sql)
else:
logger.error("error db...")
ret = None
return ret
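# A minimal subclass sketch (hypothetical table, for illustration only): concrete DAOs
# are expected to set db_name and table_name and then reuse the generic helpers above.
class ArticleDao(BaseDao):
    db_name = db_name_config.DOCTOR_DB
    table_name = "article"

# ArticleDao.get_by_id(1)
# ArticleDao.update({"id": 1, "title": "new title", "view_cnt": (1, True)})  # view_cnt = view_cnt + 1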
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2020 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# #*** <License> ************************************************************#
# This module is part of the package GTW.RST.TOP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.RST.TOP.from_nav_list_file
#
# Purpose
# Read tree-of-pages from navigation.list+referred-to-files
#
# Revision Dates
# 23-Nov-2015 (CT) Creation (based on GTW.NAV methods)
# 1-Dec-2015 (CT) Change `_entries` to use `parent.Page`, if any,
# as default `Type`
# 1-Dec-2015 (CT) Improve `_fix_dict` (no `desc` nor `short_title`)
# 2-Dec-2015 (CT) Add `logging.exception` to `from_nav_list_file`
# ««revision-date»»···
#--
from _GTW import GTW
from _ReST import ReST
from _TFL import TFL
from _CAL.Date_Time import Date_Time as DT
import _GTW._RST.Resource
import _GTW._RST._TOP.Gallery
import _GTW._RST._TOP.Page
import _GTW._RST._TOP.ReST
import _GTW._RST._TOP.Video
import _ReST.To_Html
import _GTW._RST._TOP.import_TOP
from _TFL._Meta.Once_Property import Once_Property
from _TFL.formatted_repr import formatted_repr
from _TFL.Decorator import getattr_safe
from _TFL.Filename import Filename
from _TFL.predicate import split_hst
from _TFL.pyk import pyk
from _TFL.Record import Record
from _TFL.Regexp import Regexp, re
from _TFL import sos
import _TFL.Sorted_By
from posixpath import join as pjoin
import textwrap
_globs = None
_nav_info_pat = Regexp \
( r"^\.\. *$"
"\n"
r" +<(?P<expired>!?)Nav-Info> *$"
"\n"
r"(?P<code>"
r"(?:^ +(?:\w+ *=|[][(),]).*$" "\n" r")+"
r")"
r" +<!?/Nav-Info> *$"
, re.MULTILINE
)
def _entries (parent, src_dir, list_of_dicts) :
for d in list_of_dicts :
_fix_dict (d)
sd = d.pop ("sub_dir", None)
Type = d.pop ("Type", None) or \
( getattr (parent, "Page", GTW.RST.TOP.Page)
if sd is None
else GTW.RST.TOP.Dir
)
if sd is None :
entry = Type (parent = parent, src_dir = src_dir, ** d)
else :
entry = Type (name = sd, parent = parent, ** d)
sub_dir_path = pjoin (src_dir, sd)
from_nav_list_file (entry, sub_dir_path)
if entry is not None :
yield entry
# end def _entries
def _exec (text) :
code = text
result = dict (__builtins__ = {})
exec (code, _globs, result)
result.pop ("__builtins__")
return result
# end def _exec
def _file_contents (name, encoding = "utf-8") :
if sos.path.exists (name) :
with open (name, "rb") as f :
result = pyk.decoded (f.read ().strip (), encoding)
return result
else :
print ("*** *** *** File doesn't exist:", name, "*** *** ***")
# end def _file_contents
def _fix_dict (dct) :
if "title" in dct and "short_title" not in dct :
if "desc" in dct :
dct ["short_title"] = dct.pop ("title")
dct ["title"] = dct.pop ("desc")
else :
dct ["short_title"] = dct.get ("title")
for k, v in list (pyk.iteritems (dct)) :
if isinstance (v, pyk.byte_types) :
dct [k] = pyk.decoded (v, "utf-8", "latin-1")
return dct
# end def _fix_dict
def _page_info (f) :
src = _file_contents (f)
pat = _nav_info_pat
if pat.search (src) :
result = _fix_dict (_exec (textwrap.dedent (pat.code)))
if pat.expired :
date = result.get ("date")
exp_date = "20091231"
if date :
try :
exp_date = \
(DT.from_string (date) + DT.Delta (30)).formatted ()
except Exception :
pass
result ["exp_date"] = exp_date
result ["src_contents"] = pat.sub ("", src).strip ()
return result
# end def _page_info
A_Link = GTW.RST.TOP.A_Link
Alias = GTW.RST.TOP.Alias
def Dyn_Slice_ReST_Dir (parent, src_dir) :
def _gen (parent, src_dir) :
for f in sos.expanded_globs (pjoin (src_dir, "*.txt")) :
info = _page_info (f)
if info :
n = info.get ("name", f)
info ["perma_name"] = base = Filename (n).base
info ["name"] = name = "%s.html" % (base, )
yield GTW.RST.TOP.Page_ReST \
(parent = parent, src_dir = src_dir, ** info)
sort_key = TFL.Sorted_By ("rank", "-date", "name")
entries = sorted (_gen (parent, src_dir), key = sort_key)
parent.add_entries (* entries)
# end def Dyn_Slice_ReST_Dir
Gallery = GTW.RST.TOP.Gallery
def Page_ReST_F (parent, src_dir, name, ** kw) :
src_path = pjoin (src_dir, Filename (".txt", name).name)
src_contents = _file_contents (src_path)
return GTW.RST.TOP.Page_ReST \
( parent = parent
, src_dir = src_dir
, name = name
, src_contents = src_contents
, ** kw
)
# end def Page_ReST_F
Video = GTW.RST.TOP.Video
def from_nav_list_file (parent, src_dir, nav_context = {}) :
global _globs
if _globs is None :
_globs = dict (globals (), ** nav_context)
parent.src_dir = src_dir
fn = pjoin (src_dir, "navigation.list")
fc = _file_contents (fn)
if fc is not None :
try :
dct = _exec (fc)
except Exception as exc :
import logging
logging.exception ("Error in %s" % (fn, ))
else :
parent.add_entries \
(* _entries (parent, src_dir, dct ["own_links"]))
else :
print ("*** navigation.list not found in directory", src_dir, "***")
# end def from_nav_list_file
if __name__ != "__main__" :
GTW.RST.TOP._Export ("from_nav_list_file")
### __END__ GTW.RST.TOP.from_nav_list_file
|
DIRECTORIES = (
('.',
# 'nosetests -v premailer.test_premailer -m test_parse_style_rules'
'nosetests -v premailer.test_premailer'
# 'nosetests -v premailer.test_premailer -m test_merge_styles',
),
)
|
import pytest
from bmi import create_parser, handle_args
@pytest.fixture
def parser():
return create_parser()
def test_no_args_exits(parser):
# parser.parse_args should raise the exception but in case
# you raised it explicitly further down the stack let's check
# if handle_args raises it (same applies to next test)
with pytest.raises(SystemExit):
handle_args()
def test_one_arg_exits(parser):
with pytest.raises(SystemExit):
args = parser.parse_args(['-w', '80'])
handle_args(args)
def test_two_arg(parser, capfd):
args = parser.parse_args(['-w', '80', '-l', '187'])
handle_args(args)
output = capfd.readouterr()[0]
assert "Your BMI is: 22.88" in output
def test_two_arg_reversed_order(parser, capfd):
args = parser.parse_args(['-l', '187', '-w', '80'])
handle_args(args)
output = capfd.readouterr()[0]
assert "Your BMI is: 22.88" in output
def test_help_text(parser, capfd):
with pytest.raises(SystemExit):
parser.parse_args(['-h'])
output = capfd.readouterr()[0]
assert "-w WEIGHT, --weight WEIGHT" in output
assert "-l LENGTH, --length LENGTH" in output
    assert "Calculate your BMI." in output
|
# coding=utf-8
import inspect
import os
from datetime import datetime
from server_core import HTTPServer, BaseHTTPRequestHandler
class RomaHandler(BaseHTTPRequestHandler):
def do_static(self):
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
url_path = self.path
if "?" in url_path:
url_path = url_path[:url_path.index("?")]
if "#" in url_path:
url_path = url_path[:url_path.index("#")]
url_path = "index.html" if url_path == "/" else url_path[1:]
file_path = os.path.join(current_dir, "static", url_path)
if file_path != os.path.abspath(file_path):
            # bail out if the path has been tampered with (path traversal)
return False
filename, file_extension = os.path.splitext(file_path)
if os.path.isfile(file_path):
self.send_response(200)
print("file found " + file_path + " " + file_extension)
if file_extension == ".html":
self.send_header("Content-Type", "text/html; charset=utf-8")
elif file_extension == ".css":
self.send_header("Content-Type", "text/css")
elif file_extension == ".js":
self.send_header("Content-Type", "application/javascript")
elif file_extension == ".xml":
self.send_header("Content-Type", "text/xml")
else:
return False
self.end_headers()
with open(file_path, 'r') as f:
self.wfile.write(f.read())
return True
return False
def do_GET(self):
if self.do_static():
return
if self.path == "/dynamic/":
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
self.wfile.write("<html><head><title>Это динамическая страница</title></head><body><h1>")
self.wfile.write("Текущее время на сервере: " + datetime.now().isoformat())
self.wfile.write("</h1></body></html>")
return
self.log_error("wrong file " + self.path)
self.send_error(code=404)
httpd = HTTPServer(('', 8047), RomaHandler)
httpd.serve_forever()
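# With the server running, http://localhost:8047/ serves static/index.html (plus any
# .css/.js/.xml files under static/), and http://localhost:8047/dynamic/ returns a page
# showing the current server time.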
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models.signals import post_save
# Creating our own User class for future upgrade.
class User(AbstractUser):
is_organisor = models.BooleanField(default=True)
is_agent = models.BooleanField(default=False)
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return f"{self.user.username}"
class Agent(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
organisation = models.ForeignKey("UserProfile", on_delete=models.CASCADE)
def __str__(self):
return f"{self.user.username}"
class Lead(models.Model):
# Basic fields
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
age = models.IntegerField(default=0)
description = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
phone_number = models.CharField(max_length=20)
email = models.EmailField()
# Foreign Key
organisation = models.ForeignKey("UserProfile", on_delete=models.CASCADE)
agent = models.ForeignKey("Agent", null=True, blank=True, on_delete=models.SET_NULL)
category = models.ForeignKey("Category", related_name="leads", null=True, blank=True, on_delete=models.SET_NULL)
def __str__(self):
return f"{self.first_name} {self.last_name}"
def post_user_created_signals(sender, instance, created, **kwargs):
print(f"{sender=}")
print(f"{instance=}")
print(f"{created=}")
print(f"{kwargs=}")
if created:
# Create a new UserProfile
UserProfile.objects.create(user=instance)
post_save.connect(post_user_created_signals, sender=User)
class Category(models.Model):
name = models.CharField(max_length=30) # New, Contacted, Converted, Unconverted
organisation = models.ForeignKey("UserProfile", on_delete=models.CASCADE)
def __str__(self):
return self.name
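# A quick illustration of the post_save hook above (an assumption about usage, e.g. from
# "python manage.py shell" in a configured project):
#
#   user = User.objects.create_user(username="alice", password="not-a-real-password")
#   UserProfile.objects.filter(user=user).exists()  # True: created by the signal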
|
__version__ = '10.1'
|
import guppy
from guppy import hpy
import time
import sys
from collections import defaultdict
class Heap():
def __init__(self):
self.array = []
self.size = 0
self.pos = []
def newMinHeapNode(self, v, dist):
minHeapNode = [v, dist]
return minHeapNode
def swapMinHeapNode(self, a, b):
t = self.array[a]
self.array[a] = self.array[b]
self.array[b] = t
def minHeapify(self, idx):
smallest = idx
left = 2 * idx + 1
right = 2 * idx + 2
if left < self.size and self.array[left][1] <self.array[smallest][1]:
smallest = left
if right < self.size and self.array[right][1] <self.array[smallest][1]:
smallest = right
if smallest != idx:
self.pos[ self.array[smallest][0] ] = idx
self.pos[ self.array[idx][0] ] = smallest
self.swapMinHeapNode(smallest, idx)
self.minHeapify(smallest)
def extractMin(self):
if self.isEmpty() == True:
return
root = self.array[0]
lastNode = self.array[self.size - 1]
self.array[0] = lastNode
self.pos[lastNode[0]] = 0
self.pos[root[0]] = self.size - 1
self.size -= 1
self.minHeapify(0)
return root
def isEmpty(self):
return True if self.size == 0 else False
def decreaseKey(self, v, dist):
i = self.pos[v]
self.array[i][1] = dist
while i > 0 and self.array[i][1] < self.array[(i - 1) // 2][1]:
            self.pos[ self.array[i][0] ] = (i-1)//2
            self.pos[ self.array[(i-1)//2][0] ] = i
            self.swapMinHeapNode(i, (i - 1)//2 )
            i = (i - 1) // 2
def isInMinHeap(self, v):
if self.pos[v] < self.size:
return True
return False
def printArr(parent, n):
for i in range(1, n):
print ("% d - % d" % (parent[i], i))
class Graph():
def __init__(self, V):
self.V = V
self.graph = defaultdict(list)
# Adds an edge to an undirected graph
def addEdge(self, src, dest, weight):
newNode = [dest, weight]
self.graph[src].insert(0, newNode)
newNode = [src, weight]
self.graph[dest].insert(0, newNode)
def PrimMST(self):
# Get the number of vertices in graph
V = self.V
# key values used to pick minimum weight edge in cut
key = []
        # List to store constructed MST
parent = []
# minHeap represents set E
minHeap = Heap()
for v in range(V):
parent.append(-1)
key.append(sys.maxsize)
minHeap.array.append( minHeap.newMinHeapNode(v, key[v]) )
minHeap.pos.append(v)
# Make key value of 0th vertex as 0 so
# that it is extracted first
minHeap.pos[0] = 0
key[0] = 0
minHeap.decreaseKey(0, key[0])
# Initially size of min heap is equal to V
        minHeap.size = V
# In the following loop, min heap contains all nodes
# not yet added in the MST.
while minHeap.isEmpty() == False:
# Extract the vertex with minimum distance value
newHeapNode = minHeap.extractMin()
u = newHeapNode[0]
# Traverse through all adjacent vertices of u
# (the extracted vertex) and update their
# distance values
for pCrawl in self.graph[u]:
v = pCrawl[0]
if minHeap.isInMinHeap(v) and pCrawl[1] < key[v]:
key[v] = pCrawl[1]
parent[v] = u
# update distance value in min heap also
minHeap.decreaseKey(v, key[v])
printArr(parent, V)
h = hpy()
graph = Graph(9)
graph.addEdge(0, 1, 1)
graph.addEdge(0, 7, 1)
graph.addEdge(1, 2, 1)
graph.addEdge(1, 7, 1)
graph.addEdge(2, 3, 1)
graph.addEdge(2, 8, 1)
graph.addEdge(2, 5, 1)
graph.addEdge(3, 4, 1)
graph.addEdge(3, 5, 1)
graph.addEdge(4, 5, 1)
graph.addEdge(5, 6, 1)
graph.addEdge(6, 7, 1)
graph.addEdge(6, 8, 1)
graph.addEdge(7, 8, 1)
graph.PrimMST()
heap1 = hpy()
time_start = time.time()
compu = graph.PrimMST()
time_end = time.time()
cal = time_end - time_start
print("*******----------********\n")
print("\n\n Memory consumed -----",heap1.heap())
print("Prims complete graph", "time taken for particular nodes = ", cal)
|
from datetime import datetime
from django.test import TestCase
from tasks.models import Task, Tag
from ..utils import get_tasks_for_individual_report
class IndividualReportsTestUtils(TestCase):
def setUp(self):
Tag.objects.create(name='Test tag')
Task.objects.bulk_create([
Task(date=datetime.now(), name='test1', description='test_desc1', tag=Tag.objects.first()),
Task(date=datetime.now(), name='test2', description='test_desc2', tag=Tag.objects.first())
])
self.tasks = get_tasks_for_individual_report(Task, datetime.now(), datetime.now(), [1])
    def test_get_tasks_for_individual_report(self):
        """Check that the expected queryset is returned"""
self.assertEqual(list(self.tasks), [(datetime.now().date(), 'test1', 'test_desc1'),
(datetime.now().date(), 'test2', 'test_desc2')])
|
#!/usr/bin/python -S
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wifi packet blaster."""
__author__ = '[email protected] (Mike Mu)'
import contextlib
import errno
import multiprocessing
import os
import re
import subprocess
import sys
import time
import traceback
import options
try:
import monotime # pylint: disable=unused-import,g-import-not-at-top
except ImportError:
pass
try:
_gettime = time.monotonic
except AttributeError:
_gettime = time.time
_OPTSPEC = """
wifiblaster [options...] [clients...]
--
i,interface= Name of access point interface
d,duration= Packet blast duration in seconds [.1]
f,fraction= Number of samples per duration [10]
s,size= Packet size in bytes [1470]
"""
class Error(Exception):
"""Exception superclass representing a nominal test failure."""
pass
class NotActiveError(Error):
"""Client is not active."""
pass
class NotAssociatedError(Error):
"""Client is not associated."""
pass
class NotSupportedError(Error):
"""Packet blasts are not supported."""
pass
class PktgenError(Error):
"""Pktgen failure."""
pass
class Iw(object):
"""Interface to iw."""
# TODO(mikemu): Use an nl80211 library instead.
def __init__(self, interface):
"""Initializes Iw on a given interface."""
self._interface = interface
def _DevInfo(self):
"""Returns the output of 'iw dev <interface> info'."""
return subprocess.check_output(
['iw', 'dev', self._interface, 'info'])
def _DevStationDump(self):
"""Returns the output of 'iw dev <interface> station dump'."""
return subprocess.check_output(
['iw', 'dev', self._interface, 'station', 'dump'])
def _DevStationGet(self, client):
"""Returns the output of 'iw dev <interface> station get <client>'."""
try:
return subprocess.check_output(
['iw', 'dev', self._interface, 'station', 'get', client])
except subprocess.CalledProcessError as e:
if e.returncode == 254:
raise NotAssociatedError
raise
def GetClients(self):
"""Returns the associated clients of an interface."""
return set([client.lower() for client in re.findall(
r'Station ((?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2})',
self._DevStationDump())])
def GetFrequency(self):
"""Returns the frequency of an interface."""
return int(re.search(r'channel.*\((\d+) MHz\)', self._DevInfo()).group(1))
def GetPhy(self):
"""Returns the PHY name of an interface."""
return 'phy%d' % int(re.search(r'wiphy (\d+)', self._DevInfo()).group(1))
def GetInactiveTime(self, client):
"""Returns the inactive time of a client."""
return float(re.search(r'inactive time:\s+(\d+) ms',
self._DevStationGet(client)).group(1)) / 1000
def GetRssi(self, client):
"""Returns the RSSI of a client."""
return float(re.search(r'signal:\s+([-.\d]+)',
self._DevStationGet(client)).group(1))
class Mac80211Stats(object):
"""Interface to mac80211 statistics in debugfs."""
def __init__(self, phy):
"""Initializes Mac80211Stats on a given PHY."""
self._basedir = os.path.join(
'/sys/kernel/debug/ieee80211', phy, 'statistics')
def _ReadCounter(self, counter):
"""Returns a counter read from a file."""
try:
with open(os.path.join(self._basedir, counter)) as f:
return int(f.read())
except IOError as e:
if e.errno == errno.ENOENT:
raise NotSupportedError
raise
def GetTransmittedFrameCount(self):
"""Returns the number of successfully transmitted MSDUs."""
return self._ReadCounter('dot11TransmittedFrameCount')
class Pktgen(object):
"""Interface to pktgen."""
def __init__(self, interface):
"""Initializes Pktgen on a given interface."""
self._interface = interface
self._control_file = '/proc/net/pktgen/pgctrl'
self._thread_file = '/proc/net/pktgen/kpktgend_1'
self._device_file = '/proc/net/pktgen/%s' % interface
def _ReadFile(self, filename):
"""Returns the contents of a file."""
try:
with open(filename) as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise NotSupportedError
raise
def _WriteFile(self, filename, s):
"""Writes a string and a newline to a file."""
try:
with open(filename, 'w') as f:
f.write(s + '\n')
except IOError as e:
if e.errno == errno.ENOENT:
raise NotSupportedError
raise
@contextlib.contextmanager
def PacketBlast(self, client, size):
"""Runs a packet blast."""
# Reset pktgen.
self._WriteFile(self._control_file, 'reset')
self._WriteFile(self._thread_file, 'add_device %s' % self._interface)
# Work around a bug on GFRG200 where transmits hang on queues other than BE.
self._WriteFile(self._device_file, 'queue_map_min 2')
self._WriteFile(self._device_file, 'queue_map_max 2')
# Disable packet count limit.
self._WriteFile(self._device_file, 'count 0')
# Set parameters.
self._WriteFile(self._device_file, 'dst_mac %s' % client)
self._WriteFile(self._device_file, 'pkt_size %d' % size)
# Start packet blast.
p = multiprocessing.Process(target=self._WriteFile,
args=[self._control_file, 'start'])
p.start()
# Wait for pktgen startup delay. pktgen prints 'Starting' after the packet
# blast has started.
while (p.is_alive() and not
re.search(r'Result: Starting', self._ReadFile(self._device_file))):
pass
# Run with-statement body.
try:
yield
# Stop packet blast.
finally:
p.terminate()
p.join()
def _PacketBlast(iw, mac80211stats, pktgen, client, duration, fraction, size):
"""Blasts packets at a client and returns a string representing the result."""
try:
# Validate client.
if client not in iw.GetClients():
raise NotAssociatedError
with pktgen.PacketBlast(client, size):
# Wait for the client's inactive time to become zero, which happens when
# an ack is received from the client. Assume the client has disassociated
# after 2s.
start = _gettime()
while _gettime() < start + 2:
if iw.GetInactiveTime(client) == 0:
break
else:
raise NotActiveError
# Wait for block ack session and max PHY rate negotiation.
time.sleep(.1)
# Sample transmitted frame count.
samples = [mac80211stats.GetTransmittedFrameCount()]
start = _gettime()
dt = duration / fraction
for t in [start + dt * (i + 1) for i in xrange(fraction)]:
time.sleep(t - _gettime())
samples.append(mac80211stats.GetTransmittedFrameCount())
# Compute throughputs from samples.
samples = [8 * size * (after - before) / dt
for (after, before) in zip(samples[1:], samples[:-1])]
# Print result.
print ('version=2 mac=%s throughput=%d rssi=%g frequency=%d samples=%s') % (
client,
sum(samples) / len(samples),
iw.GetRssi(client),
iw.GetFrequency(),
','.join(['%d' % sample for sample in samples]))
except Error as e:
print 'version=2 mac=%s error=%s' % (client, e.__class__.__name__)
traceback.print_exc()
except Exception as e:
print 'version=2 mac=%s error=%s' % (client, e.__class__.__name__)
raise
def main():
# Parse and validate arguments.
o = options.Options(_OPTSPEC)
(opt, _, clients) = o.parse(sys.argv[1:])
opt.duration = float(opt.duration)
opt.fraction = int(opt.fraction)
opt.size = int(opt.size)
if not opt.interface:
o.fatal('must specify --interface')
if opt.duration <= 0:
o.fatal('--duration must be positive')
if opt.fraction <= 0:
o.fatal('--fraction must be a positive integer')
if opt.size <= 0:
o.fatal('--size must be a positive integer')
# Initialize iw, mac80211stats, and pktgen.
iw = Iw(opt.interface)
mac80211stats = Mac80211Stats(iw.GetPhy())
pktgen = Pktgen(opt.interface)
# If no clients are specified, test all associated clients.
if not clients:
clients = sorted(iw.GetClients())
# Normalize clients.
clients = [client.lower() for client in clients]
# Blast packets at each client.
for client in clients:
_PacketBlast(iw, mac80211stats, pktgen, client,
opt.duration, opt.fraction, opt.size)
if __name__ == '__main__':
main()
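# Example invocation (interface name and client MAC are hypothetical):
#   wifiblaster -i wlan0 -d .1 -f 10 -s 1470 00:11:22:33:44:55
# With no client arguments, all currently associated clients are tested.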
|
import bosh_client
import os
import yaml
def do_step(context):
settings = context.meta['settings']
username = settings["username"]
home_dir = os.path.join("/home", username)
index_file = context.meta['index-file']
f = open("manifests/{0}".format(index_file))
manifests = yaml.safe_load(f)
f.close()
client = bosh_client.BoshClient("https://10.0.0.4:25555", "admin", "admin")
for m in manifests['manifests']:
print "Running errands for {0}/manifests/{1}...".format(home_dir, m['file'])
for errand in m['errands']:
print "Running errand {0}".format(errand)
task_id = client.run_errand(m['deployment-name'], errand)
task = client.wait_for_task(task_id)
retries = 0
while task['state'] == 'error' and retries < 5:
retries += 1
print "Retrying errand {0}".format(errand)
task_id = client.run_errand(m['deployment-name'], errand)
task = client.wait_for_task(task_id)
result = client.get_task_result(task_id)
print "Errand finished with exit code {0}".format(result['exit_code'])
print "=========== STDOUT ==========="
print result['stdout'].encode('utf8')
print "=========== STDERR ==========="
print result['stderr'].encode('utf8')
return context
|
from socket import *
import json
import camera, display, keyboard, audio, pygame
from multiprocessing.connection import SocketClient
import time, random
clock = pygame.time.Clock()
class Manager(object):
__camera = None
__display = None
__keyboard = None
__audio = None
def __init__(self):
self.__camera = camera.Camera()
self.__display = display.Display()
self.__keyboard = keyboard.Keyboard()
self.__audio = audio.Audio()
def replyToPing(self, addressTracker):
data = json.dumps({'type': 0, 'code': 1 ,'status': 'OK', 'port': self.__port})
s = socket(AF_INET, SOCK_STREAM)
try:
s.connect(addressTracker)
s.sendall(data)
except:
            print "Error connecting in replyToPing"
s.close()
return True
def broadcastMessage(self, addressTracker,socketServer ,name):
print 'Send BroadcastMessage'
data = json.dumps({'type': 1, 'code': 0 ,'status': 'OK','name': name})
try:
socketServer.connect(addressTracker)
socketServer.sendall(data)
socketServer.close()
except:
            print "Error connecting in broadcastMessage"
return True
def requestImagem(self, socketServer):
while True:
image = self.__camera.getImageData()
data = json.dumps({'type': 3, 'code': 1 ,'status' : 'OK', 'image': image})
try:
socketServer.sendall(data)
except:
                print "Error in sendall in requestImagem"
return True
return True
def requestDisplay(self, socketClient):
while True:
display = self.__display.getDisplayData()
data = json.dumps({'type': 5, 'code': 1 ,'status' : 'OK', 'display': display})
try:
socketClient.sendall(data)
except:
                print "Error in sendall in requestDisplay"
break
return True
def requestAudio(self, socketServer, size):
audio = self.__audio.getAudioData(size)
data = json.dumps({'type': 4, 'code': 1 ,'status' : 'OK', 'audio': audio})
try:
socketServer.sendall(data)
except:
            print "Error sending audio"
return False
return True
def requestKeyboard(self, socketServer, size):
self.__keyboard.getKeysData(socketServer, size)
        return True
|
import datetime
import unittest
from pyvalidator.is_before import is_before
from . import print_test_ok
class TestIsBefore(unittest.TestCase):
def test_valid_before_dates_against_a_start_date(self):
for i in [
['2010-07-02', '08/04/2011'],
['2010-08-04', '08/04/2011'],
[datetime.datetime.utcfromtimestamp(0).isoformat(), '08/04/2011'],
]:
self.assertTrue(is_before(*i))
print_test_ok()
def test_invalid_before_dates_against_a_start_date(self):
for i in [
['08/04/2011', '08/04/2011'],
['08/04/2011', '08/04/2011'],
[datetime.datetime(2011, 9, 10).isoformat(), '08/04/2011'],
]:
self.assertFalse(is_before(*i))
print_test_ok()
|
#!/usr/bin/python2.4
"""Diff Match and Patch -- Test harness
Copyright 2018 The diff-match-patch Authors.
https://github.com/google/diff-match-patch
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import time
import unittest
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for i in range(1, n + 1):
lineList.append(str(i) + "\n")
charList.append(unichr(i))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for i in range(1, n + 1):
lineList.append(str(i) + "\n")
charList.append(unichr(i))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
# More than 65536 to verify any 16-bit limitation.
lineList = []
for i in range(1, 66000 + 1):
lineList.append(str(i) + "\n")
chars = "".join(lineList)
results = self.dmp.diff_linesToChars(chars, "")
diffs = [(self.dmp.DIFF_INSERT, results[0])]
self.dmp.diff_charsToLines(diffs, results[2])
self.assertEquals(chars, diffs[0][1])
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
# Empty merge.
diffs = [(self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "bc")], diffs)
# Empty equality.
diffs = [(self.dmp.DIFF_EQUAL, ""), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "b")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "b")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for i in range(10):
a += a
b += b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
results = self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
|
# XXX Deprecated
from adminapi.filters import * # NOQA F401 F403
|
def paint2dGraph(pl, g):
import numpy as np  # needed for np.array below
import matplotlib.pyplot as plt
pointList = []
min_x = pl[0].getPickPosition().getX()
min_y = pl[0].getPickPosition().getY()
max_x = pl[0].getPickPosition().getX()
max_y = pl[0].getPickPosition().getY()
for p in pl:
x = p.getPickPosition().getX()
y = p.getPickPosition().getY()
pointList.append((x, y))
if x < min_x:
min_x = x
if x > max_x:
max_x = x
if y < min_y:
min_y = y
if y > max_y:
max_y = y
pointArray = np.array(pointList)
#plt.plot(pointArray[:,0], pointArray[:,1], 'ko', markersize=8)
plt.plot(pointArray[:,0], pointArray[:,1], 'bo')
max_w = 0.0
for e in g.edges():
if g[e[0]][e[1]]['weight'] > max_w:
max_w = g[e[0]][e[1]]['weight']
print 'max_w', max_w
for e in g.edges():
plt.plot([pointArray[e[0]][0], pointArray[e[1]][0]], [pointArray[e[0]][1], pointArray[e[1]][1]], 'b', alpha=g[e[0]][e[1]]['weight'])
#(10000000*g[e[0]][e[1]]['weight']))
#(0.5 + g[e[0]][e[1]]['weight'])) #(g[e[0]][e[1]]['weight']/max_w))
#plt.savefig("edge_colormap.png") # save as png
plt.ylim([min_y, max_y])
plt.xlim([min_x, max_x])
plt.show() # display
def paint3dGraph(pl, g):
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import time
pointList = []
for p in pl:
x = p.getPickPosition().getX()
y = p.getPickPosition().getY()
z = p.getPickPosition().getZ()
pointList.append((x, y, z))
pointArray = np.array(pointList)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot3D(pointArray[:,0], pointArray[:,1], pointArray[:,2], 'ko')
for e in g.edges():
w = g[e[0]][e[1]]['weight']
ax.plot3D([pointArray[e[0]][0], pointArray[e[1]][0]], [pointArray[e[0]][1], pointArray[e[1]][1]], [pointArray[e[0]][2], pointArray[e[1]][2]],
'b', color=(w, 0, 1.0 -w), alpha=0.2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
for angle in range(0, 90):
#time.sleep(0.0001)
ax.view_init(30, angle)
plt.draw()
plt.show(block=True)
def graphToMarkerFile(filePath, pl, g, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, name='graph'):
import math
mf = open(filePath, 'w')
mf.write('<marker_set name=\"' + name + '\">')
mf.write('\n')
pointList = []
for i in range(len(pl)):
p = pl[i]
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
mf.write('<marker id=\"' + str(i) + '\" x=\"' + str(x) + '\" y=\"' + str(y) + '\" z=\"' + str(z) + '\"' +
' r=\"0\" g=\"0\" b=\"1\" radius=\"1.0\"/>')
mf.write('\n')
##
min_w = 1.0
max_w = 0.0
for e in g.edges():
s = e[0]
t = e[1]
w = g[s][t]['weight']
if w < min_w:
min_w = w
if w > max_w:
max_w = w
links = []
for e in g.edges():
s = e[0]
t = e[1]
w = g[s][t]['weight']
if (t, s) in links:
continue
if s in g[t]:
op_w = g[t][s]['weight']
if op_w > w:
w = op_w
if w == min_w:
w = 0.0
w = w/max_w
mf.write('<link id1=\"' + str(s) + '\" id2=\"' + str(t) + '\" r=\"' + str(w) + '\" g=\"0\" b=\"' + str(1.0 -w) + '\" radius="0.5"/>')
mf.write('\n')
links.append((s, t))
mf.write('</marker_set>')
mf.close()
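# The writer above produces a UCSF Chimera marker file (.cmm).  A minimal,
# purely illustrative sketch of its output (coordinates and edge weights are
# hypothetical; only the element shape follows the code):
# <marker_set name="graph">
# <marker id="0" x="10.0" y="12.5" z="3.0" r="0" g="0" b="1" radius="1.0"/>
# <marker id="1" x="14.0" y="11.0" z="5.5" r="0" g="0" b="1" radius="1.0"/>
# <link id1="0" id2="1" r="0.8" g="0" b="0.2" radius="0.5"/>
# </marker_set>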
def peptideExitsToBildFile(filePath, pl, polysomeId, pixelSizeFactor=1.0, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, riboType='80S', coneRadius=1.0):
import numpy as np
from pytom.angles.angleFnc import pointRotateZXZ
templateCenter = 0
point3 = 0
point4 = 0
if riboType == '80S':
templateCenter = np.array([10.0, 10.0, 10.0])
point3 = (np.array([10.0, 15.0, 8.0]) - templateCenter)*pixelSizeFactor #peptide exit
point4 = (np.array([10.0, 17.0, 6.0]) - templateCenter)*pixelSizeFactor # peptide exit top
elif riboType == '70S':
templateCenter = np.array([7.5, 7.5, 7.5])
point3 = (np.array([7.0, 11.0, 6.0]) - templateCenter)*pixelSizeFactor #peptide exit
point4 = (np.array([7.0, 14.0, 6.0]) - templateCenter)*pixelSizeFactor # peptide exit top
polysomeList = []
for i in pl._polysomeMap[polysomeId]:
polysomeList.append(pl[i])
f = open(filePath, 'w')
f.write('.color 0 1 0')
f.write('\n')
for p in polysomeList:
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
t = np.array([x, y, z])
z1 = p.getRotation().getZ1()
z2 = p.getRotation().getZ2()
x1 = p.getRotation().getX()
pepExit = np.array(pointRotateZXZ(point3.tolist(), z1, z2, x1)) + t
pepExitTop = np.array(pointRotateZXZ(point4.tolist(), z1, z2, x1)) + t
f.write('.cone ' + str(pepExitTop[0]) + ' ' + str(pepExitTop[1]) + ' ' + str(pepExitTop[2]) + ' ' +
str(pepExit[0]) + ' ' + str(pepExit[1]) + ' ' + str(pepExit[2]) + ' ' + str(coneRadius))
f.write('\n')
f.close()
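# peptideExitsToBildFile emits a UCSF Chimera BILD file: one '.color' line and
# then one '.cone' record per particle, drawn between the rotated/translated
# 'peptide exit top' and 'peptide exit' points.  An illustrative sketch of two
# output lines (coordinates are hypothetical):
#   .color 0 1 0
#   .cone 101.2 85.7 42.0 99.8 83.1 40.5 1.0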
def mRNA2MarkerFile(filePath, pl, polysomeId, pixelSizeFactor=1.0, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, riboType='80S', markerRadius=0.5, linkRadius=0.2, knn=3, rnn=50.0):
import numpy as np
from pytom.angles.angleFnc import pointRotateZXZ
from scipy.spatial import KDTree
if knn < 3:
knn = 3
templateCenter = 0
point1 = 0
point2 = 0
if riboType == '80S':
templateCenter = np.array([10.0, 10.0, 10.0])
point1 = (np.array([10.0, 5.0, 10.0]) - templateCenter)*pixelSizeFactor ##entry
point2 = (np.array([8.0, 6.0, 9.0]) - templateCenter)*pixelSizeFactor ##exit
elif riboType == '70S':
templateCenter = np.array([7.5, 7.5, 7.5])
point1 = (np.array([8.0, 3.0, 8.0]) - templateCenter)*pixelSizeFactor ##entry
point2 = (np.array([4.0, 3.0, 8.0]) - templateCenter)*pixelSizeFactor ##exit
polysomeList = []
for i in pl._polysomeMap[polysomeId]:
polysomeList.append(pl[i])
f = open(filePath, 'w') #.cmm
f.write('<marker_set name=\"mRNA ' + str(polysomeId) + '\">')
f.write('\n')
marker_id = 0
treeData = []
pointData = []
for p in polysomeList:
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
t = np.array([x, y, z])
z1 = p.getRotation().getZ1()
z2 = p.getRotation().getZ2()
x1 = p.getRotation().getX()
entryPoint = np.array(pointRotateZXZ(point1.tolist(), z1, z2, x1)) + t
exitPoint = np.array(pointRotateZXZ(point2.tolist(), z1, z2, x1)) + t
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(entryPoint[0]) + '\" y=\"' + str(entryPoint[1]) + '\" z=\"' + str(entryPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(exitPoint[0]) + '\" y=\"' + str(exitPoint[1]) + '\" z=\"' + str(exitPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
treeData.append((entryPoint[0], entryPoint[1], entryPoint[2]))
pointData.append([entryPoint, exitPoint])
tree = KDTree(list(treeData))
for i in range(len(polysomeList)):
entryPoint = pointData[i][0]
exitPoint = pointData[i][1]
f.write('<link id1=\"' + str(i*2) + '\" id2=\"' + str((i*2) + 1) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
query_tuple = tuple(exitPoint)
#####
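# tree.query returns the knn nearest entry points to this particle's exit
# point; neighbours farther away than rnn are reported with an infinite
# distance, so the code below truncates the index list at the first 'inf'
# to keep only neighbours that were actually found.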
nnd, nni = tree.query(query_tuple, k=knn, eps=0.0, p=2.0, distance_upper_bound=rnn)
nnd = list(nnd)
nni = list(nni)
nn = 0
if nnd.count(float('inf')) > 0:
infIndex = nnd.index(float('inf'))
nn = nni[:infIndex]
else:
nn = nni
#####
nCount = 0
for j in nn:
j = int(j)
if j == i:
continue
f.write('<link id1=\"' + str((i*2) + 1) + '\" id2=\"' + str((j*2)) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
f.write('</marker_set>')
f.close()
def mRNA2MarkerFile2(filePath, pl, polysomeId, pixelSizeFactor=1.0, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, riboType='80S', markerRadius=0.5, linkRadius=0.2, knn=3, rnn=50.0):
import numpy as np
from pytom.angles.angleFnc import pointRotateZXZ
import networkx as nx
templateCenter = 0
point1 = 0
point2 = 0
if riboType == '80S':
templateCenter = np.array([10.0, 10.0, 10.0])
point1 = (np.array([10.0, 5.0, 10.0]) - templateCenter)*pixelSizeFactor ##entry
point2 = (np.array([8.0, 6.0, 9.0]) - templateCenter)*pixelSizeFactor ##exit
elif riboType == '70S':
templateCenter = np.array([7.5, 7.5, 7.5])
point1 = (np.array([8.0, 3.0, 8.0]) - templateCenter)*pixelSizeFactor ##entry
point2 = (np.array([4.0, 3.0, 8.0]) - templateCenter)*pixelSizeFactor ##exit
polysomeList = []
polysomeSeq = pl._polysomeMap[polysomeId]
for i in polysomeSeq:
polysomeList.append(pl[i])
subG = pl._G.subgraph(polysomeSeq)
paths = nx.single_source_dijkstra_path(subG, polysomeId, weight='neg_log')
f = open(filePath, 'w') #.cmm
f.write('<marker_set name=\"mRNA ' + str(polysomeId) + '\">')
f.write('\n')
marker_id = 0
for p in polysomeList:
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
t = np.array([x, y, z])
z1 = p.getRotation().getZ1()
z2 = p.getRotation().getZ2()
x1 = p.getRotation().getX()
entryPoint = np.array(pointRotateZXZ(point1.tolist(), z1, z2, x1)) + t
exitPoint = np.array(pointRotateZXZ(point2.tolist(), z1, z2, x1)) + t
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(entryPoint[0]) + '\" y=\"' + str(entryPoint[1]) + '\" z=\"' + str(entryPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(exitPoint[0]) + '\" y=\"' + str(exitPoint[1]) + '\" z=\"' + str(exitPoint[2]) + '\"' +
' r=\"1\" g=\"0.5\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
edgeSet = set()
for i in range(len(polysomeList)):
f.write('<link id1=\"' + str(i*2) + '\" id2=\"' + str((i*2) + 1) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
vertex_id = polysomeSeq[i]
path = paths[vertex_id]
# print vertex_id, path
for j in range(len(path) -1):
edgeTuple = (path[j], path[j +1])
if not edgeTuple in edgeSet:
s = polysomeSeq.index(edgeTuple[0])
t = polysomeSeq.index(edgeTuple[1])
f.write('<link id1=\"' + str((s*2) + 1) + '\" id2=\"' + str((t*2)) + '\" r=\"1\" g=\"0\" b=\"0\" radius="0.2"/>')
f.write('\n')
edgeSet.add(edgeTuple)
f.write('</marker_set>')
f.close()
def mRNA2MarkerFile3(filePath, pl, polysomeId, entryPoint, exitPoint, pixelSizeFactor=1.0, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, markerRadius=0.5, linkRadius=0.2, knn=3, rnn=50.0):
import numpy as np
from pytom.angles.angleFnc import pointRotateZXZ
import networkx as nx
point1 = entryPoint
point2 = exitPoint
polysomeList = []
polysomeSeq = pl._polysomeMap[polysomeId]
for i in polysomeSeq:
polysomeList.append(pl[i])
subG = pl._G.subgraph(polysomeSeq)
paths = nx.single_source_dijkstra_path(subG, polysomeId, weight='neg_log')
f = open(filePath, 'w') #.cmm
f.write('<marker_set name=\"mRNA ' + str(polysomeId) + '\">')
f.write('\n')
marker_id = 0
for p in polysomeList:
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
t = np.array([x, y, z])
z1 = p.getRotation().getZ1()
z2 = p.getRotation().getZ2()
x1 = p.getRotation().getX()
entryPoint = np.array(pointRotateZXZ(point1.tolist(), z1, z2, x1)) + t
exitPoint = np.array(pointRotateZXZ(point2.tolist(), z1, z2, x1)) + t
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(entryPoint[0]) + '\" y=\"' + str(entryPoint[1]) + '\" z=\"' + str(entryPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(exitPoint[0]) + '\" y=\"' + str(exitPoint[1]) + '\" z=\"' + str(exitPoint[2]) + '\"' +
' r=\"1\" g=\"0.5\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
edgeSet = set()
for i in range(len(polysomeList)):
f.write('<link id1=\"' + str(i*2) + '\" id2=\"' + str((i*2) + 1) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
vertex_id = polysomeSeq[i]
path = paths[vertex_id]
# print vertex_id, path
for j in range(len(path) -1):
edgeTuple = (path[j], path[j +1])
if not edgeTuple in edgeSet:
s = polysomeSeq.index(edgeTuple[0])
t = polysomeSeq.index(edgeTuple[1])
f.write('<link id1=\"' + str((s*2) + 1) + '\" id2=\"' + str((t*2)) + '\" r=\"1\" g=\"0\" b=\"0\" radius="0.2"/>')
f.write('\n')
edgeSet.add(edgeTuple)
f.write('</marker_set>')
f.close()
def mRNA2MarkerFile4(filePath, pl, polysomeId, entry_Point, exit_Point, binFactor=1, xOffset=0, yOffset=0, zOffset=0, useShifts=False, markerRadius=0.5, linkRadius=0.2, knn=3, rnn=50.0):
import numpy as np
from pytom.angles.angleFnc import pointRotateZXZ
from scipy.spatial import KDTree
if knn < 3:
knn = 3
point1 = entry_Point
point2 = exit_Point
polysomeList = []
for i in pl._polysomeMap[polysomeId]:
polysomeList.append(pl[i])
f = open(filePath, 'w') #.cmm
f.write('<marker_set name=\"mRNA ' + str(polysomeId) + '\">')
f.write('\n')
marker_id = 0
treeData = []
pointData = []
for p in polysomeList:
x = (p.getPickPosition().getX()*binFactor) + xOffset
y = (p.getPickPosition().getY()*binFactor) + yOffset
z = (p.getPickPosition().getZ()*binFactor) + zOffset
if useShifts:
x = x + (p.getShift().getX()*binFactor)
y = y + (p.getShift().getY()*binFactor)
z = z + (p.getShift().getZ()*binFactor)
t = np.array([x, y, z])
z1 = p.getRotation().getZ1()
z2 = p.getRotation().getZ2()
x1 = p.getRotation().getX()
entryPoint = np.array(pointRotateZXZ(point1.tolist(), z1, z2, x1)) + t
exitPoint = np.array(pointRotateZXZ(point2.tolist(), z1, z2, x1)) + t
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(entryPoint[0]) + '\" y=\"' + str(entryPoint[1]) + '\" z=\"' + str(entryPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
f.write('<marker id=\"' + str(marker_id) + '\" x=\"' + str(exitPoint[0]) + '\" y=\"' + str(exitPoint[1]) + '\" z=\"' + str(exitPoint[2]) + '\"' +
' r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(markerRadius) + '\"/>')
f.write('\n')
marker_id = marker_id + 1
treeData.append((entryPoint[0], entryPoint[1], entryPoint[2]))
pointData.append([entryPoint, exitPoint])
tree = KDTree(list(treeData))
for i in range(len(polysomeList)):
entryPoint = pointData[i][0]
exitPoint = pointData[i][1]
f.write('<link id1=\"' + str(i*2) + '\" id2=\"' + str((i*2) + 1) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
query_tuple = tuple(exitPoint)
#####
nnd, nni = tree.query(query_tuple, k=knn, eps=0.0, p=2.0, distance_upper_bound=rnn)
nnd = list(nnd)
nni = list(nni)
nn = 0
if nnd.count(float('inf')) > 0:
infIndex = nnd.index(float('inf'))
nn = nni[:infIndex]
else:
nn = nni
#####
nCount = 0
for j in nn:
j = int(j)
if j == i:
continue
f.write('<link id1=\"' + str((i*2) + 1) + '\" id2=\"' + str((j*2)) + '\" r=\"1\" g=\"0\" b=\"0\" radius=\"' + str(linkRadius) + '\"/>')
f.write('\n')
f.write('</marker_set>')
f.close()
|
import sys
import commands
import re
import argparse
import json
from heapq import heappush, heappop
number_articulations = 0
labels = {}
vars = {}
debug = False
cut_id = 0
first_label = ''
last_label = ''
first_var = ''
first_var_bd = ''
last_var = ''
heap_cuts = []
my_graph = {}
def match_names(blocks):
global labels
global vars
global first_label
global last_label
global first_var
global first_var_bd
global last_var
first_var_ok = False
for i in range(1, len(blocks)):
id, instructions = blocks[i].split(':', 1)
var = id[1:]
if instructions.find('<label>:') != -1:
#print 'NOT IMPLEMENTED'
label = instructions.split('<label>:')[1].split(' ')[0]
else:
label = instructions.split('\n', 1)[1].split(':')[0]
if "bd_" in var:
if first_var_ok:
last_label = label
last_var = var
else:
first_var_ok = True
first_label = label
first_var_bd = var
var = var.replace("bd","bs")
first_var = var
labels[var] = label
vars[label] = var
label_costs = {}
transition_costs = {}
# read the file produced by Mihail's code, which consists of tuples
# of the form (source,destination,cost) if the cost is attached to transitions,
# or of the form (block,cost) if the cost is attached to blocks
def readmatchingfile(matchingfile):
global costs
#for line in matchingfile:
# name,arm,cost = line.split(',')
# name = name.replace("(%", "")
# name = name.replace(" ", "")
# cost = cost.replace(")\n", "")
# cost = int(cost.replace(" ", ""))
# label_costs[name] = cost
for line in matchingfile:
source,dest,cost = line.split(',')
source = source.replace("(", "")
source = source.replace("%", "")
source = source.replace(" ", "")
dest = dest.replace("%", "")
dest = dest.replace(" ", "")
cost = cost.replace(")\n", "")
cost = int(cost.replace(" ", ""))
if dest == "":
label_costs[source] = cost
else:
#print source
#print dest
if source not in transition_costs:
transition_costs[source] = {}
transition_costs[source][dest] = cost
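# A sketch of the matching-file lines the parser above accepts (block names
# and costs below are purely illustrative):
#   (%bb1, %bb2, 12)   -> transition_costs['bb1']['bb2'] = 12
#   (%bb3, , 5)        -> label_costs['bb3'] = 5   (empty destination = block cost)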
heads = []
tails = []
semanticcut = []
semanticcut_maxcost = []
def addautomaticcuts():
global my_graph
#for mnode in my_graph[last_var][2]:
mnode = last_var
node = mnode
mergingpoints = [node]
while node != first_var:
node_dominator = my_graph[node][4]
mergingpoints.append(node_dominator)
node = node_dominator
N = len(mergingpoints)
step = 2
#print mergingpoints
while step < N:
k = 0
done=False
while not done:
tail = mergingpoints[k]
if k+step < N:
head = mergingpoints[k+step]
else:
head = mergingpoints[N-1]
done=True
#print "between " + tail + " " + head + " " + str(step)
heads.append([labels[head]])
tails.append([labels[tail]])
semanticcut.append("")
semanticcut_maxcost.append(0)
k += step
step = step*2
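# A small worked example of the cut pattern built above: with N = 5 merging
# points (index 0 = last_var, index N-1 = first_var along the dominator
# chain), the (tail, head) index pairs emitted are (0,2), (2,4), (4,4) for
# step 2 and (0,4), (4,4) for step 4 -- a roughly dyadic covering of the
# chain; the degenerate (N-1, N-1) pairs come from the final iteration of
# each pass.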
def readcutsfile(cutsfile):
# with this file, we can specify that the portion between head1 and tail1
# and the portion between head2 and tail2 are mutually exclusive
index=0
heads.append([])
tails.append([])
semanticcut.append("")
semanticcut_maxcost.append(0)
for line in cutsfile:
if "#" in line:
index = index+1
heads.append([])
tails.append([])
semanticcut.append("")
semanticcut_maxcost.append(0)
else:
head,tail = line.split(',')
head = head.replace("%", "")
head = head.replace(" ", "")
head = head.replace("\n", "")
tail = tail.replace("%", "")
tail = tail.replace(" ", "")
tail = tail.replace("\n", "")
heads[index].append(head)
tails[index].append(tail)
def getcost(label):
global costs
if label in label_costs:
return label_costs[label]
else:
return 0
def getcost_transition(source_label,dest_label):
#print "getcost_transition " + source_label + " " + dest_label
global costs
if source_label in transition_costs:
if dest_label in transition_costs[source_label]:
return transition_costs[source_label][dest_label]
#print "missing cost for transition: " + source_label + " " + dest_label +\
#":" + get_transition_varname(vars[source_label],vars[dest_label])
return 0
def get_tcost_var(var):
return 'c_' + var.split('_')[1] + '_' + var.split('_')[2]
def get_transition_varname(sourcevar,destvar):
#print sourcevar + " --> " + destvar
return "t_" + sourcevar.split('_')[1] + "_" + destvar.split('_')[1]
# count the number of distinct paths between the starting point of the graph
# and a specific node
def getNumPaths(graph, node):
if graph[node][6] != -1:
return graph[node][6]
num = 0
empty = True
for n in graph[node][2]:
empty = False
num += getNumPaths(graph, n)
if empty:
num = 1
graph[node][6] = num
return num
def Maximum(x,y):
if x > y:
return x
else:
return y
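# get_summary_max computes, by memoized recursion over the predecessor lists,
# the maximum accumulated cost (block cost graph[node][1] plus transition
# costs) of any path from 'start' to 'current' -- i.e. a longest-path summary
# over the acyclic block graph.  'costs' and 'tcosts' are filled in as side
# effects; the _with_path variant below additionally records one maximising
# path per node.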
def get_summary_max(graph,start,end,current,costs,tcosts):
global debug
global labels
if current in costs:
return costs[current]
current_cost = graph[current][1]
if debug:
print 'get_summary_max : ' + labels[current] + ' : ' + str(len(graph[current][2])) + ' predecessors\n'
if current != start:
#iterate through incoming edges
max = 0
for n in graph[current][2]:
if debug:
print ' pred ' + labels[current] + ' : ' + labels[n] + '\n'
cost_transition_var = get_transition_varname(n,current)
tcosts[cost_transition_var] = getcost_transition(labels[n],labels[current])
#print "tcost = " + str(tcosts[cost_transition_var])
max = Maximum(max,get_summary_max(graph,start,end,n,costs,tcosts)+tcosts[cost_transition_var])
#max = Maximum(max,get_summary_max(graph,start,end,n,costs,tcosts))
current_cost += max
costs[current] = current_cost
return current_cost
def get_summary_max_with_path(graph,start,end,current,costs,tcosts,longestpaths):
global debug
global labels
if current in costs:
return costs[current], longestpaths[current]
current_cost = graph[current][1]
longestpath = ""
if debug:
print 'get_summary_max : ' + labels[current] + ' : ' + str(len(graph[current][2])) + ' predecessors\n'
if current != start:
#iterate through incoming edges
max = 0
max_last_transition = 0
for n in graph[current][2]:
if debug:
print ' pred ' + labels[current] + ' : ' + labels[n] + '\n'
cost_transition_var = get_transition_varname(n,current)
tcosts[cost_transition_var] = getcost_transition(labels[n],labels[current])
#print "tcost = " + str(tcosts[cost_transition_var])
#max = Maximum(max,get_summary_max(graph,start,end,n,costs,tcosts)+tcosts[cost_transition_var])
c,p = get_summary_max_with_path(graph,start,end,n,costs,tcosts,longestpaths)
if max < c+tcosts[cost_transition_var]:
longestpath = p
max = c+tcosts[cost_transition_var]
max_last_transition = tcosts[cost_transition_var]
#max = Maximum(max,get_summary_max(graph,start,end,n,costs,tcosts))
# output Julien:
#longestpath += " --("+str(max_last_transition)+")-> " + labels[current] +\
#"(" + str(graph[current][1]) + ")"
# output Mihail:
longestpath += ", " + labels[current] + ")\n(" + labels[current]
current_cost += max
else:
# output Mihail:
longestpath += "(" + labels[current]
# output Julien:
#longestpath += labels[current] + "(" + str(graph[current][1]) + ")"
#longestpath += ""
costs[current] = current_cost
longestpaths[current] = longestpath
return current_cost,longestpath
def main():
global number_articulations
global labels
global vars
global debug
global semanticcut_maxcost
global semanticcut
global cut_id
global first_label
global last_label
global first_var
global first_var_bd
global last_var
global heap_cuts
global my_graph
parser = argparse.ArgumentParser(description='generateSMTwcet')
parser.add_argument("--nosummaries", help="do not add extra information to the SMT formula",action="store_true")
parser.add_argument("--recursivecuts", help="add automatic recursive cuts",action="store_true")
parser.add_argument('filename', type=str,
help='the file name')
parser.add_argument('--matchingfile', type=str,
help='name of the matching file')
parser.add_argument('--smtmatching', type=str,
help='name of the file matching labels to booleans')
parser.add_argument('--cutsfile', type=str,
help='name of the cuts file')
parser.add_argument('--printlongestsyntactic', type=str,
help='name of the file storing the longest syntactic path')
parser.add_argument('--printcutslist', type=str,
help='name of the file that lists the different cuts, in order of difficulty')
args = parser.parse_args()
if args.matchingfile:
usematching = True
matchingfile = open(args.matchingfile, 'r')
readmatchingfile(matchingfile)
else:
usematching = False
if args.cutsfile:
cutsfile = open(args.cutsfile, 'r')
readcutsfile(cutsfile)
add_summaries = not args.nosummaries
#read file
file = open(args.filename, 'r')
smt_formula, blocks = file.read().rsplit('-------', 1)
smt_declarations, smt_assertions = smt_formula.split('(assert')
smt_declarations = smt_declarations.strip() + '\n'
smt_assertions = '(assert \n' + smt_assertions.strip() + '\n'
blocks = blocks.split('BasicBlock')
# do the matching between block labels and block Booleans
match_names(blocks)
# init some variables
extra_assertions = ''
cost_sum = '(= cost (+'
cost_max = 0
# graph
graph = {}
edges = []
sorted_keys = []
nb_cuts = 0
# for each block, create a constraint with a cost value
for i in range(1, len(blocks)):
id, instructions = blocks[i].split(':', 1)
#var = 'bs_' + id[1:]
var = id[1:]
if var == "bd_0":
var = "bs_0"
if var == "bd_1":
var = "bs_1"
k = int(var.split("_")[1])
label = labels[var]
        # the dominator is recorded on the same line in both label formats
        dominator = instructions.split('Dominator = ',1)[1].split('\n',1)[0]
if dominator == first_var_bd:
dominator = first_var
if usematching:
cost = getcost(label)
else:
cost = int(instructions.split(' ')[1])
cost_max += cost
var_cost = k
cost_sum += ' c' + str(var_cost)
if var == first_var:
extra_assertions += '(= c' + str(var_cost) + ' ' + str(cost) + ')\n'
else:
if cost != 0:
extra_assertions+='(= c' + str(var_cost) + ' (ite ' + var + ' ' + str(cost) + ' 0))\n'
else:
extra_assertions+='(= c' + str(var_cost) + ' 0)\n'
#extra_assertions += '(or (and (= ' + var + ' true) (= c' + str(var_cost) + ' ' + str(cost) +\
# ')) (and (= ' + var + ' false) (= c' + str(var_cost) + ' 0)))\n'
smt_declarations += '(declare-fun c' + str(var_cost) + ' () Int)\n'
# build graph incrementally
sorted_keys.append(label)
        # my_graph[var] = [label, cost, incoming_edges, outgoing_edges, dominator, cost_var_name, num_paths_cache]
my_graph[var] = [label,cost,[],[],dominator, 'c' + str(var_cost),-1]
#if i < len(blocks)-1:
if instructions.find('br ') != -1:
br_num = 0
outgoing = instructions.split('br ')[1].split('label')
for edge in outgoing:
e = edge.strip();
if len(e) >= 2 and e[0] == '%':
br_num = br_num + 1
if e[-1] == ',':
dest =e[1:-1]
else:
dest =e[1:]
edges.append((dest, label))
# add the cost of the transition
edge_name = "t_" + vars[label].split('_')[1] + "_" + vars[dest].split('_')[1]
if usematching:
tcost = getcost_transition(label,dest)
else:
tcost = 0
cost_max += cost
edge_cost = "c_" + vars[label].split('_')[1] + "_" + vars[dest].split('_')[1]
cost_sum += ' ' + edge_cost
smt_declarations += '(declare-fun ' + str(edge_cost) + ' () Int)\n'
#extra_assertions += '(or (and (= ' + edge_name + ' true) (= ' + str(edge_cost) + ' ' + str(tcost)
#extra_assertions += ')) (and (= ' + edge_name + ' false) (= ' + str(edge_cost) + ' 0)))\n'
extra_assertions+='(= ' + str(edge_cost) + ' (ite ' +\
edge_name + ' ' + str(tcost) + ' 0))\n'
# create the edges of the graph
for e in edges:
my_graph[vars[e[0]]][2].append(vars[e[1]])
my_graph[vars[e[1]]][3].append(vars[e[0]])
# insert the extra assertions in the formula
smt_declarations += '(declare-fun cost () Int)\n'
cost_sum += '))\n'
extra_assertions += cost_sum
#print json.dumps(my_graph)
#sys.exit(0)
    # Find extra constraints from bifurcations using intermediate results of the (Max, +) algorithm
if args.recursivecuts:
addautomaticcuts()
if add_summaries:
for node in my_graph:
if len(my_graph[node][2]) > 1 :
# this is an interesting point since it has more than 1 incoming edge
costs = {}
tcosts = {}
node_dominator = my_graph[node][4]
longest_path = get_summary_max(my_graph,node_dominator,node,node,costs,tcosts)
constraint = ''
constraint_size = 0
for n in costs:
constraint += ' ' + my_graph[n][5]
constraint_size += 1
for t in tcosts:
constraint += ' ' + get_tcost_var(t)
constraint_size += 1
cut = "cut" + str(cut_id)
heappush(heap_cuts,(constraint_size,cut,str(longest_path)))
cut_id += 1
smt_declarations += '(declare-fun ' + cut + ' () Int)\n'
extra_assertions += '(= ' + cut + ' (+ ' + constraint + '))\n'
extra_assertions += '(<= ' + cut + ' ' + str(longest_path) + '); between blocks ' + labels[node_dominator] + ' and ' + labels[node] + '\n'
nb_cuts += 1
#print my_graph[node][0] + "?"
index = 0
for t in tails:
if my_graph[node][0] in t:
tail = my_graph[node][0]
#print ";" + tail + " is in tails"
position=t.index(tail)
head = heads[index][position]
head_var = vars[head]
tail_var = vars[tail]
costs = {}
tcosts = {}
node_dominator = my_graph[node][4]
longest_path = get_summary_max(my_graph,head_var,tail_var,tail_var,costs,tcosts)
constraint = ''
constraint_size = 0
for n in costs:
constraint += ' ' + my_graph[n][5]
constraint_size += 1
for t in tcosts:
constraint += ' ' + get_tcost_var(t)
constraint_size += 1
#semanticcut_maxcost[index] = max(semanticcut_maxcost[index],longest_path)
#semanticcut[index] += constraint
cut = "cut" + str(cut_id)
heappush(heap_cuts,(constraint_size,cut,str(longest_path)))
cut_id += 1
smt_declarations += '(declare-fun ' + cut + ' () Int)\n'
extra_assertions += '(= ' + cut + ' (+ ' + constraint + '))\n'
extra_assertions += '(<= ' + cut + ' ' + str(longest_path)+\
'); portion between ' + labels[head_var] + ' and ' + labels[tail_var] + '\n'
nb_cuts += 1
index = index+1
# global cost variable
globalcosts = {}
globaltcosts = {}
longestpathsstrings = {}
#debug = True
longest_path, stringpath = get_summary_max_with_path(my_graph,first_var,last_var,last_var,globalcosts,globaltcosts,longestpathsstrings)
#for n in globalcosts:
# print labels[n] + " : " + str(globalcosts[n])
if add_summaries:
constraint = ''
for n in globalcosts:
constraint += ' ' + my_graph[n][5]
for t in globaltcosts:
constraint += ' ' + get_tcost_var(t)
#extra_assertions += '(<= (+' + constraint + ') ' + str(longest_path) + '); between blocks ' + labels[first_var] + ' and ' + labels[last_var] + ' (longest path)\n'
extra_assertions += '(<= (+' + constraint + ') ' + str(longest_path) +')\n'
extra_assertions += '(<= cost ' + str(longest_path) + '); longest path\n'
for index in range(len(semanticcut_maxcost)):
if semanticcut_maxcost[index] > 0:
extra_assertions += '(<= (+' + semanticcut[index] + ') ' +\
str(semanticcut_maxcost[index]) + '); semantic cut num' +\
str(index) + '\n'
smt_formula = smt_declarations + smt_assertions + '(assert (and (= ' + first_var + ' true) (= ' + last_var + ' true)))\n(assert\n(and\n' + extra_assertions + '))\n'
print smt_formula
print '; NB_PATHS = ' + str(getNumPaths(my_graph, vars[last_label]))
print '; NB_PATHS_DIGITS = ' + str(len(str(getNumPaths(my_graph,vars[last_label]))))
print '; NB_CUTS ' + str(nb_cuts)
print '; LONGEST_PATH ' + str(longest_path)
if args.smtmatching:
smtmatching = open(args.smtmatching, 'w')
for k in vars:
smtmatching.write(vars[k]+','+k+'\n')
smtmatching.close()
if args.printlongestsyntactic:
longestpath_file = open(args.printlongestsyntactic, 'w')
longestpath_file.write(stringpath+'\n')
longestpath_file.close()
if args.printcutslist:
cuts_file = open(args.printcutslist, 'w')
for x in xrange(len(heap_cuts)):
c = heappop(heap_cuts)
cuts_file.write(c[1]+' '+c[2]+'\n')
cuts_file.close()
#print json.dumps(my_graph[last_var])
sys.setrecursionlimit(10000)
main()
|
import logging
import transformers
from transformers import AutoConfig
logger = logging.getLogger(__name__)
def get_transformers_auto_model(
model_name, num_classes=None, model_type="AutoModel", **kwargs
):
config = AutoConfig.from_pretrained(
model_name, num_labels=num_classes, **kwargs
)
logger.info(f"Model Config: \n {config}")
return getattr(transformers, model_type).from_pretrained(
model_name, config=config
)
|
#!/usr/bin/env python
from setuptools import find_packages
from setuptools import setup
import re
version = re.search(r'__version__ = (.+)\n', open('threddsclient/__init__.py').read()).group(1)
long_description = (
open('README.rst').read() + '\n' + open('AUTHORS.rst').read() + '\n' + open('CHANGES.rst').read()
)
reqs = [line.strip() for line in open('requirements.txt')]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(
name='threddsclient',
version=version,
description='Thredds catalog client',
long_description=long_description,
classifiers=classifiers,
author='Birdhouse',
    author_email='',
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=reqs,
)
|
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch
def get_loader(root_folder,
batch_size,
num_workers=4):
dataset = dset.ImageFolder(root=root_folder,
transform=transforms.Compose([
transforms.CenterCrop(160),
                                   transforms.Resize(64),  # Scale() was renamed to Resize() in torchvision
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers)
return dataloader
if __name__=='__main__':
dataloader = get_loader("CelebA", 16, 4)
for i, data in enumerate(dataloader):
print(data[0])
break
|
import logging
import operator
import itertools
import os
import numpy as np
from typing import Tuple
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from functools import reduce
from data_loader import (
AmericanNationalCorpusDataset,
ObliterateLetters,
ToTensor,
)
from src.discriminator_net import DiscriminatorNet
from src.generator_net import GeneratorNet
def load_models(args, config, device='cpu') -> Tuple[
GeneratorNet, DiscriminatorNet]:
generator = GeneratorNet(config).to(device)
generator.load_state_dict(
torch.load(
os.path.join(
args.path_to_checkpoints,
f'epoch_{args.epoch_num}_generator.pt'
),
map_location=device
)
)
generator.eval()
discriminator = DiscriminatorNet(config).to(device)
discriminator.load_state_dict(
torch.load(
os.path.join(
args.path_to_checkpoints,
f'epoch_{args.epoch_num}_discriminator.pt'
),
map_location=device
)
)
discriminator.eval()
return generator, discriminator
def show_examples(args, config, device='cpu', shuffle=False):
generator, _ = load_models(args, config, device=device)
noisy_phrases = AmericanNationalCorpusDataset(
config,
transform_raw_phrase=ObliterateLetters(
obliterate_ratio=config['replace_with_noise_probability']
),
transform_sample_dict=ToTensor()
)
noisy_data_loader = DataLoader(
noisy_phrases,
batch_size=1,
num_workers=1,
shuffle=shuffle
)
with torch.no_grad():
for x in itertools.islice(noisy_data_loader, 5):
_input = x['concat_phrase'].to(device)
out = generator.forward(_input).cpu()
print('#' * 40)
print(noisy_phrases.show(x['raw_phrase']))
print(noisy_phrases.show(out))
print('#' * 40)
def measure_accuracy(generator, real_data_loader, fake_data_loader, device):
correct = 0
elements = 0
with torch.no_grad():
for fake_batch, real_batch in tqdm(
zip(fake_data_loader, real_data_loader)
):
_input = fake_batch['concat_phrase'].to(device)
output = generator.forward(_input)
correct += np.sum(
np.argmax(output.detach().cpu().numpy(), axis=-1)
== np.argmax(real_batch['raw_phrase'].numpy(), axis=-1)
)
elements += reduce(
operator.mul,
real_batch['raw_phrase'].shape[:-1],
1
)
# logging.debug(f'{correct} {elements} {correct / elements}')
return correct / elements
def eval_with_mean_accuracy(args, config, device):
noisy_phrases = AmericanNationalCorpusDataset(
config,
transform_raw_phrase=ObliterateLetters(
obliterate_ratio=config['replace_with_noise_probability']
),
transform_sample_dict=ToTensor()
)
real_phrases = AmericanNationalCorpusDataset(
config,
transform_raw_phrase=None,
transform_sample_dict=ToTensor()
)
test_noisy_data_loader = DataLoader(
noisy_phrases,
batch_size=config['batch_size'],
num_workers=config['num_workers'],
shuffle=False
)
test_real_data_loader = DataLoader(
real_phrases,
batch_size=config['batch_size'],
num_workers=config['num_workers'],
shuffle=False
)
generator, _ = load_models(args, config, device)
acc = measure_accuracy(
generator,
test_real_data_loader,
test_noisy_data_loader,
device
)
print(f'Mean Accuracy: {acc:.2f}')
|
'''
Adapt the code from one of the functions above to create a new function called 'multiplier'.
The user should be able to input two numbers that are stored in variables.
The function should multiply the two variables together and return the result to a variable in the main program.
The main program should output the variable containing the result returned from the function.
'''
def multiplier():
number1 = int(input("Enter a number: "))
    number2 = int(input("Enter another number: "))
product = number1 * number2
return product
output_num = multiplier()
print(output_num) |
import requests
url='https://www.python.org'
r = requests.get(url)
print(r.status_code)
|
#!/usr/bin/python
#
# Usage: plot.py [input_file] [xlabel] [ylabel] [x] [y] [where] [where_values] [groupby]
#
# input_file: Input tsv file where the first row contains column names
# xlabel: Label for plot horizontal axis
# ylabel: Label for plot vertical axis
# x: Name of column to plot on horizontal axis
# y: Name of column to plot on vertical axis
# where: Comma-separated list of columns for which to constrain the values contained in the plot
# where_values: Comma-separated list of values by which to constrain the columns given in [where]
# groupby: Comma-separated list of columns on which to group the data into separate curves
#
# The script will generate a 2-dimensional plot containing a set of curves. Values are averaged
# across rows of data that fit the constraints given in [where] and [where_values]. The averages
# are computed for separate curves determined by the [groupby]
#
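#
# Example invocation (the file and column names are hypothetical):
#   python plot.py results.tsv Iterations Accuracy iter acc model cnn seed
#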
import csv
import sys
import numpy as np
from scipy import stats
from random import randint
input_file = sys.argv[1]
xlabel = sys.argv[2]
ylabel = sys.argv[3]
x = sys.argv[4]
y = sys.argv[5]
where = None
where_values = None
if len(sys.argv) > 6 and sys.argv[6] != 'None' and sys.argv[7] != 'None':
where = sys.argv[6].split(",")
where_values = sys.argv[7].split(",")
groupby = None
if len(sys.argv) > 8:
groupby = sys.argv[8].split(",")
make_table = False
if len(sys.argv) > 9:
make_table = (sys.argv[9] == "True")
def read_tsv_file(file_path):
f = open(file_path, 'rt')
rows = []
try:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
rows.append(row)
finally:
f.close()
return rows
def row_match(row, where, where_values):
if where is None:
return True
for i in range(len(where)):
if row[where[i]] != where_values[i]:
return False
return True
# Map [groupby],x -> y value list filtered by 'where'
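# For example, with groupby=['model'] the returned structure might look like
# {'cnn': {'10': [0.91, 0.93], '20': [0.95]}} (hypothetical values), i.e. one
# nested dict per groupby key, mapping x values to lists of y values.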
def aggregate(rows, x, y, where, where_values, groupby):
agg = dict()
for row in rows:
if not row_match(row, where, where_values):
continue
cur_agg = agg
if groupby is not None:
for key in groupby:
if row[key] not in cur_agg:
cur_agg[row[key]] = dict()
cur_agg = cur_agg[row[key]]
x_value = row[x]
y_value = row[y]
if x_value not in cur_agg:
cur_agg[x_value] = []
cur_agg[x_value].append(float(y_value))
return agg
def compute_statistics_helper(agg, agg_depth, keys, statistics, overall_statistics):
if agg_depth == 0:
cur_stats = statistics
for key in keys:
if key not in cur_stats:
cur_stats[key] = dict()
cur_stats = cur_stats[key]
cur_stats["mu"] = np.mean(agg)
cur_stats["stderr"] = stats.sem(agg)
cur_stats["max"] = max(agg)
overall_statistics["y_max"] = max(overall_statistics["y_max"], cur_stats["mu"])
if len(keys[len(keys) - 1]) != 0:
try:
overall_statistics["x_max"] = max(overall_statistics["x_max"], float(keys[len(keys) - 1]))
except ValueError:
pass
else:
for key in agg:
keys.append(key)
compute_statistics_helper(agg[key], agg_depth - 1, keys, statistics, overall_statistics)
keys.pop()
return statistics, overall_statistics
def compute_statistics(agg, groupby):
overall_statistics = dict()
overall_statistics["y_max"] = 1.0
overall_statistics["x_max"] = 0
statistics = dict()
depth = 1
if groupby is not None:
depth = len(groupby) + 1
return compute_statistics_helper(agg, depth, [], statistics, overall_statistics)
def make_latex_plot_helper(statistics, groupby, depth, keys, s):
if depth == 0:
plot_str = "\\addplot[color=black!" + str(randint(30,100)) + ",dash pattern=on " + str(randint(1,3)) + "pt off " + str(randint(1,2)) + "pt,error bars/.cd, y dir=both,y explicit] coordinates {\n"
x_values = [float(x_value) for x_value in statistics.keys()]
x_values.sort()
for x_value in x_values:
x_str = str(int(x_value))
plot_str = plot_str + "(" + x_str + "," + str(statistics[x_str]["mu"]) + ")+-(0.0," + str(statistics[x_str]["stderr"]) + ")\n"
plot_str = plot_str + "};\n"
plot_str = plot_str + "\\addlegendentry{\\tiny{"
if groupby is not None:
for i in range(len(groupby)):
plot_str = plot_str + groupby[i] + "=" + keys[i] + " "
plot_str = plot_str.strip()
plot_str = plot_str + "}};\n\n"
return s + plot_str
else:
for key in statistics:
keys.append(key)
s = make_latex_plot_helper(statistics[key], groupby, depth - 1, keys, s)
keys.pop()
return s
def make_latex_plot(statistics, overall_statistics, xlabel, ylabel, groupby):
s = ("\\begin{figure*}[ht]\n"
"\\begin{center}\n"
"\\begin{tikzpicture}\n"
"\\begin{axis}[%\n"
"width=.5\\textwidth,height=.5\\textwidth,\n"
"anchor=origin, % Shift the axis so its origin is at (0,0)\n"
"ymin=0,ymax=" + str(overall_statistics["y_max"]) + ",xmin=0,xmax=" + str(overall_statistics["x_max"]) + ",%\n"
"xlabel=" + xlabel + ",\n"
"ylabel=" + ylabel + ",\n"
"legend pos=outer north east\n"
"]\n"
)
depth = 0
if groupby is not None:
depth = len(groupby)
s = s + make_latex_plot_helper(statistics, groupby, depth, [], "")
s = s + ("\\end{axis}\n"
"\\end{tikzpicture}\n"
"\\end{center}\n"
"\\end{figure*}\n"
)
return s
def make_aggregate_table_helper(statistics, groupby, depth, keys, s):
if depth == 0:
try:
x_values = [float(x_value) if len(x_value) != 0 else "" for x_value in statistics.keys()]
except ValueError:
x_values = [x_value for x_value in statistics.keys()]
x_values.sort()
for x_value in x_values:
x_str = str(x_value)
for key in keys:
s += key + "\t"
if x_str not in statistics:
x_str = str(int(float(x_str))) # FIXME Stupid hack for now
s += x_str + "\t" + str(statistics[x_str]["mu"]) + "\t" + str(statistics[x_str]["stderr"]) + "\t" + str(statistics[x_str]["max"]) + "\n"
return s
else:
for key in statistics:
keys.append(key)
s = make_aggregate_table_helper(statistics[key], groupby, depth - 1, keys, s)
keys.pop()
return s
def make_aggregate_table(statistics, overall_statistics, xlabel, ylabel, groupby):
s = "\t".join(groupby) + "\t" + xlabel + "\t" + ylabel + "\t" + ylabel + " (stderr)" + "\t" + ylabel + " (max)\n"
depth = 0
if groupby is not None:
depth = len(groupby)
s = s + make_aggregate_table_helper(statistics, groupby, depth, [], "")
return s
rows = read_tsv_file(input_file)
agg = aggregate(rows, x, y, where, where_values, groupby)
statistics, overall_statistics = compute_statistics(agg, groupby)
if make_table:
print(make_aggregate_table(statistics, overall_statistics, xlabel, ylabel, groupby))
else:
print(make_latex_plot(statistics, overall_statistics, xlabel, ylabel, groupby))
|
import gc
import io
import math
import sys
import time
from PIL import Image, ImageOps
import requests
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from tqdm.notebook import tqdm
import numpy as np
sys.path.append("./glid-3-xl")
from jack_guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from dalle_pytorch import DiscreteVAE, VQGanVAE
from einops import rearrange
from math import log2, sqrt
import argparse
import pickle
import shutil
import os
from os.path import exists as path_exists
sys.path.append("glid-3-xl/encoders")
from encoders.modules import BERTEmbedder
from CLIP import clip
from pathvalidate import sanitize_filename
torch.cuda.empty_cache()
def run_model(args, status, stoutput, DefaultPaths):
global model, diffusion, ldm, bert, last_model, clip_model, clip_preprocess
try:
last_model
except:
last_model = ''
print(args)
def fetch(url_or_path):
if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
r = requests.get(url_or_path)
r.raise_for_status()
fd = io.BytesIO()
fd.write(r.content)
fd.seek(0)
return fd
return open(url_or_path, 'rb')
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
return torch.cat(cutouts)
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def tv_loss(input):
"""L2 total variation loss, as in Mahendran et al."""
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
return (x_diff**2 + y_diff**2).mean([1, 2, 3])
device = torch.device('cuda:0' if (torch.cuda.is_available() and not args.cpu) else 'cpu')
print('Using device:', device)
print(args.model_path)
model_state_dict = torch.load(args.model_path, map_location='cpu')
model_params = {
'attention_resolutions': '32,16,8',
'class_cond': False,
'diffusion_steps': 1000,
'rescale_timesteps': True,
'timestep_respacing': '27', # Modify this value to decrease the number of
# timesteps.
'image_size': 32,
'learn_sigma': False,
'noise_schedule': 'linear',
'num_channels': 320,
'num_heads': 8,
'num_res_blocks': 2,
'resblock_updown': False,
'use_fp16': False,
'use_scale_shift_norm': False,
'clip_embed_dim': 768 if 'clip_proj.weight' in model_state_dict else None,
'image_condition': True if model_state_dict['input_blocks.0.0.weight'].shape[1] == 8 else False,
'super_res_condition': True if 'external_block.0.0.weight' in model_state_dict else False,
}
if args.ddpm:
model_params['timestep_respacing'] = 1000
if args.ddim:
if args.steps:
model_params['timestep_respacing'] = 'ddim'+str(args.steps)
else:
model_params['timestep_respacing'] = 'ddim50'
elif args.steps:
model_params['timestep_respacing'] = str(args.steps)
model_config = model_and_diffusion_defaults()
model_config.update(model_params)
if args.cpu:
model_config['use_fp16'] = False
# Load models
if(last_model == args.model_path):
try:
model
status.write(f"Loading {args.model_path} loaded.")
except:
status.write(f"Loading {args.model_path} ...\n")
model, diffusion = create_model_and_diffusion(**model_config)
model.load_state_dict(model_state_dict, strict=False)
model.requires_grad_(args.clip_guidance).eval().to(device)
else:
#Yea I should make a function
status.write(f"Loading {args.model_path} ...\n")
model, diffusion = create_model_and_diffusion(**model_config)
model.load_state_dict(model_state_dict, strict=False)
model.requires_grad_(args.clip_guidance).eval().to(device)
if model_config['use_fp16']:
model.convert_to_fp16()
else:
model.convert_to_fp32()
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
# vae
try:
ldm
set_requires_grad(ldm, args.clip_guidance)
except:
status.write(f"Loading {args.kl_path} ...\n")
ldm = torch.load(args.kl_path, map_location="cpu")
ldm.to(device)
ldm.eval()
ldm.requires_grad_(args.clip_guidance)
set_requires_grad(ldm, args.clip_guidance)
try:
bert
set_requires_grad(bert, False)
except:
status.write(f"Loading {args.bert_path} ...\n")
bert = BERTEmbedder(1280, 32)
sd = torch.load(args.bert_path, map_location="cpu")
bert.load_state_dict(sd)
bert.to(device)
bert.half().eval()
set_requires_grad(bert, False)
# clip
try:
clip_model
except:
clip_model, clip_preprocess = clip.load('ViT-L/14', device=device, jit=False)
clip_model.eval().requires_grad_(False)
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
def do_run():
if args.seed >= 0:
torch.manual_seed(args.seed)
# bert context
text_emb = bert.encode([args.text]*args.batch_size).to(device).float()
text_blank = bert.encode([args.negative]*args.batch_size).to(device).float()
text = clip.tokenize([args.text]*args.batch_size, truncate=True).to(device)
text_clip_blank = clip.tokenize([args.negative]*args.batch_size, truncate=True).to(device)
# clip context
text_emb_clip = clip_model.encode_text(text)
text_emb_clip_blank = clip_model.encode_text(text_clip_blank)
make_cutouts = MakeCutouts(clip_model.visual.input_resolution, args.cutn)
text_emb_norm = text_emb_clip[0] / text_emb_clip[0].norm(dim=-1, keepdim=True)
image_embed = None
# image context
if args.edit:
if args.edit.endswith('.npy'):
with open(args.edit, 'rb') as f:
im = np.load(f)
im = torch.from_numpy(im).unsqueeze(0).to(device)
input_image = torch.zeros(1, 4, args.height//8, args.width//8, device=device)
y = args.edit_y//8
x = args.edit_x//8
ycrop = y + im.shape[2] - input_image.shape[2]
xcrop = x + im.shape[3] - input_image.shape[3]
ycrop = ycrop if ycrop > 0 else 0
xcrop = xcrop if xcrop > 0 else 0
input_image[0,:,y if y >=0 else 0:y+im.shape[2],x if x >=0 else 0:x+im.shape[3]] = im[:,:,0 if y > 0 else -y:im.shape[2]-ycrop,0 if x > 0 else -x:im.shape[3]-xcrop]
input_image_pil = ldm.decode(input_image)
input_image_pil = TF.to_pil_image(input_image_pil.squeeze(0).add(1).div(2).clamp(0, 1))
input_image *= 0.18215
else:
w = args.edit_width if args.edit_width else args.width
h = args.edit_height if args.edit_height else args.height
input_image_pil = Image.open(fetch(args.edit)).convert('RGB')
input_image_pil = ImageOps.fit(input_image_pil, (w, h))
input_image = torch.zeros(1, 4, args.height//8, args.width//8, device=device)
im = transforms.ToTensor()(input_image_pil).unsqueeze(0).to(device)
im = 2*im-1
im = ldm.encode(im).sample()
y = args.edit_y//8
x = args.edit_x//8
input_image = torch.zeros(1, 4, args.height//8, args.width//8, device=device)
ycrop = y + im.shape[2] - input_image.shape[2]
xcrop = x + im.shape[3] - input_image.shape[3]
ycrop = ycrop if ycrop > 0 else 0
xcrop = xcrop if xcrop > 0 else 0
input_image[0,:,y if y >=0 else 0:y+im.shape[2],x if x >=0 else 0:x+im.shape[3]] = im[:,:,0 if y > 0 else -y:im.shape[2]-ycrop,0 if x > 0 else -x:im.shape[3]-xcrop]
input_image_pil = ldm.decode(input_image)
input_image_pil = TF.to_pil_image(input_image_pil.squeeze(0).add(1).div(2).clamp(0, 1))
input_image *= 0.18215
if args.mask:
mask_image = Image.open(fetch(args.mask)).convert('L')
mask_image = mask_image.resize((args.width//8,args.height//8), Image.ANTIALIAS)
mask = transforms.ToTensor()(mask_image).unsqueeze(0).to(device)
else:
print('draw the area for inpainting, then close the window')
app = QApplication(sys.argv)
d = Draw(args.width, args.height, input_image_pil)
app.exec_()
mask_image = d.getCanvas().convert('L').point( lambda p: 255 if p < 1 else 0 )
mask_image.save('mask.png')
mask_image = mask_image.resize((args.width//8,args.height//8), Image.ANTIALIAS)
mask = transforms.ToTensor()(mask_image).unsqueeze(0).to(device)
mask1 = (mask > 0.5)
mask1 = mask1.float()
input_image *= mask1
image_embed = torch.cat(args.batch_size*2*[input_image], dim=0).float()
elif model_params['image_condition']:
# using inpaint model but no image is provided
image_embed = torch.zeros(args.batch_size*2, 4, args.height//8, args.width//8, device=device)
kwargs = {
"context": torch.cat([text_emb, text_blank], dim=0).float(),
"clip_embed": torch.cat([text_emb_clip, text_emb_clip_blank], dim=0).float() if model_params['clip_embed_dim'] else None,
"image_embed": image_embed
}
# Create a classifier-free guidance sampling function
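        # The batch is doubled: the first half is conditioned on the prompt, the
        # second half on the negative/blank prompt; the guided noise estimate is
        # uncond_eps + guidance_scale * (cond_eps - uncond_eps), as computed below.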
def model_fn(x_t, ts, **kwargs):
half = x_t[: len(x_t) // 2]
combined = torch.cat([half, half], dim=0)
model_out = model(combined, ts, **kwargs)
eps, rest = model_out[:, :3], model_out[:, 3:]
cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
half_eps = uncond_eps + args.guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps], dim=0)
return torch.cat([eps, rest], dim=1)
cur_t = None
def cond_fn(x, t, context=None, clip_embed=None, image_embed=None):
with torch.enable_grad():
x = x[:args.batch_size].detach().requires_grad_()
n = x.shape[0]
my_t = torch.ones([n], device=device, dtype=torch.long) * cur_t
kw = {
'context': context[:args.batch_size],
'clip_embed': clip_embed[:args.batch_size] if model_params['clip_embed_dim'] else None,
'image_embed': image_embed[:args.batch_size] if image_embed is not None else None
}
out = diffusion.p_mean_variance(model, x, my_t, clip_denoised=False, model_kwargs=kw)
fac = diffusion.sqrt_one_minus_alphas_cumprod[cur_t]
x_in = out['pred_xstart'] * fac + x * (1 - fac)
x_in /= 0.18215
x_img = ldm.decode(x_in)
clip_in = normalize(make_cutouts(x_img.add(1).div(2)))
clip_embeds = clip_model.encode_image(clip_in).float()
dists = spherical_dist_loss(clip_embeds.unsqueeze(1), text_emb_clip.unsqueeze(0))
dists = dists.view([args.cutn, n, -1])
losses = dists.sum(2).mean(0)
loss = losses.sum() * args.clip_guidance_scale
return -torch.autograd.grad(loss, x)[0]
if args.ddpm:
sample_fn = diffusion.ddpm_sample_loop_progressive
elif args.ddim:
sample_fn = diffusion.ddim_sample_loop_progressive
else:
sample_fn = diffusion.plms_sample_loop_progressive
def save_sample(i, sample, clip_score=False):
for k, image in enumerate(sample['pred_xstart'][:args.batch_size]):
image /= 0.18215
im = image.unsqueeze(0)
out = ldm.decode(im)
out = TF.to_pil_image(out.squeeze(0).add(1).div(2).clamp(0, 1))
out.save(f'{k}-{args.image_file}')
imageLocationInternal.append(f'{k}-{args.image_file}')
if clip_score:
image_emb = clip_model.encode_image(clip_preprocess(out).unsqueeze(0).to(device))
image_emb_norm = image_emb / image_emb.norm(dim=-1, keepdim=True)
similarity = torch.nn.functional.cosine_similarity(image_emb_norm, text_emb_norm, dim=-1)
final_filename = f'output/{args.prefix}_{similarity.item():0.3f}_{i * args.batch_size + k:05}.png'
#os.rename(filename, final_filename)
npy_final = f'output_npy/{args.prefix}_{similarity.item():0.3f}_{i * args.batch_size + k:05}.npy'
#os.rename(npy_filename, npy_final)
if args.init_image:
init = Image.open(args.init_image).convert('RGB')
init = init.resize((int(args.width), int(args.height)), Image.LANCZOS)
init = TF.to_tensor(init).to(device).unsqueeze(0).clamp(0,1)
h = ldm.encode(init * 2 - 1).sample() * 0.18215
init = torch.cat(args.batch_size*2*[h], dim=0)
else:
init = None
print(init)
#image_display = Output()
for i in range(args.num_batches):
cur_t = diffusion.num_timesteps - 1
total_steps = cur_t
status.write("Starting the execution...")
samples = sample_fn(
model_fn,
(args.batch_size*2, 4, int(args.height/8), int(args.width/8)),
clip_denoised=False,
model_kwargs=kwargs,
cond_fn=cond_fn if args.clip_guidance else None,
device=device,
progress=True,
init_image=init,
skip_timesteps=args.skip_timesteps if init is not None else 0,
)
itt = 0
before_start_time = time.perf_counter()
bar_container = status.container()
iteration_counter = bar_container.empty()
progress_bar = bar_container.progress(0)
for j, sample in enumerate(samples):
if(itt==0):
iteration_counter.empty()
imageLocation = stoutput.empty()
#for _ in range(args.batch_size):
# imageLocationInternal.append(stoutput.empty())
cur_t -= 1
if j % 5 == 0 and j != diffusion.num_timesteps - 1:
imageLocationInternal = []
#sample.save(args.image_file)
save_sample(i, sample)
imageLocation.image(imageLocationInternal)
itt += 1
time_past_seconds = time.perf_counter() - before_start_time
iterations_per_second = itt / time_past_seconds
time_left = (total_steps - itt) / iterations_per_second
percentage = round((itt / (total_steps + 1)) * 100)
iteration_counter.write(
f"{percentage}% {itt}/{total_steps+1} [{time.strftime('%M:%S', time.gmtime(time_past_seconds))}<{time.strftime('%M:%S', time.gmtime(time_left))}, {round(iterations_per_second,2)} it/s]"
)
progress_bar.progress(int(percentage))
#save_sample(i, sample, args.clip_score)
if not path_exists(DefaultPaths.output_path):
os.makedirs(DefaultPaths.output_path)
save_filename = f"{DefaultPaths.output_path}/{sanitize_filename(args.text)} [GLID-3 XL] {args.seed}.png"
for k in range(args.batch_size):
shutil.copyfile(
f'{k}-{args.image_file}',
f'{save_filename[ : -4]}-{k}.png',
)
imageLocation.empty()
status.write("Done!")
gc.collect()
do_run()
last_model = args.model_path |
from django.db import models
# Create your models here.
# Homepage carousel data
class Wheel(models.Model):
img = models.CharField(max_length=150)
name = models.CharField(max_length=20)
trackid = models.CharField(max_length=20)
# Homepage navigation data
class Nav(models.Model):
img = models.CharField(max_length=150)
name = models.CharField(max_length=20)
trackid = models.CharField(max_length=20)
# Homepage mini carousel
class Mustbuy(models.Model):
img = models.CharField(max_length=150)
name = models.CharField(max_length=20)
trackid = models.CharField(max_length=20)
# Homepage convenience-store block data
class Shop(models.Model):
img = models.CharField(max_length=150)
name = models.CharField(max_length=20)
trackid = models.CharField(max_length=20)
# Main display information
class MainShow(models.Model):
trackid = models.CharField(max_length=10)
name = models.CharField(max_length=20)
img = models.CharField(max_length=100)
categoryid = models.CharField(max_length=10)
brandname = models.CharField(max_length=20)
img1 = models.CharField(max_length=100)
childcid1 = models.CharField(max_length=10)
productid1 = models.CharField(max_length=10)
longname1 = models.CharField(max_length=50)
price1 = models.CharField(max_length=10)
marketprice1 = models.CharField(max_length=10)
img2 = models.CharField(max_length=100)
childcid2 = models.CharField(max_length=10)
productid2 = models.CharField(max_length=10)
longname2 = models.CharField(max_length=50)
price2 = models.CharField(max_length=10)
marketprice2 = models.CharField(max_length=10)
img3 = models.CharField(max_length=100)
childcid3 = models.CharField(max_length=10)
productid3 = models.CharField(max_length=10)
longname3 = models.CharField(max_length=50)
price3 = models.CharField(max_length=10)
marketprice3 = models.CharField(max_length=10)
# Category model
class FoodTypes(models.Model):
typeid = models.CharField(max_length=10)
typename = models.CharField(max_length=20)
typesort = models.IntegerField()
childtypenames = models.CharField(max_length=150)
# Goods (product) model class
class Goods(models.Model):
    # product id
    productid = models.CharField(max_length=10)
    # product image
    productimg = models.CharField(max_length=150)
    # product name
    productname = models.CharField(max_length=50)
    # product long name
    productlongname = models.CharField(max_length=100)
    # featured (curated) flag
    isxf = models.NullBooleanField(default=False)
    # buy-one-get-one promotion description
    pmdesc = models.CharField(max_length=10)
    # specification
    specifics = models.CharField(max_length=20)
    # price
    price = models.CharField(max_length=10)
    # supermarket price
    marketprice = models.CharField(max_length=10)
    # category id
    categoryid = models.CharField(max_length=10)
    # child category id
    childcid = models.CharField(max_length=10)
    # child category name
    childcidname = models.CharField(max_length=10)
    # detail page (dealer) id
    dealerid = models.CharField(max_length=10)
    # stock count
    storenums = models.IntegerField()
    # sales volume
    productnum = models.IntegerField()
    # purchasers
# Simple user model
class userinfo(models.Model):
    # user account, must be unique
    useraccount = models.CharField(max_length=20,unique=True)
    # password
    upassword = models.CharField(max_length=300)
    # nickname
    username = models.CharField(max_length=20)
    # phone number
    userphone = models.CharField(max_length=20)
    # user address
    useradderss = models.CharField(max_length=200,null=False) # null=False means the column may not be NULL (this is the default)
# This cart model is incorrect (kept for reference)
class cart(models.Model):
userccount = models.CharField(max_length=20)
usergoods = models.CharField(max_length=20)
# Cart model, version 1.0
class NewCart(models.Model):
nccount = models.CharField(max_length=20)
ngoodsid = models.CharField(max_length=20)
# Cart model, version 2.0
class TwoCart(models.Model):
tccount = models.CharField(max_length=20)
tgoodid = models.CharField(max_length=20)
    # added quantity field
tgoodnum = models.IntegerField()
# Cart model, version 3.0
class Xcart(models.Model):
tccount = models.CharField(max_length=20)
tgoodid = models.ForeignKey(Goods)
    # added quantity field
tgoodnum = models.IntegerField()
|
from enum import Enum
import re
class OperatingSystemClasses(Enum):
#ENUM:START
DESKTOP = 1
SERVER = 2
EMBEDDED = 3
#ENUM:END
def parse(self, string):
for j in OperatingSystemClasses:
if (string.lower()).find(j.name.lower())>=0:
return j
return OperatingSystemClasses.DESKTOP
|
import requests
import re
import datetime
from bs4 import BeautifulSoup
import pandas as pd
C_NEWS = "https://cnews.ru/search"
def get_links_with_dates(keyword):
r = requests.get(C_NEWS, params={"search": keyword})
soup = BeautifulSoup(r.text, 'html.parser')
urls = list(map(lambda x: 'https:' + x["href"], soup.find_all("a", {"class": "ani-postname"})))
dates = [x.text for x in soup.find_all("span", {"class": "ani-date"})]
return urls, dates
def get_text(url):
r = requests.get(url)
text = ""
try:
news_soup = BeautifulSoup(r.text, 'html.parser').find("article")
ps = news_soup.find_all("p")
for p in ps:
text += p.text.strip()
except Exception as e:
print(url)
text = text.replace("\n", " ")
return text
def get_all_texts_with_dates(keyword):
urls, dates = get_links_with_dates(keyword)
all_texts = []
for u in range(len(urls)):
if (not "https://softline" in urls[u]) and (not "https://events" in urls[u]):
text = get_text(urls[u]).strip()
date = dates[u]
one_article = {'date': date, 'text': text}
all_texts.append(one_article)
return all_texts
if __name__ == "__main__":
with open("input.txt", "r", encoding='utf-8') as f:
file = f.readlines()
for i in file:
print(get_all_texts_with_dates(i.strip()))
|
import torch
import numpy as np
import matplotlib.pyplot as plt
import os
import argparse
from matplotlib.animation import FuncAnimation
"""
Script to visualize trajectories. Automatically infer datatypes with the following heuristic:
state = plot x-y and values
1d: plot as lines
2d: plot as lines per step
3d: image.
"""
def init_plt(traj):
"""
Get figs ready for viz
"""
topics = list(traj['observation'].keys())
# topics = ['image_rgb', 'heightmap', 'rgbmap', 'imu']
    n_panes = len(topics) + 2 # Add two extra panes: top-down trajectory and actions
M = int(n_panes//2) + (n_panes%2)
N = 2
# M = n_panes
# N = 1
fig, axs = plt.subplots(N, M, figsize = (M*4 + 1, N*4 + 1))
return fig, axs.flatten(), topics
def make_plot(traj, t, topics, fig, axs):
for ax in axs:
ax.cla()
# ax.set_aspect(1.)
for ax, topic in zip(axs, topics):
mode = len(traj['observation'][topic][t].shape)
if 'map' in topic:
mode = 4
if topic == 'state':
plot_data(traj['observation'][topic][:, 2:], t, mode, fig, ax)
else:
plot_data(traj['observation'][topic], t, mode, fig, ax)
ax.set_title(topic)
#Plot traj and acts
axs[-2].set_title('Traj')
# start = max(0, t-50)
start = 0
xs = traj['observation']['state'][start:t+1, 0]
ys = traj['observation']['state'][start:t+1, 1]
axs[-2].plot(xs, ys, marker='.', c='r')
axs[-2].scatter(xs[0], ys[0], marker='^', label='start', c='b')
axs[-2].scatter(xs[-1], ys[-1], marker='x', label='current', c='b')
axs[-2].legend()
if xs.max() - xs.min() < 5:
axs[-2].set_xlim(xs.mean() - 5, xs.mean() + 5)
if ys.max() - ys.min() < 5:
axs[-2].set_ylim(ys.mean() - 5, ys.mean() + 5)
axs[-1].set_title('Cmds')
throttle = traj['action'][start:t+1, 0]
steer = traj['action'][start:t+1, 1]
axs[-1].plot(throttle, label='throttle', linewidth=3.)
axs[-1].plot(steer, label='steer', linewidth=3.)
axs[-1].legend()
axs[-1].set_ylim(-1.1, 1.1)
# if t > 30:
# import pdb;pdb.set_trace()
def plot_data(data, t, mode, fig, ax):
start = max(0, t-50)
if mode == 1:
#Plot state history
ax.plot(data[start:t+1])
elif mode == 2:
if data[t].shape[-1] == 6:
ax.plot(data[t, :, 3:], linewidth=3)
else:
ax.plot(data[t], linewidth=3)
elif mode == 3:
ax.imshow(data[t].permute(1, 2, 0).squeeze()[:, :, [2, 1, 0]])
elif mode == 4:
if data[t].shape[0] == 3:
ax.imshow(data[t].permute(1, 2, 0).squeeze()[:, :, [2, 1, 0]].fliplr(), origin='lower')
elif data[t].shape[0] == 2:
ax.imshow(data[t].permute(1, 2, 0).squeeze()[:, :, 0].fliplr(), origin='lower')
else:
ax.imshow(data[t].permute(1, 2, 0).squeeze().fliplr(), origin='lower')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--traj_fp', type=str, required=True, help='The path to the <traj>.pt data file')
args = parser.parse_args()
traj = torch.load(args.traj_fp)
fig, axs, topics = init_plt(traj)
anim = FuncAnimation(fig, func = lambda t:make_plot(traj, t=t, topics=topics, fig=fig, axs=axs), frames=np.arange(traj['action'].shape[0]), interval=0.1*1000)
plt.show()
# anim.save('video.mp4')
|
"""Execute xCell transformation of gene expression.
This python package gives both a CLI interface and a python module to work with xCell in Python Pandas DataFrames.
Find the official R package here:
https://github.com/dviraran/xCell
And if you find this useful, please cite the authors' publication:
Aran D, Hu Z, Butte AJ. xCell: digitally portraying the tissue cellular heterogeneity landscape. Genome Biol. 2017 Nov 15;18(1):220. doi: 10.1186/s13059-017-1349-1. PubMed PMID: 29141660; PubMed Central PMCID: PMC5688663.
"""
import argparse, sys, os
import pandas as pd
from tempfile import mkdtemp, gettempdir
from subprocess import Popen, PIPE
def xCell(expression_df,
rnaseq=True,
scale=True,
alpha=0.5,
nperm=250,
parallel_sz=0,
verbose=False,
tempdir= None,
beta_pval=False,
perm_pval=False,
matrix=False
):
"""xCell function for use with pandas DataFrame objects
:param expression_df: REQUIRED: Expression data indexed on gene names column labels as sample ids
:type expression_df: pandas.DataFrame
:param parallel_sz: Number of processors to use when doing the calculations in parallel. This requires to previously load either the parallel or the snow library. If parallel is loaded and this argument is left with its default value (parallel_sz=0) then it will use all available core processors unless we set this argument with a smaller number. If snow is loaded then we must set this argument to a positive integer number that specifies the number of processors to employ in the parallel calculation.
:type parallel_sz: int Default: 0
:param verbose: Gives information about each calculation step.
:type verbose: bool Default: False
:param tempdir: Location to write temporary files
:type tempdir: string Default: System Default
:returns: pandas.DataFrame
"""
if matrix and (beta_pval or perm_pval): raise ValueError("can't return pvalues as a matrix")
df = expression_df
if not tempdir:
tempdir = mkdtemp(prefix="weirathe.",dir=gettempdir().rstrip('/'))
if verbose:
sys.stderr.write("Caching to "+tempdir+"\n")
## Remove genes from the genesets that do not occur in the dataset
#members = gmt_df['member'].unique()
#missing = set(members)-set(df.index)
#original = df.index
#if len(missing) > 0:
# if verbose: sys.stderr.write("WARNING removing "+str(len(missing))+\
# " genes from gene sets that don't exist in the data\n"+\
# ",".join(sorted(list(missing)))+"\n")
#gmt_df = gmt_df[~gmt_df['member'].isin(list(missing))]
## Write our gene sets
#gmt_df = gmt_df.groupby(['name']).\
# apply(lambda x: "\t".join(sorted(list(x['member'])))).reset_index().rename(columns={0:'members'})
#of = open(os.path.join(tempdir,"gs.gmt"),'w')
#for row in gmt_df.itertuples():
# name = row.name
# description = 'description'
# fields = row.members
# of.write(name+"\t"+description+"\t"+fields+"\n")
#of.close()
df.to_csv(os.path.join(tempdir,"expr.csv"))
cur = os.path.dirname(os.path.realpath(__file__))
rscript = os.path.join(cur,"xcell.r")
cmd = ["Rscript",rscript]+[str(x) for x in \
[rnaseq,scale,alpha,nperm,parallel_sz,verbose,tempdir,beta_pval,perm_pval]]
if verbose: sys.stderr.write(" ".join(cmd)+"\n")
sp = Popen(cmd,stdout=PIPE,stderr=PIPE)
if not verbose: sp.communicate()
else:
for line in sp.stderr: sys.stderr.write(line.decode('utf-8'))
if verbose: sys.stderr.write("finished R script\n")
output1 = pd.read_csv(os.path.join(tempdir,"pathways.csv"),index_col=0)
output1.index.name = 'name'
if matrix: return output1
df = output1.unstack().reset_index().rename(columns={0:'score','level_0':'sample'})
if beta_pval:
output2 = pd.read_csv(os.path.join(tempdir,"beta.csv"),index_col=0)
output2.index.name = 'name'
output2.columns = output1.columns
d2 = output2.unstack().reset_index().rename(columns={0:'beta_pval','level_0':'sample'})
df = df.merge(d2,on=['sample','name'])
if perm_pval:
output3 = pd.read_csv(os.path.join(tempdir,"randomP.csv"),index_col=0)
output3.index.name = 'name'
output3.columns = output1.columns
output4 = pd.read_csv(os.path.join(tempdir,"randomD.csv"),index_col=0)
output4.index.name = 'name'
d3 = output3.unstack().reset_index().rename(columns={0:'permute_pval','level_0':'sample'})
df = df.merge(d3,on=['sample','name'])
return df
def __cli():
args = __do_inputs()
# Now read in the input files for purposes of standardizing inputs
df = None
if args.tsv_in:
df = pd.read_csv(args.input,sep="\t",index_col=0)
else:
df = pd.read_csv(args.input,index_col=0)
result = xCell(df,
rnaseq=args.rnaseq,
scale=args.scale,
alpha=args.alpha,
nperm=args.nperm,
parallel_sz=args.parallel_sz,
verbose=args.verbose,
tempdir=args.tempdir,
beta_pval=args.beta_pval,
perm_pval=args.perm_pval,
matrix=args.matrix
)
sep = ','
use_index = False
if args.matrix: use_index = True
if args.tsv_out: sep = "\t"
if args.output:
result.to_csv(args.output,sep=sep,index=use_index)
else:
result.to_csv(os.path.join(args.tempdir,'final.csv'),sep=sep,index=use_index)
with open(os.path.join(args.tempdir,'final.csv')) as inf:
for line in inf:
sys.stdout.write(line)
def __do_inputs():
# Setup command line inputs
parser=argparse.ArgumentParser(description="Execute R xCell",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
group0 = parser.add_argument_group('Input options')
group0.add_argument('input',help="Use - for STDIN")
  group0.add_argument('--tsv_in',action='store_true',help="Expect CSV by default, this overrides to tab")
group2 = parser.add_argument_group('Output options')
group2.add_argument('--tsv_out',action='store_true',help="Override the default CSV and output TSV")
  group2.add_argument('--output','-o',help="Specify path to write transformed data")
  group2.add_argument('--meta_output',help="Specify path to output additional run information")
group2.add_argument('--matrix',action='store_true',help="Output results as a matrix")
group4 = parser.add_argument_group("Add pvalue calculations. Cannot output as matrix. These will be added as columns to the DataFrame")
group4.add_argument('--beta_pval',action='store_true',help="output the beta pvalue")
group4.add_argument('--perm_pval',action='store_true',help="output the random permutation pvalue")
group1 = parser.add_argument_group('command options')
parallel_sz_str = '''
Number of processors to use when doing the calculations in parallel. This requires
to previously load either the parallel or the snow library. If parallel is
loaded and this argument is left with its default value (parallel_sz=0) then it
will use all available core processors unless we set this argument with a smaller
number. If snow is loaded then we must set this argument to a positive integer
number that specifies the number of processors to employ in the parallel
calculation.
'''
group1.add_argument('--parallel_sz',type=int,default=0,help=parallel_sz_str)
verbose_str = '''
Gives information about each calculation step.
'''
group1.add_argument('--verbose',action='store_true',help=verbose_str)
rnaseq_str = '''
Inputs are RNAseq.
'''
group1.add_argument('--rnaseq',type=bool, default=True,help=rnaseq_str)
scale_str = '''
Scaling transforms with fit.vals.
'''
group1.add_argument('--scale',type=bool, default=True,help=scale_str)
alpha_str = '''
Value to override spillover alpha parameter.
'''
group1.add_argument('--alpha',type=float, default=0.5,help=alpha_str)
nperm_str = '''
Number of random resamplings.
'''
group1.add_argument('--nperm',type=int, default=250,help=nperm_str)
# Temporary working directory step 1 of 3 - Definition
label4 = parser.add_argument_group(title="Temporary folder parameters")
group3 = label4.add_mutually_exclusive_group()
group3.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
group3.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
args = parser.parse_args()
if args.matrix and (args.beta_pval or args.perm_pval): raise ValueError("can't return pvalues in a matrix.")
setup_tempdir(args)
return args
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir):
os.makedirs(args.specific_tempdir.rstrip('/'))
args.tempdir = args.specific_tempdir.rstrip('/')
if not os.path.exists(args.specific_tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
else:
args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
if not os.path.exists(args.tempdir.rstrip('/')):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
if not os.path.exists(args.tempdir):
sys.stderr.write("ERROR: Problem creating temporary directory\n")
sys.exit()
return
if __name__=="__main__":
__cli()
|
from django.apps import AppConfig
class MastersConfig(AppConfig):
name = 'Masters'
|
"""
MIT License
Copyright (c) 2021 Arbri Chili
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from src.image_processing.process_images import load_images_from_folder
import os
import h5py
import numpy as np
def slice_array(array, num_rows, num_cols):
h, w = array.shape
return (array.reshape(h // num_rows, num_rows, -1, num_cols)
.swapaxes(1, 2)
.reshape(-1, num_rows, num_cols))
def chop_arrays_to_grids(block_length, folder):
images = load_images_from_folder(folder)
images = [i[0] for i in images] # Remove filenames
grids = []
# Create list of smaller arrays from the larger array
for i in images:
chopped_np_arrays = [slice_array(i[:, :, x], block_length, block_length) for x in range(3)]
gbr = np.stack(chopped_np_arrays, axis=3)
grids.append(gbr)
# Combine the numpy arrays into a list
square_grids = np.stack([i for i in grids])
return square_grids
def save_to_hf(filename, folder, data_grid):
for files in os.listdir(folder):
if files == filename:
with h5py.File(filename, "w") as f:
f.clear()
group = f.create_group("colour_images")
group.create_dataset("test_data", data=data_grid)
return
print("No file found of name: " + filename + "\n please create data.h5 file")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import TypedJS
from .settings import app_settings
class TypedJSPlugin(CMSPluginBase):
name = _('Typed.js')
module = _('Typed.js')
model = TypedJS
render_template = 'djangocms_typedjs/_typedjs.html'
cache = False
allow_children = True
child_classes = ['TextPlugin', ]
text_enabled = True
def render(self, context, instance, placeholder):
context = super(TypedJSPlugin, self).render(context, instance, placeholder)
context['app_settings'] = app_settings
return context
def icon_src(self, instance):
return settings.STATIC_URL + 'djangocms_typedjs/images/djangocms-typedjs-icon.png'
def icon_alt(self, instance):
return u'Django CMS Typed.js plugin: %s' % instance.name
plugin_pool.register_plugin(TypedJSPlugin)
|
expected_output = {
'id':{
2147483659: {
'encoding': 'encode-xml',
'filter': {
'filter_type': 'xpath',
'xpath': '/if:interfaces-state/interface/oper-status'
},
'legacy_receivers': {
'10.69.35.35': {
'port': 45128,
'protocol': 'netconf'
}
},
'state': 'Valid',
'stream': 'yang-push',
'update_policy': {
'period': 1000,
'update_trigger': 'periodic'
}
}
}
} |
# Generated by Django 3.2 on 2021-04-18 20:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oauth', '0007_profile_profile_photo'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'ordering': ['-points']},
),
migrations.AddField(
model_name='profile',
name='friends',
field=models.ManyToManyField(null=True, related_name='_oauth_profile_friends_+', to='oauth.Profile', verbose_name='Friend list'),
),
]
|
#python3
from time import sleep
from random import randint
import unicornhat as uh
def rain():
uh.set_layout(uh.PHAT)
uh.brightness(0.5)
r,g,b = 0,0,255
x = randint(0,7)
for y in range(3,-1,-1):
uh.clear()
uh.set_pixel(x,y,r,g,b)
uh.show()
sleep(.2)
return
def sun():
uh.set_layout(uh.PHAT)
uh.brightness(1)
uh.clear()
r,g,b = 255,255,100 #yellow
for x in range(8):
uh.set_pixel(x,3,r,g,b)
for x in range(1,7):
uh.set_pixel(x,2,r,g,b)
for x in range(1,7):
uh.set_pixel(x,1,r,g,b)
r,g,b = 200,100,0 #orange
uh.set_pixel(0,2,r,g,b)
uh.set_pixel(7,2,r,g,b)
uh.set_pixel(0,1,r,g,b)
uh.set_pixel(7,1,r,g,b)
for x in range(2,6):
uh.set_pixel(x,0,r,g,b)
r,g,b = 225,100,100 #red
uh.set_pixel(0,1,r,g,b)
uh.set_pixel(7,1,r,g,b)
uh.set_pixel(0,0,r,g,b)
uh.set_pixel(1,0,r,g,b)
uh.set_pixel(6,0,r,g,b)
uh.set_pixel(7,0,r,g,b)
uh.show()
return
def cloudy():
uh.set_layout(uh.PHAT)
uh.brightness(1)
uh.clear()
r,g,b = 0,50,100 # grey blue
for x in range(8):
uh.set_pixel(x,3,r,g,b)
    r,g,b = 64,64,64 # grey
for y in range(1,3):
for x in range(8):
uh.set_pixel(x,y,r,g,b)
r,g,b = 0,50,0 # green
for x in range(8):
uh.set_pixel(x,0,r,g,b)
uh.show()
return
if __name__ == '__main__':
while True:
cloudy()
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
"""
- request drf封装的请求
- view 应用于那个视图
- obj 对象实例
# SAFE_METHODS包含了'GET', 'HEAD', 'OPTIONS'这三种方法,
# 这三个方法是安全的,下边这个判断就是如果请求的方法在这个里边
# 就返回True
"""
if request.method in permissions.SAFE_METHODS:
return True
        # Otherwise allow the action only if the currently logged-in user is the object's owner
return obj.owner == request.user
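# A usage sketch (hypothetical, not part of this project): attach the permission to a
# DRF view so everyone gets read-only access while only the object's owner may modify
# it. "SnippetViewSet", "Snippet" and "SnippetSerializer" are illustrative names only.
#
#   from rest_framework import viewsets
#
#   class SnippetViewSet(viewsets.ModelViewSet):
#       queryset = Snippet.objects.all()
#       serializer_class = SnippetSerializer
#       permission_classes = [IsOwnerOrReadOnly]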
class IsAdminUserOrReadOnly(permissions.BasePermission):
"""
超级管理员具有添加,修改,删除的权限,get权限不受控制
"""
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
return bool(request.user and request.user.is_staff) |
#!/usr/bin/env python3
import sys
sys.path.append('../../../..')
sys.path.append('..')
import numpy as np
import numpy.random as ra
import xarray as xr
import torch
from maker import make_model, load_data, sf
from pyoptmat import optimize
from tqdm import tqdm
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import time
# Use doubles
torch.set_default_tensor_type(torch.DoubleTensor)
# Run on GPU!
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
# Run on CPU for running multiple jobs on workstation
# Optimal number of threads is about 4 on our machine
dev = "cpu"
device = torch.device(dev)
# Don't try to optimize for the Young's modulus
def make(n, eta, s0, R, d, C, g, **kwargs):
return make_model(torch.tensor(0.5), n, eta, s0, R, d, C, g,
device = device, use_adjoint = True, **kwargs).to(device)
if __name__ == "__main__":
# 1) Load the data for the variance of interest,
# cut down to some number of samples, and flatten
scale = 0.05
nsamples = 20 # at each condition
times, strains, temperatures, true_stresses = load_data(scale, nsamples, device = device)
sf = 0.1
use = int(len(times)*sf)
# Move to device and curtail to some number of steps
times = times[:use]
strains = strains[:use]
temperatures = temperatures[:use]
true_stresses = true_stresses[:use]
# 2) Setup names for each parameter and the initial conditions
names = ["n", "eta", "s0", "R", "d", "C", "g"]
ics = [ra.uniform(0,1) for i in range(len(names[:-2]))]
ics += [ra.uniform(0,1,size=(3,)), ra.uniform(0,1,size=3)]
# 3) Create the actual model
model = optimize.DeterministicModel(make, names, ics).to(device)
# 4) Run some number of times
loss = torch.nn.MSELoss(reduction = 'sum')
t1 = time.time()
niter = 2
t = tqdm(range(niter), total = niter)
for i in t:
model.zero_grad()
pred = model(times, strains, temperatures)
lossv = loss(pred, true_stresses)
lossv.backward()
te = time.time() - t1
print("Total run time: %f s" % te)
|
import argparse
import time
from PIL import Image
import tflite_runtime.interpreter as tflite
import platform
EDGETPU_SHARED_LIB = {
'Linux': 'libedgetpu.so.1',
'Darwin': 'libedgetpu.1.dylib',
'Windows': 'edgetpu.dll'
}[platform.system()]
def make_interpreter(model_file):
model_file, *device = model_file.split('@')
return tflite.Interpreter(
model_path=model_file,
experimental_delegates=[
tflite.load_delegate(EDGETPU_SHARED_LIB,
{'device': device[0]} if device else {})
])
def input_details(interpreter, key):
"""Returns input details by specified key."""
return interpreter.get_input_details()[0][key]
def input_tensor(interpreter):
"""Returns input tensor view as numpy array of shape (height, width, 3)."""
tensor_index = input_details(interpreter, 'index')
return interpreter.tensor(tensor_index)()[0]
def set_input(interpreter, data):
"""Copies data to input tensor."""
input_tensor(interpreter)[:, :] = data
interpreter = make_interpreter("converted/deeprehab_edgetpu.tflite")
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
image = Image.open("test.jpg").convert('RGB').resize((width, height), Image.ANTIALIAS)
set_input(interpreter, image)
print('----INFERENCE TIME----')
print('Note: The first inference on Edge TPU is slow because it includes',
'loading the model into Edge TPU memory.')
for _ in range(5):
start = time.perf_counter()
interpreter.invoke()
inference_time = time.perf_counter() - start
#classes = classify.get_output(interpreter, args.top_k, args.threshold)
print('%.1fms' % (inference_time * 1000)) |
from faker import Faker
header = {
"base_url": "",
"Sandbox-Key": str(Faker().text()),
"Content-Type": "application/json"
}
body = {
"AccessTokenGenerator": {
"client_secret": str(Faker().text()),
"client_id": str(Faker().text()),
"grant_type": str(Faker().text()),
"username": str(Faker().text()),
"password": str(Faker().text())
},
"AccountEnquiry": {
"accountNumber": str(Faker().credit_card_number()),
"accountType": str(Faker().text())
},
"CustomerEnquiry": {
"accountNumber": str(Faker().credit_card_number()),
"accountType": str(Faker().text())
},
"CustomerAndAccountEnquiry": {
"accountNumber": str(Faker().credit_card_number()),
"accountType": str(Faker().text())
},
"ChangeUserCredentials": {
"username": str(Faker().name()),
"oldPassword": str(Faker().text()),
"password": str(Faker().text()),
"moduleId": "UNION_ONE",
"clientSecret": str(Faker().text())
}
}
responses = {
"AccessTokenGenerator": {
"message": "OK",
"data": {
"access_token": str(Faker().text()),
"token_type": "bearer",
"refresh_token": str(Faker().text()),
"expires_in": Faker().random_int(),
"scope": "read"
}
},
"AccountEnquiry": {
"message": "OK",
"data": {
"code": "00",
"message": "Account Enquiry Successful",
"accountNumber": str(Faker().random_int()),
"accountName": str(Faker().name()),
"accountBranchCode": Faker().random_int(),
"customerNumber": str(Faker().credit_card_number()),
"accountClass": str(Faker().random_int()),
"accountCurrency": "NGN",
"accountType": "Current",
"availableBalance": str(Faker().random_int()),
"customerAddress": str(Faker().address()),
"customerEmail": str(Faker().email()),
"customerPhoneNumber": str(Faker().phone_number())
}
},
"CustomerEnquiry": {
"message": "OK",
"data": {
"code": "00",
"message": "Customer Enquiry Successful",
"country": str(Faker().country()),
"countryOfBirth": str(Faker().country()),
"dob": str(Faker().date()),
"nationality": "NG",
"lastName": str(Faker().name()),
"firstName": str(Faker().name()),
"otherNames": str(Faker().name()),
"customerType": "I",
"email": str(Faker().email()),
"phoneNumber": str(Faker().phone_number()),
"idType": "OTHERS",
"idNumber": str(Faker().random_int()),
"countryOfIssue": str(Faker().country()),
"effectiveDate": str(Faker().date()),
"expiryDate": str(Faker().date()),
"addressLine1": str(Faker().address()),
"addressLine2": str(Faker().address()),
"city": str(Faker().city()),
"state": str(Faker().state()),
"postalCode": str(Faker().random_int()),
"bvn": str(Faker().random_int())
}
},
"CustomerAndAccountEnquiry": {
"message": "OK",
"data": {
"code": "00",
"message": "Enquiry successful",
"account": {
"code": "00",
"message": "Account Enquiry Successful",
"accountNumber": str(Faker().random_int()),
"accountName": str(Faker().name()),
"accountBranchCode": Faker().random_int(),
"customerNumber": str(Faker().credit_card_number()),
"accountClass": str(Faker().random_int()),
"accountCurrency": "NGN",
"accountType": "Current",
"availableBalance": str(Faker().random_int()),
"customerAddress": str(Faker().address()),
"customerEmail": str(Faker().email()),
"customerPhoneNumber": str(Faker().phone_number())
},
"customer": {
"code": "00",
"message": "Customer Enquiry Successful",
"country": str(Faker().country()),
"countryOfBirth": str(Faker().country()),
"dob": str(Faker().date()),
"nationality": "NG",
"lastName": str(Faker().name()),
"firstName": str(Faker().name()),
"otherNames": str(Faker().name()),
"customerType": "I",
"email": str(Faker().email()),
"phoneNumber": str(Faker().phone_number()),
"idType": "OTHERS",
"idNumber": str(Faker().random_int()),
"countryOfIssue": str(Faker().country()),
"effectiveDate": str(Faker().date()),
"expiryDate": str(Faker().date()),
"addressLine1": str(Faker().address()),
"addressLine2": str(Faker().address()),
"city": str(Faker().city()),
"state": str(Faker().state()),
"postalCode": str(Faker().random_int()),
"bvn": str(Faker().random_int())
}
}
},
"ChangeUserCredentials": {
"message": "OK",
"data": {
"code": "00",
"message": "Password changes successfully",
"reference": str(Faker().text())
}
}
}
params = {
"access_token": str(Faker().text())
}
class R:
def __init__(self, text):
self.status_code = 200
self.text = text
def json(self):
return self.text
|
from unittest.mock import patch
from django.core.cache import cache
from django.test import TestCase
from constance.test import override_config
from django_redis import get_redis_connection
from crazyarms import constants
from .models import AudioAsset
@patch("autodj.models.random.sample", lambda l, n: list(l)[:n]) # Deterministic
class AntiRepeatTests(TestCase):
def setUp(self):
redis = get_redis_connection()
redis.flushdb()
@staticmethod
def create_assets(num_tracks_per_artist=1, num_artists=1):
assets = []
for a in range(num_artists):
for t in range(num_tracks_per_artist):
asset = AudioAsset(
title=f"T:{a * num_tracks_per_artist + t}",
artist=f"A:{a}",
status=AudioAsset.Status.READY,
)
asset.save()
assets.append(asset)
return assets
@staticmethod
def get_no_repeat_artists():
return cache.get(constants.CACHE_KEY_AUTODJ_NO_REPEAT_ARTISTS)
@staticmethod
def get_no_repeat_track_ids():
return cache.get(constants.CACHE_KEY_AUTODJ_NO_REPEAT_IDS)
@override_config(
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT=5,
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT_ARTIST=3,
)
def test_basic_no_repeat(self):
tracks = self.create_assets(2, 4)
played_track_names = [str(AudioAsset.get_next_for_autodj()) for _ in range(10)]
self.assertEqual(
played_track_names,
[
"A:0 - T:0",
"A:1 - T:2",
"A:2 - T:4",
"A:3 - T:6",
# We allow a repeat after 3 artists
"A:0 - T:1",
                # We would have allowed a track repeat here, but that would involve artist repetition
"A:1 - T:3",
"A:2 - T:5",
"A:3 - T:7",
# Finally we get our repeats
"A:0 - T:0",
"A:1 - T:2",
],
)
self.assertEqual(self.get_no_repeat_track_ids(), [tracks[i].id for i in (2, 0, 7, 5, 3)])
self.assertEqual(
self.get_no_repeat_artists(),
[AudioAsset.normalize_artist(f"A:{a}") for a in (1, 0, 3)],
)
        # Try one more and check that our cache values are as expected
self.assertEqual(str(AudioAsset.get_next_for_autodj()), "A:2 - T:4")
self.assertEqual(self.get_no_repeat_track_ids(), [tracks[i].id for i in (4, 2, 0, 7, 5)])
self.assertEqual(
self.get_no_repeat_artists(),
[AudioAsset.normalize_artist(f"A:{a}") for a in (2, 1, 0)],
)
@override_config(
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT=0,
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT_ARTIST=0,
)
def test_disabled_when_set_to_zero(self):
self.create_assets(2, 2)
played_track_names = [str(AudioAsset.get_next_for_autodj()) for _ in range(3)]
self.assertEqual(played_track_names, ["A:0 - T:0"] * 3)
self.assertIsNone(self.get_no_repeat_artists())
self.assertIsNone(self.get_no_repeat_track_ids())
@override_config(
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT=0,
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT_ARTIST=2,
)
def test_no_artist_repeats_only(self):
self.create_assets(2, 3)
played_track_names = [str(AudioAsset.get_next_for_autodj()) for _ in range(5)]
self.assertEqual(
played_track_names,
["A:0 - T:0", "A:1 - T:2", "A:2 - T:4", "A:0 - T:0", "A:1 - T:2"],
)
@override_config(
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT=2,
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT_ARTIST=3,
)
def test_no_track_repeats_only(self):
self.create_assets(3, 1)
played_track_names = [str(AudioAsset.get_next_for_autodj()) for _ in range(5)]
self.assertEqual(
played_track_names,
["A:0 - T:0", "A:0 - T:1", "A:0 - T:2", "A:0 - T:0", "A:0 - T:1"],
)
@override_config(
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT=5,
AUTODJ_ANTI_REPEAT_NUM_TRACKS_NO_REPEAT_ARTIST=3,
)
def test_corner_cases_when_anti_repeat_not_possible(self):
# No assets exist
with self.assertLogs("crazyarms.autodj.models", level="INFO") as logger:
self.assertIsNone(AudioAsset.get_next_for_autodj())
self.assertEqual(
logger.output,
["WARNING:crazyarms.autodj.models:autodj: no assets exist (no min/max id), giving up early"],
)
# Should only work on status = READY
AudioAsset(status=AudioAsset.Status.PENDING).save()
with self.assertLogs("crazyarms.autodj.models", level="INFO") as logger:
self.assertIsNone(AudioAsset.get_next_for_autodj())
self.assertEqual(
logger.output,
["WARNING:crazyarms.autodj.models:autodj: no assets exist, giving up early"],
)
self.create_assets(2, 1)
with self.assertLogs("crazyarms.autodj.models", level="INFO") as logger:
self.assertEqual(str(AudioAsset.get_next_for_autodj()), "A:0 - T:0")
self.assertEqual(logger.output, ["INFO:crazyarms.autodj.models:autodj: selected A:0 - T:0"])
with self.assertLogs("crazyarms.autodj.models", level="INFO") as logger:
self.assertEqual(str(AudioAsset.get_next_for_autodj()), "A:0 - T:1")
self.assertEqual(
logger.output,
[
"WARNING:crazyarms.autodj.models:autodj: no track found, attempting to run with artist repeats",
"INFO:crazyarms.autodj.models:autodj: selected A:0 - T:1",
],
)
with self.assertLogs("crazyarms.autodj.models", level="INFO") as logger:
self.assertEqual(str(AudioAsset.get_next_for_autodj()), "A:0 - T:0")
self.assertEqual(
logger.output,
[
"WARNING:crazyarms.autodj.models:autodj: no track found, attempting to run with artist repeats",
"WARNING:crazyarms.autodj.models:autodj: no track found, attempting to run with artist and track"
" repeats",
"INFO:crazyarms.autodj.models:autodj: selected A:0 - T:0",
],
)
|
#!/usr/bin/env python
'''
Abstract UNIX daemon process.
The Daemon abstract class handles starting, stopping, and (re)configuring
a daemon process. The class prevents running multiple instances through
the use of a /var/run/<daemon>.pid lock file. The class intercepts SIGHUP
and sets the object's reconfigure variable. Similarly, SIGTERM and SIGINT
set the shutdown variable.
This module will become obsolete if/when PEP 3143 (Standard daemon
process library) is added to the standard library or python-daemon
is ported to ArchLinux.
Users of this module must subclass Daemon and define the configure()
and run() methods. See run() below for skeleton code.
See Linux Programming Interface, Michael Kerrisk, No Starch Press, 2010.
'''
import errno
import logging
import os
import signal
import sys
PID_PATHNAME = '/var/run/{name}.pid'
_log = logging.getLogger(__name__)
_daemons = []
class Daemon(object):
    '''
    Abstract UNIX daemon; subclass this and override configure() and run().
    '''
def __init__(self, name):
'''
Initialize the daemon.
'''
self.name = name
self.pidfile = PidFile(name)
self.shutdown = False
self.reconfigure = False
def configure(self):
'''
(Re)configure the daemon.
This method is called just before starting the daemon and
in response to SIGHUP (i.e. kill -1). Usually daemons will
        reread their config files and reinitialize themselves when
this method is called.
Returns True if the daemon was successfully configured, False otherwise
* You should override this method.
'''
return True
def run(self):
'''
Run the daemon.
This method implements the actual daemon. It is subclassed
by the daemon author and only invoked by the start method.
Do not invoke this from anywhere except the start method.
By the time this method is called, the daemon will be configured,
it will be daemonized (unless explicitly prevented), and will
have the logger configured and connected.
* You must override this method.
'''
while not self.shutdown:
if self.reconfigure:
self.configure()
self.reconfigure = False
# do work here
# wait for more work here by:
# 1. time.sleep() or
# 2. select.select() with a timeout
raise NotImplementedError()
def start(self, run_in_foreground=False):
'''
Start the daemon.
Command line interfaces should call this method to start the
daemon running.
If an error occurs starting the daemon, one of the following
integers is returned. If startup is successful, this method
never returns.
errno.EEXIST: the pidfile /var/run/<name>.pid exists and
the process it points to is running
errno.ESRCH: the pidfile exists but the process is
not running
'''
_log.info('starting {} daemon'.format(self.name))
# Make sure this is the only instance running
pid = os.getpid()
rc = self.pidfile.create(pid)
if rc != 0:
return rc
try:
# Daemonize unless told otherwise
if not run_in_foreground:
daemonize()
pid = os.getpid()
self.pidfile.write(pid)
_log.info('{} daemon started (pid={})'.format(self.name, pid))
# Register signal handlers
signal.signal(signal.SIGHUP, _handle_signals)
signal.signal(signal.SIGTERM, _handle_signals)
if run_in_foreground:
signal.signal(signal.SIGINT, _handle_signals)
_daemons.append(self)
# Run daemon
self.run()
except:
_log.critical('{} crashed'.format(self.name), exc_info=True)
finally:
self.pidfile.destroy()
_log.info('{} daemon stopped'.format(self.name))
sys.exit(0)
def stop(self):
'''
Stop the daemon and delete the pid file.
This method is not run in the daemon's process. It sends a
SIGTERM to the daemon process which sets the shutdown variable
which the daemon notices and exits.
'''
_log.info('stopping {} daemon'.format(self.name))
self._send_signal(signal.SIGTERM)
self.pidfile.destroy()
def reconfigure_daemon(self):
'''
Reconfigure the daemon.
This method is not run in the daemon's process. It sends a
SIGHUP to the daemon process which sets the reconfigure variable
which the daemon notices and then rereads its config file.
'''
_log.info('reconfiguring {} daemon'.format(self.name))
self._send_signal(signal.SIGHUP)
def _send_signal(self, signum):
'''
Send a signal to the daemon process(es).
signum = the signal number, e.g. signal.SIG*
'''
pid = self.pidfile.read()
if pid > 0:
_log.debug('send signal {} to {}'.format(signum, pid))
try:
os.kill(pid, signum)
except OSError, ex:
if ex.errno != errno.ESRCH:
raise
_log.debug('no such process: {}'.format(pid))
class PidFile(object):
    '''
    Manage the daemon's /var/run/<name>.pid lock file.
    '''
def __init__(self, daemon_name):
'''
Initialize the path to the pid file.
'''
self.path = PID_PATHNAME.format(name=daemon_name)
def create(self, pid):
'''
Create the pid (lock) file.
Returns 0 if the file was created, errno.EEXIST if the pidfile
exists and the process is running, or errno.ESRCH if
the pidfile exists but the process isn't running.
'''
try:
_log.debug('creating lock file {}'.format(self.path))
fd = os.open(self.path,
os.O_WRONLY|os.O_CREAT|os.O_EXCL,
0644)
os.write(fd, '{}\n'.format(pid))
os.close(fd)
return 0
except OSError, ex:
if ex.errno == errno.EEXIST:
if self.is_process_running():
_log.error('daemon already running')
return errno.EEXIST
else:
_log.error('stale pidfile {}: no such process'
.format(self.path))
return errno.ESRCH
else:
_log.error('cannot create pidfile {}'.format(self.path),
exc_info=ex)
raise
def is_process_running(self):
'''
Is the process identified by the pid in the file still running?
Returns True or False.
'''
pid = self.read()
if pid != -1:
try:
os.kill(pid, 0)
return True
except OSError, ex:
if ex.errno != errno.ESRCH:
raise
return False
def destroy(self):
'''
Delete the pid file.
'''
try:
_log.debug('deleting pidfile {}'.format(self.path))
os.remove(self.path)
except OSError, ex:
if ex.errno != errno.ENOENT:
_log.debug('cannot delete pidfile {}'.format(self.path),
exc_info=ex)
raise
_log.debug('pidfile {} already deleted'.format(self.path))
def read(self):
'''
Read and return the integer pid in the file.
Returns a non-negative integer if pid can be read from file or
-1 if an error occurs reading the file or if the file
doesn't contain a non-negative integer.
'''
pid = -1
try:
with open(self.path, 'r') as fp:
buf = fp.read().strip()
_log.debug('pidfile {} contents: {}'.format(self.path, buf))
pid = int(buf)
if pid < 0:
_log.error('pidfile {} contains invalid pid {}'
.format(self.path, buf))
pid = -1
except (OSError, IOError), ex:
if ex.errno != errno.ENOENT:
_log.error('error reading pidfile {}'.format(self.path),
exc_info=ex)
raise
_log.debug('pidfile {} does not exist'.format(self.path))
except ValueError:
_log.error('pidfile {} contents not an integer: {}'
.format(self.path, buf))
return pid
def write(self, pid):
'''
Write a new pid to the pid file.
This method is called after a process becomes a daemon (where
the process is forked twice and has a new pid).
'''
with open(self.path, 'w') as fp:
fp.write('{}\n'.format(pid))
def daemonize():
'''
Daemonize the current process.
Daemonizing a process disconnects it from the session and terminal
that started the process so that the process isn't affected by events
in the starting shell (e.g. logout), doesn't disrupt the starting
shell (e.g. by printing to the terminal), and doesn't lock system
resources (e.g. by leaving cwd on a mounted file system or holding
    open inherited file descriptors).
Raises OSError if process partially or completely fails to daemonize.
See Linux Programming Interface section 37.2 Creating a Daemon, p. 768
'''
# Ensure process isn't a process group leader
try:
_log.debug('daemonize: 1st fork')
pid = os.fork()
if pid != 0:
_log.debug('daemonize: 1st fork parent exits')
os._exit(0)
_log.debug('daemonize: 1st fork child continues')
except OSError, ex:
_log.error('daemonize: 1st fork failed', exc_info=ex)
raise
# Ensure process is in its own session and has no controlling terminal
_log.debug('daemonize: starting new session')
os.setsid()
# Ensure process is not session leader and can't acquire a controlling
# terminal
try:
_log.debug('daemonize: 2nd fork')
pid = os.fork()
if pid != 0:
_log.debug('daemonize: 2nd fork parent exits')
os._exit(0)
_log.debug('daemonize: 2nd fork child continues')
except OSError, ex:
_log.error('daemonize: 2nd fork failed', exc_info=ex)
raise
# Ensure files and directories are created with requested permissions
_log.debug('daemonize: set umask to 0')
os.umask(0)
# Ensure process is not preventing a file system from unmounting
_log.debug('daemonize: cd to /')
os.chdir('/')
# Ensure process doesn't retain open file descriptors
_log.debug('daemonize: close file descriptors')
for fd in _get_open_file_descriptors():
try:
os.close(fd)
except OSError, ex:
if ex.errno != errno.EBADF:
raise
# Ensure I/O to standard file descriptors is discarded
_log.debug('daemonize: redirect stdin, stdout, stderr to /dev/null')
if os.open('/dev/null', os.O_RDWR) != 0:
raise OSError('cannot redirect stdin to /dev/null')
os.dup2(0, 1)
os.dup2(0, 2)
def _get_open_file_descriptors():
'''
Return a list of open file descriptors.
Depending on what the system provides, this method either returns
exactly the open file descriptors or a list of possibly open
file descriptors. Exclude any file descriptors used by
non-console loggers.
'''
logging_fds = set()
for handler in logging.root.handlers:
if hasattr(handler, 'stream') and \
hasattr(handler.stream, 'fileno') and \
handler.stream.fileno() > 2:
logging_fds.add(handler.stream.fileno())
if os.path.isdir('/proc/self/fd'):
fds = set()
for fd in os.listdir('/proc/self/fd'):
fds.add(int(fd))
elif 'SC_OPEN_MAX' in os.sysconf_names:
fds = set(range(os.sysconf('SC_OPEN_MAX')))
else:
fds = set(range(8192))
return fds - logging_fds
def _handle_signals(signum, unused_frame):
'''
Signal handler for SIGHUP, SIGINT, and SIGTERM.
This method only sets shutdown and reconfigure variables in the
running daemon to avoid reentrancy errors described in
Linux Programming Interface, section 21.2.2, pp. 422-428.
'''
_log.debug('handle signal {}'.format(signum))
for daemon in _daemons:
if signum == signal.SIGHUP:
daemon.reconfigure = True
elif signum == signal.SIGTERM or signum == signal.SIGINT:
daemon.shutdown = True
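# A minimal usage sketch (hypothetical, not shipped with this module): subclass Daemon,
# override configure() and run(), and drive it from a command line entry point.
# "ExampleDaemon" and its one-second work loop are illustrative assumptions only.
#
#   import time
#
#   class ExampleDaemon(Daemon):
#       def configure(self):
#           # re-read config files here; return False to abort startup
#           return True
#
#       def run(self):
#           while not self.shutdown:
#               if self.reconfigure:
#                   self.configure()
#                   self.reconfigure = False
#               # do one unit of work, then wait for more
#               time.sleep(1)
#
#   if __name__ == '__main__':
#       sys.exit(ExampleDaemon('example').start(run_in_foreground=True))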
|
# coding: utf-8
import time
import pytest
import logging
import hubblestack.status
log = logging.getLogger(__name__)
def sleep_and_return_time(amount=0.1):
time.sleep(amount)
return time.time()
def test_marks_and_timers():
with HubbleStatusContext('test1', 'test2') as hubble_status:
t0 = time.time()
mark1 = hubble_status.mark('test1')
t1 = sleep_and_return_time()
mark1.fin()
short_status = hubble_status.short()
assert tuple(short_status) == ('x.test1',)
assert short_status['x.test1']['count'] == 1
assert short_status['x.test1']['dt'] == pytest.approx(0.1, rel=1e2)
mark1_again = hubble_status.mark('test1')
assert mark1_again is mark1
mark2 = hubble_status.mark('test2')
t2 = sleep_and_return_time()
mark1_again.fin()
mark2.fin()
short_status = hubble_status.short()
assert set(short_status) == {'x.test1', 'x.test2'}
assert short_status['x.test1']['count'] == 2
assert short_status['x.test1']['dt'] == pytest.approx(0.1, rel=1e2)
assert short_status['x.test2']['count'] == 1
assert short_status['x.test2']['dt'] == pytest.approx(0.1, rel=1e2)
def test_max_depth():
# some constants
t0 = 1553102100
N = 100
B = 5
M = 10
with HubbleStatusContext('test1', bucket_len=B, max_buckets=M) as hubble_status:
# mark some status, pretending to move through time from t0
for t in range(t0, t0+N):
hubble_status.mark('test1', timestamp=t)
assert len(hubble_status.buckets()) == M
    # now change the game somewhat: every mark() checks the stack depth to make
# sure we save no more than max_buckets per status item. If we change the
# setting in the module's copy of __opts__, we should instantly see the
# buckets drop for 'test1' after a mark().
hubblestack.status.__opts__['hubble_status']['max_buckets'] = 3
hubble_status.mark('test1')
assert len(hubble_status.buckets()) == 3
def test_bucket_len():
# some constants
t0 = 1553102100
N = 100
B = 5
with HubbleStatusContext('test1', bucket_len=B) as hubble_status:
hubble_status.mark('test1')
# issue test1 mark N times, pretending one mark per second
# ranging from t0 to t0+(N-1)
for t in range(t0, t0+N):
hubble_status.mark('test1', timestamp=t)
# the list of bucket ids
buckets = hubble_status.buckets()
# compute the id of the bucket for the current time
actual_time = int(time.time())
very_last_bucket = actual_time - (actual_time % B)
# … of course, if we get really unlucky, we'll hit just the right time of
# day to rollover the short B second bucket window. Assuming that might happen,
    # check for any of these:
assert buckets[-1] in (very_last_bucket - B, very_last_bucket, very_last_bucket + B)
c = 0
for i,bucket in enumerate(buckets[:-1]):
assert bucket == t0 + B*i
short_status = hubble_status.short(bucket)
if 'x.test1' in short_status:
assert set(short_status) == {'x.test1',}
assert short_status['x.test1']['bucket'] == bucket
c += short_status['x.test1']['count']
assert c == N
assert len(buckets) == N/B + 1
class HubbleStatusContext(object):
# The tests below really mess up hubble_status. They change settings and
# mess with a session global variable (HubbleStatus.dat). If we don't
# attempt to restore HubbleStatus.dat and hubblestack.status.__opts__,
# other tests in the test suite will likely fail.
#
# The python context manager will do nicely:
orig_dat = orig_opt = None
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __enter__(self):
log.debug("__enter__ nuking HubbleStatus.dat and tuning __opts__")
self.orig_dat = hubblestack.status.HubbleStatus.dat
self.orig_opt = hubblestack.status.__opts__.get('hubble_status')
# completely reset the status stack
hubblestack.status.HubbleStatus.dat = dict()
# setup opts
bucket_len = self.kwargs.pop('bucket_len', 30e6) # 30Msec is roughly a year‡
max_buckets = self.kwargs.pop('max_buckets', 1e3)
namespace = self.kwargs.pop('namespace', 'x')
if self.kwargs:
            raise ValueError('unknown arguments: {}'.format(', '.join(self.kwargs.keys())))
opts = dict(bucket_len=bucket_len, max_buckets=max_buckets)
# setup hubble_status
hubblestack.status.__opts__['hubble_status'] = opts
# create and return
return hubblestack.status.HubbleStatus(namespace, *self.args)
def __exit__(self, *_):
log.debug("__exit__ restoring HubbleStatus.dat and repairing __opts__")
hubblestack.status.HubbleStatus.dat = self.orig_dat
if self.orig_opt is not None:
hubblestack.status.__opts__['hubble_status'] = self.orig_opt
# ‡ These time units are from Deepness in the Sky:
# 4ksec - roughly an hour
# 100ksec - sorta a day
# 600ksec - like a week
# 3Msec - kindof a month
# 30Msec - roughly a year
|
from tests.refactor.utils import RefactorTestCase
class TypeVariableTestCase(RefactorTestCase):
def test_type_assing_union(self):
actions = [
(
"""\
import typing
if typing.TYPE_CHECKING:
from PyQt5.QtWebEngineWidgets import QWebEngineHistory
from PyQt5.QtWebKit import QWebHistory
HistoryType = typing.Union['QWebEngineHistory', 'QWebHistory']
"""
),
(
"""\
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from PyQt5.QtWebEngineWidgets import QWebEngineHistory
from PyQt5.QtWebKit import QWebHistory
HistoryType = Union['QWebEngineHistory', 'QWebHistory']
"""
),
(
"""\
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
from PyQt5 import QtWebEngineWidgets, QtWebKit
HistoryType = Union['QtWebEngineWidgets.QWebEngineHistory', 'QtWebKit.QWebHistory']
"""
),
]
for action in actions:
self.assertActionAfterRefactorEqualToAction(action)
def test_type_assing_list(self):
actions = [
(
"""\
import typing
if typing.TYPE_CHECKING:
from PyQt5.QtWebKit import QWebHistory
HistoryType = typing.List['QWebHistory']
"""
),
(
"""\
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from PyQt5.QtWebKit import QWebHistory
HistoryType = List['QWebHistory']
"""
),
]
for action in actions:
self.assertActionAfterRefactorEqualToAction(action)
def test_type_assing_cast(self):
actions = [
(
"""\
import typing
if typing.TYPE_CHECKING:
from PyQt5.QtWebKit import QWebHistory
HistoryType = typing.cast('QWebHistory', None)
"""
),
(
"""\
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from PyQt5.QtWebKit import QWebHistory
HistoryType = cast('QWebHistory', return_value)
"""
),
(
"""\
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from PyQt5 import QtWebKit
HistoryType = cast('QtWebKit.QWebHistory', return_value)
"""
),
]
for action in actions:
self.assertActionAfterRefactorEqualToAction(action)
|
import logging
from collections.abc import Iterable
import os
import shutil
from typing import List, Optional
from helpers.shell import execute_shell
from synthesis import smt_format
from synthesis.solver_with_query_storage import SmtSolverWithQueryStorageAbstract
class TruncatableQueryStorage_ViaFile:
def __init__(self, file_name):
self._file_writer = open(file_name, mode='w')
assert self._file_writer.seekable()
def truncate(self, position):
self._file_writer.seek(position)
self._file_writer.truncate()
@property
def position(self):
return self._file_writer.tell()
def flush(self):
self._file_writer.flush()
def _append(self, s):
self._file_writer.write(s)
self._file_writer.write('\n')
def __iadd__(self, other):
if isinstance(other, Iterable) and \
not isinstance(other, str) and \
not isinstance(other, bytes): # TODO: looks weird
for s in other:
self._append(s)
return self
else:
self._append(other)
return self
def close(self):
self._file_writer.close()
class EmulatePushPop:
def __init__(self, query_storage:'truncate-able query storage'):
assert hasattr(query_storage, 'truncate')
assert hasattr(query_storage, 'position')
self._query_storage = query_storage
self._pushes = []
def pop(self):
position_to_trunk_to = self._pushes.pop()
self._query_storage.truncate(position_to_trunk_to)
def push(self):
self._pushes.append(self._query_storage.position)
class Z3NonInteractiveViaFiles(SmtSolverWithQueryStorageAbstract):
""" I use this solver for non-incremental solving. """
def __init__(self,
files_prefix:str,
z3_path:str,
remove_file:bool):
self._file_name = files_prefix + '.smt2'
super().__init__(TruncatableQueryStorage_ViaFile(self._file_name))
self._emulate_push_pop = EmulatePushPop(self._query_storage)
self._z3_cmd = z3_path + ' -smt2 ' + self._file_name
self.__remove_file = remove_file
def die(self):
self._query_storage.close()
if self.__remove_file:
os.remove(self._file_name)
def push(self):
self._emulate_push_pop.push()
def pop(self):
self._emulate_push_pop.pop()
    def solve(self) -> Optional[List[str]]:
logging.info('solving ' + self._file_name)
self._query_storage += smt_format.make_exit()
self._query_storage.flush()
#change the name of file and z3_cmd if necessary
ret, out, err = execute_shell(self._z3_cmd)
logging.debug('solver returned: \n' + out)
out_lines = [s.strip() for s in out.splitlines() if s]
if ret == 1 and out_lines[0].strip() != 'unsat':
assert 0, 'error while executing z3: ret: {ret}\n' \
'out:{out}\n' \
'err:{err}\n'.format(ret=str(ret),
out=out,
err=err)
if out_lines[0] == 'sat':
return out_lines[1:]
else:
return None
class FakeSolver(Z3NonInteractiveViaFiles):
"""
Solver saves the query into file, instead of calling the solver.
Always returns UNSAT.
"""
def __init__(self, smt_file_prefix, z3_path:str):
super().__init__(smt_file_prefix, z3_path, True)
self.__cur_index = 1
self.__file_prefix = smt_file_prefix
    def solve(self) -> Optional[List[str]]:
self._query_storage += smt_format.make_exit()
self._query_storage.flush()
file_name = '{file_prefix}_{index}.smt2'.format(file_prefix=self.__file_prefix,
index=str(self.__cur_index))
logging.info('copying {src} into {dst}'.format(src=self._file_name, dst=file_name))
logging.info(shutil.copyfile(self._file_name, file_name))
self.__cur_index += 1
return None # always return UNSAT
|
import string
from operator import ge as greater_than_or_equal, gt as greater_than
from collections import deque
OPERATOR_PRECEDENCE = {
'(':0,
'+':1,
'-':1,
'*':2,
'/':2,
'^':3,
}
RIGHT_ASSOCIATIVE_OPERATORS = '^'
LEFT_ASSOCIATIVE_OPERATORS = '+-/*'
def pop_operator_queue(operators, output, token):
"""
    Pop operators from the operator stack onto the output, then push the new token.
    Left-associative operators pop while the stack top has equal or higher precedence; right-associative ones only while it is strictly higher.
:type operators: deque
:type output: deque
:type token: str
:return: None
"""
comparison_op = greater_than if token in RIGHT_ASSOCIATIVE_OPERATORS else greater_than_or_equal
while operators and comparison_op(OPERATOR_PRECEDENCE[operators[-1]], OPERATOR_PRECEDENCE[token]):
output.append(operators.pop())
operators.append(token)
def to_postfix(infix):
infix = deque(infix)
output = deque()
operators = deque()
while infix:
token = infix.popleft()
if token in string.digits:
output.append(token)
elif token == '(':
operators.append(token)
elif token == ')':
while operators and operators[-1] != '(':
output.append(operators.pop())
output.append(operators.pop())
elif token in LEFT_ASSOCIATIVE_OPERATORS:
# >=
pop_operator_queue(operators, output, token)
elif token in RIGHT_ASSOCIATIVE_OPERATORS:
# >
pop_operator_queue(operators, output, token)
while operators:
output.append(operators.pop())
return ''.join(output).replace('(','')
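# Quick sanity check (mirrors the unit tests below): '*' binds tighter than '+', so
# to_postfix("2+7*5") yields "275*+", and the parentheses in "3*3/(7+1)" give "33*71+/".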
import unittest
class TestFirst(unittest.TestCase):
def testFirst(self):
test = self
Test = self
test.assert_equals = Test.assertEqual
Test.assert_equals = Test.assertEqual
Test.assert_equals(to_postfix("2+7"), "27+")
Test.assert_equals(to_postfix("2+7+9"), "27+9+")
Test.assert_equals(to_postfix("2+7*5"), "275*+")
Test.assert_equals(to_postfix("99*6+"), "996*+")
#'337/*1+'
Test.assert_equals("33*8/", to_postfix("3*3/8"))
Test.assert_equals("33*71+/", to_postfix("3*3/(7+1)"))
Test.assert_equals("562-9*+", to_postfix("5+(6-2)*9"))
Test.assert_equals("562-9*+36^+", to_postfix("5+(6-2)*9+3^6"))
Test.assert_equals("562-9*+371-^+", to_postfix("5+(6-2)*9+3^(7-1)"))
Test.assert_equals(to_postfix("(5-4-1)+9/5/2-7/1/7"), "54-1-95/2/+71/7/-")
|
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: JFZhou 2020-05-31)
import numpy as np
import os
import sys
sys.path.insert(0, 'subtools/pytorch')
import libs.support.kaldi_io as kaldi_io
from plda_base import PLDA
class CORAL(object):
def __init__(self,
mean_diff_scale=1.0,
within_covar_scale=0.8,
between_covar_scale=0.8):
self.tot_weight = 0
self.mean_stats = 0
self.variance_stats = 0
        self.mean_diff_scale = mean_diff_scale
self.within_covar_scale = within_covar_scale
self.between_covar_scale = between_covar_scale
def add_stats(self, weight, ivector):
ivector = np.reshape(ivector,(-1,1))
if type(self.mean_stats)==int:
self.mean_stats = np.zeros(ivector.shape)
self.variance_stats = np.zeros((ivector.shape[0],ivector.shape[0]))
self.tot_weight += weight
self.mean_stats += weight * ivector
self.variance_stats += weight * np.matmul(ivector,ivector.T)
def update_plda(self,):
dim = self.mean_stats.shape[0]
#TODO:Add assert
'''
// mean_diff of the adaptation data from the training data. We optionally add
// this to our total covariance matrix
'''
mean = (1.0 / self.tot_weight) * self.mean_stats
'''
D(x)= E[x^2]-[E(x)]^2
'''
variance = (1.0 / self.tot_weight) * self.variance_stats - np.matmul(mean,mean.T)
'''
// update the plda's mean data-member with our adaptation-data mean.
'''
mean_diff = mean - self.mean
variance += self.mean_diff_scale * np.matmul(mean_diff,mean_diff.T)
self.mean = mean
o_covariance = self.within_var + self.between_var
eigh_o, Q_o = np.linalg.eigh(o_covariance)
self.sort_svd(eigh_o, Q_o)
eigh_i, Q_i = np.linalg.eigh(variance)
self.sort_svd(eigh_i, Q_i)
EIGH_O = np.diag(eigh_o)
EIGH_I = np.diag(eigh_i)
C_o = np.matmul(np.matmul(Q_o,np.linalg.inv(np.sqrt(EIGH_O))),Q_o.T)
C_i = np.matmul(np.matmul(Q_i,np.sqrt(EIGH_I)),Q_i.T)
A = np.matmul(C_i,C_o)
S_w = np.matmul(np.matmul(A,self.within_var),A.T)
S_b = np.matmul(np.matmul(A,self.between_var),A.T)
self.between_var = S_b
self.within_var = S_w
    def sort_svd(self,s, d):
        # Sort eigenvalues in ascending order, keeping the corresponding
        # eigenvector columns of d aligned with them.
        for i in range(len(s)-1):
            for j in range(i+1,len(s)):
                if s[i] > s[j]:
                    s[i], s[j] = s[j], s[i]
                    d[:, [i, j]] = d[:, [j, i]]
def plda_read(self,plda):
with kaldi_io.open_or_fd(plda,'rb') as f:
for key,vec in kaldi_io.read_vec_flt_ark(f):
if key == 'mean':
self.mean = vec.reshape(-1,1)
self.dim = self.mean.shape[0]
elif key == 'within_var':
self.within_var = vec.reshape(self.dim, self.dim)
else:
self.between_var = vec.reshape(self.dim, self.dim)
def plda_write(self,plda):
with kaldi_io.open_or_fd(plda,'wb') as f:
kaldi_io.write_vec_flt(f, self.mean, key='mean')
kaldi_io.write_vec_flt(f, self.within_var.reshape(-1,1), key='within_var')
kaldi_io.write_vec_flt(f, self.between_var.reshape(-1,1), key='between_var')
class CIP(object):
"""
Reference:
Wang Q, Okabe K, Lee K A, et al. A Generalized Framework for Domain Adaptation of PLDA in Speaker Recognition[C]//ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020: 6619-6623.
"""
def __init__(self,
interpolation_weight=0.5):
self.interpolation_weight = interpolation_weight
def interpolation(self,coral,plda_in_domain):
mean_in,between_var_in,within_var_in = self.plda_read(plda_in_domain)
self.mean = mean_in
self.between_var = self.interpolation_weight*coral.between_var+(1-self.interpolation_weight)*between_var_in
self.within_var = self.interpolation_weight*coral.within_var+(1-self.interpolation_weight)*within_var_in
def plda_read(self,plda):
with kaldi_io.open_or_fd(plda,'rb') as f:
for key,vec in kaldi_io.read_vec_flt_ark(f):
if key == 'mean':
mean = vec.reshape(-1,1)
dim = mean.shape[0]
elif key == 'within_var':
within_var = vec.reshape(dim, dim)
else:
between_var = vec.reshape(dim, dim)
return mean,between_var,within_var
def main():
if len(sys.argv)!=5:
print('<plda-out-domain> <adapt-ivector-rspecifier> <plda-in-domain> <plda-adapt> \n',
)
sys.exit()
plda_out_domain = sys.argv[1]
train_vecs_adapt = sys.argv[2]
plda_in_domain = sys.argv[3]
plda_adapt = sys.argv[4]
coral=CORAL()
coral.plda_read(plda_out_domain)
for _,vec in kaldi_io.read_vec_flt_auto(train_vecs_adapt):
coral.add_stats(1,vec)
coral.update_plda()
cip=CIP()
cip.interpolation(coral,plda_in_domain)
plda_new = PLDA()
plda_new.mean = cip.mean
plda_new.within_var = cip.within_var
plda_new.between_var = cip.between_var
plda_new.get_output()
plda_new.plda_trans_write(plda_adapt)
if __name__ == "__main__":
main() |
from utils import get_paths, get_basic_stats
import os
def main():
book_paths = get_paths('../Data/books')
book2stats = {}
for book_path in book_paths:
stats = get_basic_stats(book_path)
        # os.path.splitext drops the extension reliably; str.strip('.txt') would trim characters
        book = os.path.splitext(os.path.basename(book_path))[0]
print(book, stats)
book2stats[book] = stats
with open(f'top_20_{book}.txt', 'w') as f:
f.write("\n".join(stats['top_20_tokens']))
stats2book_with_highest_value = {
"num_sents": max(book2stats, key=lambda book: book2stats[book]["num_sents"]),
"num_tokens": max(book2stats, key=lambda book: book2stats[book]["num_tokens"]),
"vocab_size": max(book2stats, key=lambda book: book2stats[book]["vocab_size"]),
"num_chapters_or_acts": max(book2stats, key=lambda book: book2stats[book]["num_chapters_or_acts"]),
}
print(stats2book_with_highest_value)
if __name__ == '__main__':
main()
|
from app import srv
from app import reply
@srv.route('/hello')
def hello():
return reply.say_hello()
|
# @Author: Tian Qiao <qiaotian>
# @Date: 2016-11-27T10:54:53+08:00
# @Email: [email protected]
# @Last modified by: qiaotian
# @Last modified time: 2016-11-27T10:54:53+08:00
import sys
print("excuting python script")
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: [email protected]
*
"""
a,b,c=map(int,input().split());d=a-b
print(0 if c==d==0 else '?' if c-abs(d)>=0 else '+' if d>0 else '-') |
from elasticsearch import Elasticsearch,RequestsHttpConnection,NotFoundError
from flask import url_for
import config
import json
es = Elasticsearch(config.ES_HOSTS,connection_class=RequestsHttpConnection)
def create(the_data,the_index,the_doc_type):
try:
results = es.create(index=the_index,
doc_type=the_doc_type,
body=json.dumps(the_data)
)
if results['created']:
return { 'status': 'success',
'message': '',
'created_id': results['_id'] }
else:
return { 'status': 'failure',
'message': 'failed to create new record.',
'created_id': '' }
except Exception as e:
print e
return { 'status': 'failure',
'message': 'unknown error',
'created_id': '' }
def update(the_data,primary_key,the_index,the_doc_type):
    old_data = search(size=1,page=0,search='%s:"%s"'%(primary_key,the_data[primary_key]),
                      the_sort=None,the_index=the_index,the_doc_type=the_doc_type)
if len(old_data['results']) > 0:
the_data.pop('uri',None)
the_data.pop('id',None)
try:
es.update(index=the_index,
doc_type=the_doc_type,
id=old_data['results'][0]['id'],
body='{ "doc" : %s }'%(json.dumps(the_data))
)
return { 'status': 'success',
'message': '',
'created_id': old_data['results'][0]['id'] }
except Exception as e:
print 'ERROR:',e
return { 'status': 'failure',
'message': 'unknown error',
'created_id': '' }
else:
return create(the_data,the_index,the_doc_type)
def delete(the_id,the_index,the_doc_type):
try :
es.delete(index=the_index,
doc_type=the_doc_type,
id=the_id
)
return { 'status': 'success', 'message': '' }
except NotFoundError as e:
return { 'status': 'failure', 'message': 'id not found' }
except Exception as e:
print e
return { 'status': 'failure', 'message': 'unknown error' }
def get(the_id,the_index,the_doc_type):
try:
results = es.get(
index=the_index,
doc_type=the_doc_type,
id='%s'%(the_id),
ignore=404
)
if results and results['found'] :
return {'status':'success','message':'','results':[from_es_hit(results)]}
return {'status':'success','message':'','results':[]}
except NotFoundError as e:
return { 'status': 'failure', 'message': 'id not found', 'results': [] }
except Exception as e:
print e
return { 'status': 'failure', 'message': 'unknown exception', 'results': [] }
def search(size,page,search,the_sort,the_index,the_doc_type):
try:
results = es.search(
index=the_index,
doc_type=the_doc_type,
size=size,
q=search or "*",
sort=the_sort or ""
)
retVal = []
if results and results['hits']['total'] > 0 :
for hit in results['hits']['hits']:
retVal.append(from_es_hit(hit))
return {'status':'success','message':'','results':retVal}
except Exception as e:
print e
return {'status':'failure','message':'unknown error','results':[]}
def from_es_hit(hit):
the_data = {}
the_data['id'] = hit['_id']
for key,val in hit['_source'].items():
the_data[key] = val
#the_data['uri'] = url_for('get_'+hit['_index'], id=the_data['id'], _external=True)
return the_data
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from wsgi_intercept import interceptor
from nova.api.openstack.placement import deploy
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import conf
from nova import context
from nova import objects
from nova import rc_fields as fields
from nova import test
from nova.tests.functional.api.openstack.placement import test_report_client
from nova.tests import uuidsentinel as uuids
from nova.virt import driver as virt_driver
CONF = conf.CONF
VCPU = fields.ResourceClass.VCPU
MEMORY_MB = fields.ResourceClass.MEMORY_MB
DISK_GB = fields.ResourceClass.DISK_GB
COMPUTE_HOST = 'compute-host'
class IronicResourceTrackerTest(test.TestCase):
"""Tests the behaviour of the resource tracker with regards to the
transitional period between adding support for custom resource classes in
the placement API and integrating inventory and allocation records for
Ironic baremetal nodes with those custom resource classes.
"""
FLAVOR_FIXTURES = {
'CUSTOM_SMALL_IRON': objects.Flavor(
name='CUSTOM_SMALL_IRON',
flavorid=42,
vcpus=4,
memory_mb=4096,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
'CUSTOM_BIG_IRON': objects.Flavor(
name='CUSTOM_BIG_IRON',
flavorid=43,
vcpus=16,
memory_mb=65536,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
}
COMPUTE_NODE_FIXTURES = {
uuids.cn1: objects.ComputeNode(
uuid=uuids.cn1,
hypervisor_hostname='cn1',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn2: objects.ComputeNode(
uuid=uuids.cn2,
hypervisor_hostname='cn2',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn3: objects.ComputeNode(
uuid=uuids.cn3,
hypervisor_hostname='cn3',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=16,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=65536,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=2048,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
}
INSTANCE_FIXTURES = {
uuids.instance1: objects.Instance(
uuid=uuids.instance1,
flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING,
power_state=power_state.RUNNING,
project_id='project',
user_id=uuids.user,
),
}
def setUp(self):
super(IronicResourceTrackerTest, self).setUp()
self.flags(auth_strategy='noauth2', group='api')
self.flags(
reserved_host_memory_mb=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0,
)
self.ctx = context.RequestContext('user', 'project')
self.app = lambda: deploy.loadapp(CONF)
self.report_client = test_report_client.NoAuthReportClient()
driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
driver.node_is_available.return_value = True
self.driver_mock = driver
self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
self.rt.scheduler_client.reportclient = self.report_client
self.rt.reportclient = self.report_client
self.url = 'http://localhost/placement'
self.create_fixtures()
def create_fixtures(self):
for flavor in self.FLAVOR_FIXTURES.values():
flavor._context = self.ctx
flavor.obj_set_defaults()
flavor.create()
# We create some compute node records in the Nova cell DB to simulate
# data before adding integration for Ironic baremetal nodes with the
# placement API...
for cn in self.COMPUTE_NODE_FIXTURES.values():
cn._context = self.ctx
cn.obj_set_defaults()
cn.create()
for instance in self.INSTANCE_FIXTURES.values():
instance._context = self.ctx
instance.obj_set_defaults()
instance.create()
def placement_get_inventory(self, rp_uuid):
url = '/resource_providers/%s/inventories' % rp_uuid
resp = self.report_client.get(url)
if 200 <= resp.status_code < 300:
return resp.json()['inventories']
else:
return resp.status_code
def placement_get_allocations(self, consumer_uuid):
url = '/allocations/%s' % consumer_uuid
resp = self.report_client.get(url)
if 200 <= resp.status_code < 300:
return resp.json()['allocations']
else:
return resp.status_code
def placement_get_custom_rcs(self):
url = '/resource_classes'
resp = self.report_client.get(url)
if 200 <= resp.status_code < 300:
all_rcs = resp.json()['resource_classes']
return [rc['name'] for rc in all_rcs
if rc['name'] not in fields.ResourceClass.STANDARD]
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.compute_node.ComputeNode.save')
@mock.patch('keystoneauth1.session.Session.get_auth_headers',
return_value={'x-auth-token': 'admin'})
@mock.patch('keystoneauth1.session.Session.get_endpoint',
return_value='http://localhost/placement')
def test_ironic_ocata_to_pike(self, mock_vbi, mock_endpoint, mock_auth,
mock_cn):
"""Check that when going from an Ocata installation with Ironic having
node's resource class attributes set, that we properly "auto-heal" the
inventory and allocation records in the placement API to account for
both the old-style VCPU/MEMORY_MB/DISK_GB resources as well as the new
custom resource class from Ironic's node.resource_class attribute.
"""
with interceptor.RequestsInterceptor(
app=self.app, url=self.url):
# Before the resource tracker is "initialized", we shouldn't have
# any compute nodes in the RT's cache...
self.assertEqual(0, len(self.rt.compute_nodes))
# There should not be any records in the placement API since we
# haven't yet run update_available_resource() in the RT.
for cn in self.COMPUTE_NODE_FIXTURES.values():
self.assertEqual(404, self.placement_get_inventory(cn.uuid))
for inst in self.INSTANCE_FIXTURES.keys():
self.assertEqual({}, self.placement_get_allocations(inst))
# Nor should there be any custom resource classes in the placement
# API, since we haven't had an Ironic node's resource class set yet
self.assertEqual(0, len(self.placement_get_custom_rcs()))
# Now "initialize" the resource tracker as if the compute host is a
# Ocata host, with Ironic virt driver, but the admin has not yet
# added a resource_class attribute to the Ironic baremetal nodes in
# her system.
# NOTE(jaypipes): This is what nova.compute.manager.ComputeManager
# does when "initializing" the service...
for cn in self.COMPUTE_NODE_FIXTURES.values():
nodename = cn.hypervisor_hostname
self.driver_mock.get_available_resource.return_value = {
'hypervisor_hostname': nodename,
'hypervisor_type': 'ironic',
'hypervisor_version': 0,
'vcpus': cn.vcpus,
'vcpus_used': cn.vcpus_used,
'memory_mb': cn.memory_mb,
'memory_mb_used': cn.memory_mb_used,
'local_gb': cn.local_gb,
'local_gb_used': cn.local_gb_used,
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
}
self.driver_mock.get_inventory.return_value = {
VCPU: {
'total': cn.vcpus,
'reserved': 0,
'min_unit': 1,
'max_unit': cn.vcpus,
'step_size': 1,
'allocation_ratio': 1.0,
},
MEMORY_MB: {
'total': cn.memory_mb,
'reserved': 0,
'min_unit': 1,
'max_unit': cn.memory_mb,
'step_size': 1,
'allocation_ratio': 1.0,
},
DISK_GB: {
'total': cn.local_gb,
'reserved': 0,
'min_unit': 1,
'max_unit': cn.local_gb,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
self.rt.update_available_resource(self.ctx, nodename)
self.assertEqual(3, len(self.rt.compute_nodes))
            # A canary just to make sure the custom resource class checked by
            # the assertion further below hasn't already been added somehow...
crcs = self.placement_get_custom_rcs()
self.assertNotIn('CUSTOM_SMALL_IRON', crcs)
# Verify that the placement API has the "old-style" resources in
# inventory and allocations
for cn in self.COMPUTE_NODE_FIXTURES.values():
inv = self.placement_get_inventory(cn.uuid)
self.assertEqual(3, len(inv))
# Now "spawn" an instance to the first compute node by calling the
# RT's instance_claim().
cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
cn1_nodename = cn1_obj.hypervisor_hostname
inst = self.INSTANCE_FIXTURES[uuids.instance1]
# Since we're pike, the scheduler would have created our
# allocation for us. So, we can use our old update routine
# here to mimic that before we go do the compute RT claim,
# and then the checks below.
self.rt.reportclient.update_instance_allocation(self.ctx,
cn1_obj,
inst,
1)
with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
pass
allocs = self.placement_get_allocations(inst.uuid)
self.assertEqual(1, len(allocs))
self.assertIn(uuids.cn1, allocs)
resources = allocs[uuids.cn1]['resources']
self.assertEqual(3, len(resources))
for rc in (VCPU, MEMORY_MB, DISK_GB):
self.assertIn(rc, resources)
# Now we emulate the operator setting ONE of the Ironic node's
# resource class attribute to the value of a custom resource class
# and re-run update_available_resource(). We will expect to see the
# inventory and allocations reset for the first compute node that
# had an instance on it. The new inventory and allocation records
# will be for VCPU, MEMORY_MB, DISK_GB, and also a new record for
# the custom resource class of the Ironic node.
self.driver_mock.get_available_resource.return_value = {
'hypervisor_hostname': cn1_obj.hypervisor_hostname,
'hypervisor_type': 'ironic',
'hypervisor_version': 0,
'vcpus': cn1_obj.vcpus,
'vcpus_used': cn1_obj.vcpus_used,
'memory_mb': cn1_obj.memory_mb,
'memory_mb_used': cn1_obj.memory_mb_used,
'local_gb': cn1_obj.local_gb,
'local_gb_used': cn1_obj.local_gb_used,
'numa_topology': None,
'resource_class': 'small-iron',
}
self.driver_mock.get_inventory.return_value = {
VCPU: {
'total': cn1_obj.vcpus,
'reserved': 0,
'min_unit': 1,
'max_unit': cn1_obj.vcpus,
'step_size': 1,
'allocation_ratio': 1.0,
},
MEMORY_MB: {
'total': cn1_obj.memory_mb,
'reserved': 0,
'min_unit': 1,
'max_unit': cn1_obj.memory_mb,
'step_size': 1,
'allocation_ratio': 1.0,
},
DISK_GB: {
'total': cn1_obj.local_gb,
'reserved': 0,
'min_unit': 1,
'max_unit': cn1_obj.local_gb,
'step_size': 1,
'allocation_ratio': 1.0,
},
'CUSTOM_SMALL_IRON': {
'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
self.rt.update_available_resource(self.ctx, cn1_nodename)
# Verify the auto-creation of the custom resource class, normalized
# to what the placement API expects
self.assertIn('CUSTOM_SMALL_IRON', self.placement_get_custom_rcs())
allocs = self.placement_get_allocations(inst.uuid)
self.assertEqual(1, len(allocs))
self.assertIn(uuids.cn1, allocs)
resources = allocs[uuids.cn1]['resources']
self.assertEqual(3, len(resources))
for rc in (VCPU, MEMORY_MB, DISK_GB):
self.assertIn(rc, resources)
# TODO(jaypipes): Check allocations include the CUSTOM_SMALL_IRON
# resource class. At the moment, we do not add an allocation record
# for the Ironic custom resource class. Once the flavor is updated
# to store a resources:$CUSTOM_RESOURCE_CLASS=1 extra_spec key and
# the scheduler is constructing the request_spec to actually
# request a single amount of that custom resource class, we will
# modify the allocation/claim to consume only the custom resource
# class and not the VCPU, MEMORY_MB and DISK_GB.
|
#!/usr/bin/env python
"""
"Hashed sequence index" (hsx) file reader (for a fasta file)
-------------------------------------------------------------------
offset 0x00: D2 52 70 95 big endian magic number
.. (95 70 52 D2 => little endian)
offset 0x04: 00 00 01 xx version 1.0 (see note 1)
offset 0x08: 00 00 00 1C header length (in bytes, including this
.. field)
offset 0x0C: xx xx xx xx FN, number of files (see note 2)
offset 0x10: xx xx xx xx FO, offset to file table
offset 0x14: xx xx xx xx HN, number of hash buckets (see notes 3 and 4)
offset 0x18: xx xx xx xx HO, offset to hash table
offset 0x1C: xx xx xx xx SN, number of sequences
offset 0x20: xx xx xx xx SO, offset to sequence index table (see
.. note 5)
offset FO: xx xx xx xx FIO0, offset to file info for file 0
... (FN-1 more entries, at 4 bytes per)
offset FIOn: LL xx .. type of file (ascii "fa", "2bit", etc., see
note 6)
LL xx .. name of file (see note 7)
... (FN-1 more entries, variable length)
offset HO: xx xx xx xx xx SIOn, offset into sequence index table (see
.. notes 8, 9 and 10)
... (HN-1 more entries, at 5 bytes per)
xx xx xx xx xx offset past end of sequence index table
offset SO: xx xx xx xx xx length of the sequence (see note 11)
xx file number (index into file table)
xx xx xx xx xx xx offset to the sequence data (see note 12)
LL xx .. name of sequence (see note 13)
... (SN-1 more entries, variable length)
Notes:
(1) The least significant byte of the version is the "sub version".
For version 1, this is 00 (secondary hashes are not in use) or 01
(secondary hashes are in use).
(2) The number of files is limited to 255.
(3) It is assumed that the number of buckets is set so that the average
number of sequences per bucket (SN/HN) is reasonably small (e.g. 10).
(4) The hash table actually includes HN+1 buckets. The extra bucket has
size zero and gives the offset to just past the end of the sequence
index table.
(5) Entries in the sequence index table are necessarily stored in hash
order. Entries with the same hash are stored in alphabetical order;
actually, in lexicographic order over the bytes of their names.
(6) Strings are stored as a length byte followed by ascii text.
(7) If a file info record contains an empty name, the name of the file is
the same as the index file itself, with the file type used as the
extension (e.g. "reads.hsx" becomes "reads.fa"). This allows files to
be renamed without rebuilding the index.
(8) SIOn is the file offset for the nth entry in the sequence index table.
When this is in a hash table entry, it is the index for the first
sequence in that hash's bucket.
(9) The most significant bit in a bucket's SIOn value is used to indicate
whether the bucket is empty or not. If a bucket is empty, this bit is
set (1), otherwise it is clear.
(10) The end of a bucket can be determined from the SIOn entry for the
start of the next bucket.
(11) A sequence may be empty, so zero is a legitimate value for the
sequence length.
(12) The offset to the sequence data is an offset into the sequence file.
For fasta it can point to the ">" at the start of the sequence's
header, or directly to the sequence data.
(13) When secondary hashes are in use, the sequence name (including the
terminating zero) is replaced by the four-byte secondary hash.
:Author: Bob Harris ([email protected])
"""
import sys,struct
import hassock_hash
class HsxFile(object):
def __init__(self,fileName,debug=None):
self.fileName = fileName
self.file = None
self.numFiles = 0
if (debug == None): self.debug = []
else: self.debug = debug
self.open()
magicBig = 0xD2527095L
magicLittle = 0x957052D2L
version = 0x00000100L
msBit5 = 0x80 << (4*8)
def open(self):
self.file = file(self.fileName,"rb")
self.magic = magic = struct.unpack(">L",self.file.read(4))[0]
if (magic == HsxFile.magicBig): self.byteOrder = ">" # (big endian)
elif (magic == HsxFile.magicLittle): self.byteOrder = "<" # (little endian)
else:
assert (False), \
"%s is not an hsx file (magic = %08X)" \
% (self.fileName,magic)
self.struct4 = "%sL" % self.byteOrder
self.version = self.read4()
assert (self.version == HsxFile.version), \
"%s is hsx version %08X, which is not supported" \
% (self.fileName,self.version)
self.read_header()
self.load_file_table()
def close(self):
self.file.close()
for fileIx in range(self.numFiles):
(name,file) = self.fileTable[fileIx]
if (file != None): file.close()
def read_header(self):
self.headerLength = self.read4()
assert (self.headerLength >= 0x1C), \
"%s has unsupported header length (%08X)" \
% (self.fileName,self.headerLength)
(self.numFiles,
self.fileTableOffset,
self.numBuckets,
self.hashTableOffset,
self.numSequences,
self.seqTableOffset) = struct.unpack("%sLLLLLL" % self.byteOrder,self.file.read(24))
assert (self.numBuckets != 0), \
"%s has corrupt header (numBuckets = 0)" % (self.fileName)
def load_file_table(self):
self.file.seek(self.fileTableOffset)
offsetTable = self.file.read(4*self.numFiles)
offsetTable = struct.unpack("%s%s" % (self.byteOrder,"L"*self.numFiles),offsetTable)
self.fileTable = [None] * self.numFiles
basePath = baseName = None
for fileIx in range(self.numFiles):
self.file.seek(offsetTable[fileIx])
extension = self.readString()
name = self.readString()
if (name == ""):
if (baseName == None):
baseName = self.base_file_name()
name = baseName + "." + extension
else:
if (basePath == None):
basePath = self.base_file_path()
name = basePath + name + "." + extension
self.fileTable[fileIx] = (name,None) # (second field holds file when opened)
#.. print "fileTable[%d] = %s" % (fileIx,name)
def base_file_name(self):
slash = self.fileName.rfind("/")
dot = self.fileName.rfind(".")
if (dot < 0): return self.fileName
if (dot < slash): return self.fileName
return self.fileName[:dot]
def base_file_path(self):
slash = self.fileName.rfind("/")
if (slash < 0): return ""
return self.fileName[:slash+1]
def get_sequence(self,name):
if ("fetch" in self.debug):
print >>sys.stderr, "[fetching %s]" % name
# read hash bucket for this name
bucket = HsxFile.hash(name) % self.numBuckets
if ("fetch" in self.debug):
print >>sys.stderr, "[ bucket = %d (file offset %08X)]" \
% (bucket,self.hashTableOffset+5*bucket)
self.file.seek(self.hashTableOffset + 5*bucket)
bucketOffset = self.read5()
if (bucketOffset & HsxFile.msBit5 != 0):
if ("fetch" in self.debug):
print >>sys.stderr, "[ bucket is empty]"
return None
bucketEnd = self.read5() & ~HsxFile.msBit5
if ("fetch" in self.debug):
print >>sys.stderr, "[ bucket offset = %010X..%010X ]" \
% (bucketOffset,bucketEnd)
# scan the bucket until we find this sequence
self.file.seek(bucketOffset)
seqIx = 1
seqName = None
while (bucketOffset < bucketEnd):
seqLength = self.read5()
fileIx = self.read1()
seqOffset = self.read6()
seqName = self.readString()
if ("fetch" in self.debug):
print >>sys.stderr, "[ (%010X) name %d = %s]" \
% (bucketOffset,seqIx,seqName)
if (seqName == name): break
if (seqName > name): return None
bucketOffset += 1 + 6 + 5 + len(seqName) + 1
seqIx += 1
if (seqName != name):
if ("fetch" in self.debug):
print >>sys.stderr, "[ %s not in bucket]" % name
return None
# open the sequence file (if it isn't already open)
assert (fileIx < len(self.fileTable)), \
"file index for %s is out of bounds (%d > %d)" \
% (name,fileIx,len(self.fileTable))
(seqFileName,seqFile) = self.fileTable[fileIx]
if (seqFile == None):
if ("fetch" in self.debug):
print >>sys.stderr, "[ opening %s]" % seqFileName
seqFile = file(seqFileName,"rt")
self.fileTable[fileIx] = (seqFileName,seqFile)
if ("fetch" in self.debug):
print >>sys.stderr, "[ reading from %s:%012X]" \
% (seqFileName,seqOffset)
# read the sequence
seqFile.seek(seqOffset)
seqLines = []
seqRead = 0
while (True):
line = seqFile.readline()
if (line == ""): break
line = line.strip()
if ("fetch" in self.debug):
print >>sys.stderr, "[ read %s]" % line
if (line.startswith(">")):
if (len(seqLines) != 0): break
seqLines += [line]
continue
seqRead += len(line)
if (seqRead > seqLength):
line = line[:seqLength-seqRead]  # trim the excess past the recorded length
seqRead = seqLength
seqLines += [line]
if (seqRead == seqLength):
break
assert (seqRead == seqLength), \
"sequence for %s is short (%d < %d)" \
% (name,seqRead,seqLength)
return "\n".join(seqLines)
def read1(self):
return ord(self.file.read(1))
def read4(self):
return struct.unpack(self.struct4,self.file.read(4))[0]
def read5(self):
return self.read_and_unpack(5)
def read6(self):
return self.read_and_unpack(6)
def readString(self):
ch = self.file.read(1)
s = self.file.read(ord(ch))
return "".join(s)
def read_and_unpack(self,bytes):
data = self.file.read(bytes)
if (self.byteOrder == "<"): # (make data big endian)
data = [ch for ch in data]
data.reverse()
val = 0
for ch in data: val = (val << 8) + ord(ch)
return val
# hash
def hash(name):
return hassock_hash.hassock_hash(name)
hash = staticmethod(hash)
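# Minimal command-line usage sketch (added for illustration; the index file
# and sequence name are whatever your own data provides):
def main():
    if len(sys.argv) != 3:
        sys.stderr.write("usage: %s <index.hsx> <sequence_name>\n" % sys.argv[0])
        sys.exit(1)
    hsx = HsxFile(sys.argv[1])
    seq = hsx.get_sequence(sys.argv[2])
    hsx.close()
    if (seq == None):
        sys.stderr.write("%s not found in %s\n" % (sys.argv[2], sys.argv[1]))
        sys.exit(1)
    sys.stdout.write(seq + "\n")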
if __name__ == "__main__": main()
|
""" Helpers for client-side """
import sys
from urllib import error as urllib_error
from urllib import request
import json
from PIL import Image, ImageTk
from VectorMessenger.MessengerCore.Helpers import Global as h
def iconbitmap_universal(window: object, icon_image=h.ICON_CLIENT_PATH):
""" Cross-platform icon loader for tkinter windows.
Args:
window (object): Tkinter window to apply icon to.
icon_image (str)(Optional): Path to icon image.
"""
image_pil = Image.open(icon_image)
image_tk = ImageTk.PhotoImage(image_pil)
window.tk.call('wm', 'iconphoto', window._w, image_tk)
class RedirectSTD:
def __init__(self, text_widget: object):
""" Redirect STD(-OUT & -ERR) to tkinter Text widget.
Args:
text_widget (object): Tkinter Text widget.
"""
self.__text_widget = text_widget
self.redirect()
def redirect(self):
sys.stdout = self.__STD2TK(self.__text_widget)
sys.stderr = self.__STD2TK(self.__text_widget)
def disable(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
class __STD2TK:
def __init__(self, text_widget):
""" Low level redirect STD(-OUT & -ERR) to tkinter Text widget realisation.
Args:
text_widget (object): Tkinter Text widget.
"""
self.__text_widget = text_widget
def write(self, string):
self.__text_widget.config(state="normal")
self.__text_widget.insert("end", f'{string}')
self.__text_widget.see("end")
self.__text_widget.config(state="disabled")
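# Minimal usage sketch for RedirectSTD (illustrative only; the widget names
# below are made up):
#
#   import tkinter as tk
#   root = tk.Tk()
#   log_box = tk.Text(root, state='disabled')
#   log_box.pack()
#   RedirectSTD(log_box)   # from here on, stdout/stderr output lands in log_box
#   print('hello')         # appears inside the Text widget
#   root.mainloop()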
class UpdateChecker:
"""
VM update checker. It currently works by modifying a tk.Menu bar label, so it is somewhat hardcoded.
"""
def __init__(self, ui_ctrl):
self.__U_NOUPDATES = '[ \u2713 ]'
self.__U_OUTDATE = '[ \u2191 ]'
self.__ui_ctrl = ui_ctrl
def check(self):
self.__ui_ctrl.entryconfig(4, label='Checking for updates \u2B6E')
try:
h.create_log('Checking for updates')
content = request.urlopen(h.VERSION_UPDATE_API).read().decode('utf-8')
except urllib_error.URLError:
self.__ui_ctrl.entryconfig(4, label="")
h.create_log("Can't check for updates. No connection to network or source unavailable")
else:
if 'docs.google.com' in h.VERSION_UPDATE_API:
content = content[1:]
content = json.loads(content)
if h.VERSION == content['version']:
self.__ui_ctrl.entryconfig(4, label=f'Up-To-Date {self.__U_NOUPDATES}')
h.create_log('Version is up to date')
else:
self.__ui_ctrl.entryconfig(4, label=f'Update Available {self.__U_OUTDATE}')
h.create_log('Update is available')
|
INVALID_OUTPUT_CLASS_DEFINITION = (
"Please set the output_class attribute to the appropriate {base_name} subclass."
)
|
from typing import Dict, List, Tuple
import torch
from torch.utils.data import Dataset
import os
import copy
import numpy as np
import pybullet as pb
import pybullet_data as pb_d
import random
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
# Reproducing:
# http://alumni.media.mit.edu/~wad/color/numbers.html
# without white...
original_colors = [
#Red
(173, 35, 35),
#Blue
(42, 75, 215),
#Green
(29, 105, 20),
#Brown
(129, 74, 25),
#Purple
(129, 38, 192),
#Black
(0, 0, 0),
#Lt. Gray
(160, 160, 160),
#Lt. Green
(129, 197, 122),
#Lt. Blue
(157, 175, 255),
#Cyan
(41, 208, 208),
#Orange
(255, 146, 51),
#Yellow
(255, 238, 51),
#Tan
(233, 222, 187),
#Pink
(55, 205, 243),
#Dk. Gray
(87, 87, 87),
]
original_shapes = [
'cylinder',
'capsule',
'sphere',
'cube',
'torus',
'teddy',
'duck',
]
def generate_datapoint(
latent_one_hot,
latent_values,
latent_classes,
img_size,
nb_shapes,
nb_colors,
nb_samples,
sampled_positions,
sampled_orientation,
physicsClient,
):
'''
:param latent_one_hot: Numpy Array of shape (nb_objects, latent_one_hot_size)
:param latent_values: Numpy Array of shape (nb_objects, nb_latent_attr). E.g. contains actual pixel positions.
:param latent_classes: Numpy Array of shape (nb_objects, nb_latent_attr). E.g. contains bucket positions.
:param img_size: Integer pixel size of the squared image.
:param nb_shapes: Integer number of possible shapes.
:param nb_colors: Integer number of possible colors.
:param nb_samples: Integer number of possible sampled object positions/orientations.
:param sampled_positions: List of Numpy Array of shape (3,) describing the position of the object for each sample index.
:param sampled_orientation: List of float describing the Y-axis orientation of the object for each sample index.
:param physicsClient: Integer identifying the physicsClient used by PyBullet.
'''
global original_colors
global original_shapes
colors = copy.deepcopy(original_colors)
shapes = copy.deepcopy(original_shapes)
color_id = latent_classes[0]
obj_color = [float(colors[color_id][0])/255,float(colors[color_id][1])/255,float(colors[color_id][2])/255, 1.0]
shape_id = latent_classes[1]
obj_shape = shapes[shape_id]
obj_position = sampled_positions[latent_classes[2]]; obj_position[2] = 0
obj_orientation = np.zeros(3); #obj_orientation[0] = np.pi/2
obj_orientation[2] = sampled_orientation[latent_classes[2]]
#print('Position:', obj_position)
#print('Orientation:', obj_orientation)
cam_eye = np.zeros(3); cam_eye[2] = 7.0; cam_eye[1]= 10.0
cam_target = np.zeros(3)
cam_up = np.zeros(3); cam_up[1] = -1.0
def generate(shapeId, position, orientation, color, physicsClient):
datapath = pb_d.getDataPath()
pb.setAdditionalSearchPath(datapath)
pb.resetSimulation(physicsClient) #pb.RESET_USE_DEFORMABLE_WORLD)
pb.setGravity(0, 0, -9.81)
planeId = pb.loadURDF("plane.urdf", [0,0,0])
if 'torus' in shapeId:
orientation[0] = np.pi/2
position[2] += 0.5
frame_offset_orientation =np.zeros(3);
frame_offset_position = [0, 0, 0]
meshScale = [2.0,2.0,2.0]
torus_path = os.path.join(os.path.dirname(__file__), "data/torus.obj")
torusVisualId = pb.createVisualShape(shapeType=pb.GEOM_MESH,fileName=torus_path, rgbaColor=color,meshScale=meshScale, visualFramePosition=frame_offset_position,visualFrameOrientation=frame_offset_orientation )
torusCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_MESH,
fileName=torus_path,
meshScale=meshScale,
collisionFramePosition=frame_offset_position,
collisionFrameOrientation=frame_offset_orientation
)
torusId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=torusCollisionId,
baseVisualShapeIndex=torusVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'teddy' in shapeId:
orientation[0] = np.pi/2
frame_offset_orientation =np.zeros(3);
frame_offset_position = [-2, -0.5, -0.5]
meshScale = [4.0,4.0,4.0]
teddyVisualId = pb.createVisualShape(
shapeType=pb.GEOM_MESH,
fileName="teddy2_VHACD_CHs.obj",
rgbaColor=color,
meshScale=meshScale,
visualFramePosition=frame_offset_position,
visualFrameOrientation=frame_offset_orientation
)
teddyCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_MESH,
fileName="teddy2_VHACD_CHs.obj",
meshScale=meshScale,
collisionFramePosition=frame_offset_position,
collisionFrameOrientation=frame_offset_orientation
)
teddyId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=teddyCollisionId,
baseVisualShapeIndex=teddyVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'duck' in shapeId:
orientation[0] = np.pi/2
position[2] = -0.25
meshScale = [2.0,2.0,2.0]
duckVisualId = pb.createVisualShape(
shapeType=pb.GEOM_MESH,
fileName="duck.obj",
rgbaColor=color,
meshScale=meshScale,
)
duckCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_MESH,
fileName="duck.obj",
meshScale=meshScale,
)
duckId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=duckCollisionId,
baseVisualShapeIndex=duckVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'cube' in shapeId:
position[-1] = 1.0
cubeVisualId = pb.createVisualShape(
shapeType=pb.GEOM_BOX,
#fileName="cube.obj",
rgbaColor=color,
halfExtents=[1.0,1.0,1.0],
)
cubeCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_BOX,
#fileName="cube.obj",
halfExtents=[1.0,1.0,1.0],
)
cubeId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=cubeCollisionId,
baseVisualShapeIndex=cubeVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'sphere' in shapeId:
position[-1] = 1.0
sphereVisualId = pb.createVisualShape(
shapeType=pb.GEOM_SPHERE,
#fileName="sphere_smooth.obj",
rgbaColor=color,
radius=1.0,
)
sphereCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_SPHERE,
#fileName="sphere_smooth.obj",
radius=1.0,
)
sphereId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=sphereCollisionId,
baseVisualShapeIndex=sphereVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'capsule' in shapeId:
position[-1] = 1.0
orientation[0] = np.pi/2
capsuleVisualId = pb.createVisualShape(
shapeType=pb.GEOM_CAPSULE,
#fileName="sphere_smooth.obj",
rgbaColor=color,
radius=1.0,
length=2.0,
# height=1.0,
)
capsuleCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_CAPSULE,
#fileName="sphere_smooth.obj",
radius=1.0,
height=2.0,
# height=1.0,
)
capsuleId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=capsuleCollisionId,
baseVisualShapeIndex=capsuleVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
elif 'cylinder' in shapeId:
position[-1] = 1.0
cylinderVisualId = pb.createVisualShape(
shapeType=pb.GEOM_CYLINDER,
#fileName="sphere_smooth.obj",
rgbaColor=color,
radius=0.5,
length=2.0,
)
cylinderCollisionId = pb.createCollisionShape(
shapeType=pb.GEOM_CYLINDER,
#fileName="sphere_smooth.obj",
radius=0.5,
height=2.0,
)
cylinderId = pb.createMultiBody(
baseMass=1.0,
baseCollisionShapeIndex=cylinderCollisionId,
baseVisualShapeIndex=cylinderVisualId,
basePosition=position,
baseOrientation=pb.getQuaternionFromEuler(orientation)
)
generate(
shapeId=obj_shape,
position=obj_position,
orientation=obj_orientation,
color=obj_color,
physicsClient=physicsClient,
)
def render(size=img_size,
eye=cam_eye,
target=cam_target,
up=cam_up,
fov=45,
aspect=1.0,
nearVal=0.1,
farVal=30.1):
viewMatrix = pb.computeViewMatrix(
cameraEyePosition=eye,
cameraTargetPosition=target,
cameraUpVector=up,
)
projectionMatrix = pb.computeProjectionMatrixFOV(
fov=fov,
aspect=aspect,
nearVal=nearVal,
farVal=farVal
)
w, h, rgba_img, depth_img, seg_img = pb.getCameraImage(
width=size,
height=size,
viewMatrix=viewMatrix,
projectionMatrix=projectionMatrix
)
rgb_img = rgba_img[:, :, :-1]
return rgb_img
img = render()
#img = (img/255.).transpose((2,0,1))
img = (img).astype('uint8').transpose((2,1,0))
return img
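# Illustrative stand-alone call (hypothetical values; in this file the real
# call is made by _3DShapesPyBulletDataset._generate_datapoint below):
#
#   physicsClient = pb.connect(pb.DIRECT)
#   img = generate_datapoint(
#       latent_one_hot=None,                 # not used by the renderer itself
#       latent_values=np.array([0, 2, 0]),
#       latent_classes=np.array([0, 2, 0]),  # (color=red, shape=sphere, sample=0)
#       img_size=64,
#       nb_shapes=5, nb_colors=5, nb_samples=1,
#       sampled_positions=[np.array([1.0, -1.0, 0.0])],
#       sampled_orientation=[0.5],
#       physicsClient=physicsClient,
#   )
#   # img is a uint8 array of shape (3, img_size, img_size)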
def generate_dataset(root,
img_size=32,
nb_samples=100,
nb_shapes=5,
nb_colors=5,
):
global original_colors
global original_shapes
colors = copy.deepcopy(original_colors)
shapes = copy.deepcopy(original_shapes)
dirs = root
assert nb_shapes <= len(shapes) and nb_colors <= len(colors)
colors = colors[:nb_colors]
shapes = shapes[:nb_shapes]
samples = [i for i in range(nb_samples)]
sampled_positions = [np.random.uniform(low=-3,high=3, size=(3)) for _ in range(nb_samples)]
for i in range(len(sampled_positions)): sampled_positions[i][-1] = 0
sampled_orientation = [np.random.uniform(low=0,high=2*np.pi) for _ in range(nb_samples)]
latent_one_hot_repr_sizes = {
"color":nb_colors, #similar to id
"shape":nb_shapes,
"sample":nb_samples,
}
one_object_latents_ones_hot_size = sum([v for k,v in latent_one_hot_repr_sizes.items()])
print('building dataset...')
possible_shape_values = np.arange(0,nb_shapes)
possible_color_values = np.arange(0,nb_colors)
possible_sample_id_values = np.arange(0,nb_samples)
dummy_latent_values = np.zeros(3).astype(int)
dummy_latent_class = np.zeros(3).astype(int)
# (3, )
dummy_latent_one_hot = np.zeros(one_object_latents_ones_hot_size).astype(int)
# (one_object_latents_ones_hot_size, )
img_latent_class = []
img_latent_values = []
img_latent_one_hot = []
# Setting up the color when sampling later...:
one_hot_idx_start = 0
for color_id in possible_color_values:
obj_latent_class = dummy_latent_class.copy()
obj_latent_values = dummy_latent_values.copy()
obj_latent_one_hot = dummy_latent_one_hot.copy()
obj_latent_class[0] = color_id
obj_latent_values[0] = color_id
one_hot_idx_start_color = one_hot_idx_start
obj_latent_one_hot[one_hot_idx_start_color+color_id] = 1
for shape_id in possible_shape_values:
obj_latent_class[1] = shape_id
obj_latent_values[1] = shape_id
one_hot_idx_start_shape = one_hot_idx_start_color+nb_colors
obj_latent_one_hot[one_hot_idx_start_shape+shape_id] = 1
for sample_id in possible_sample_id_values:
obj_latent_class[2] = sample_id
obj_latent_values[2] = sample_id
one_hot_idx_start_sample = one_hot_idx_start_shape+nb_shapes
obj_latent_one_hot[one_hot_idx_start_sample+sample_id] = 1
img_latent_class.append(obj_latent_class.copy())
img_latent_values.append(obj_latent_values.copy())
img_latent_one_hot.append(obj_latent_one_hot.copy())
# Reset:
obj_latent_one_hot[one_hot_idx_start_sample+sample_id] = 0
# Reset:
obj_latent_one_hot[one_hot_idx_start_shape+shape_id] = 0
# Reset: done at the beginning of the loop...
dataset = {
"imgs":{},
"latents_values":img_latent_values,
"latents_classes":img_latent_class,
"latents_one_hot":img_latent_one_hot,
}
print('saving datasets...')
filename = os.path.join(dirs,'3d_shapes_pybullet_dataset.pickle')
with open(filename, 'wb') as f:
pickle.dump((dataset, nb_shapes, nb_colors, nb_samples, sampled_positions, sampled_orientation), f)
print('datasets saved at {}'.format(filename))
return dataset, nb_shapes, nb_colors, nb_samples, sampled_positions, sampled_orientation
class _3DShapesPyBulletDataset(Dataset) :
def __init__(self,
root,
img_size,
nb_shapes,
nb_colors,
nb_samples,
train=True,
transform=None,
generate=False,
split_strategy=None,
):
super(_3DShapesPyBulletDataset, self).__init__()
self.root = root
self.file = '3d_shapes_pybullet_dataset.pickle'
self.img_size = img_size
self.nb_shapes = nb_shapes
self.nb_colors = nb_colors
self.nb_samples = nb_samples
self.split_strategy = split_strategy
self.train = train
self.generate = generate
self.transform = transform
self.physicsClient = None
if generate or not self._check_exists():
if not self._check_exists():
print('Dataset not found. Let us generate it:')
dataset, nb_shapes, nb_colors, nb_samples, sampled_positions, sampled_orientation = self._generate(
root=root,
img_size=img_size,
nb_shapes=nb_shapes,
nb_colors=nb_colors,
nb_samples=self.nb_samples
)
else:
filepath = os.path.join(self.root, self.file)
with open(filepath, 'rb') as f:
dataset, nb_shapes, nb_colors, nb_samples, sampled_positions, sampled_orientation = pickle.load(f)
self.sampled_positions = sampled_positions
self.sampled_orientation = sampled_orientation
self.latents_values = np.asarray(dataset['latents_values'])
#(color, shape, sample_id) :
self.latents_classes = np.asarray(dataset['latents_classes'])
self.latents_one_hot = np.asarray(dataset['latents_one_hot'])
self.test_latents_mask = np.zeros_like(self.latents_classes)
self.imgs = dataset['imgs']
self.targets = np.zeros(len(self.latents_classes))
for idx, latent_cls in enumerate(self.latents_classes):
color = latent_cls[0]
shape = latent_cls[1]
target = color*self.nb_shapes+shape
self.targets[idx] = target
if self.split_strategy is not None:
strategy = self.split_strategy.split('-')
if 'combinatorial' in self.split_strategy:
self.counter_test_threshold = int(strategy[0][len('combinatorial'):])
# (default: 2) Specifies the threshold on the number of latent dimensions
# whose values match a test value. Below this threshold, samples are used in training.
# A value of 1 implies a basic train/test split that tests generalization to out-of-distribution values.
# A value of 2 implies a train/test split that tests generalization to out-of-distribution pairs of values...
# It implies that test values are encountered, but never in combination with another test value.
# It is a way to test for binary compositional generalization from well-known stand-alone test values.
# A value of 3 tests for ternary compositional generalization from well-known:
# - stand-alone test values, and
# - binary compositions of test values.
'''
With regard to designating axes as primitives:
It implies that all the values on this latent axis are treated as test values
when combined with a test value on any other latent axis.
N.B.: it is not possible to test for out-of-distribution values in that context...
N.B.1: It is required that the number of primitive latent axes be at most one
less than the counter_test_threshold.
A number of fillers along this primitive latent axis can then be specified in
front of the FP pattern...
Among the effective indices, those with an ordinal lower than or equal to the
number of fillers allowed will be part of the training set.
'''
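# Illustrative (hypothetical) strategy string, to make the fields concrete:
#   split_strategy = 'combinatorial2-Y-1-2-Y-1-2-Y-1-5'
# parses as: counter_test_threshold=2; Shape: not primitive ('Y' contains no 'FP'),
# divider=1, every 2nd effective index reserved for testing; Color: same;
# Sample: divider=1, every 5th effective index reserved for testing.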
self.latent_dims = {}
# self.strategy[0] : 'combinatorial'
# 1: Shape
self.latent_dims['Shape'] = {'size': self.nb_shapes}
self.latent_dims['Shape']['nbr_fillers'] = 0
self.latent_dims['Shape']['primitive'] = ('FP' in strategy[1])
if self.latent_dims['Shape']['primitive']:
self.latent_dims['Shape']['nbr_fillers'] = int(strategy[1].split('FP')[0])
'''
self.latent_dims['Shape']['image_wise_primitive'] = ('IWP' in strategy[1])
if self.latent_dims['Shape']['image_wise_primitive']:
self.latent_dims['Shape']['nbr_fillers'] = int(strategy[1].split('IWP')[0])
assert self.latent_dims['Shape']['nbr_fillers'] < self.latent_dims['Shape']['size']//self.latent_dims['Shape']['divider'], \
"It seems that the test dataset will be empty."
'''
self.latent_dims['Shape']['position'] = 1
# (color, shape, sample)
# 2: divider (default:1) : specify how dense the data are along that dimension
# e.g. : divider=4 => effective size = 8
if 'RemainderToUse' in strategy[2]:
strategy[2] = strategy[2].split('RemainderToUse')
self.latent_dims['Shape']['remainder_use'] = int(strategy[2][1])
strategy[2] = strategy[2][0]
else:
self.latent_dims['Shape']['remainder_use'] = 0
self.latent_dims['Shape']['divider'] = int(strategy[2])
# 3: test_set_divider (default:4) : out of the effective samples, which indices
# will be used solely in test, when combined with another latent's test indices.
# e.g. ~ 80%/20% train/test ==> test_set_divider=4 => effective indices 4 and 8 will only be used in the test set,
# in combination with the other latent dims test set indices.
if 'N' in strategy[3]:
self.latent_dims['Shape']['untested'] = True
self.latent_dims['Shape']['test_set_divider'] = (self.latent_dims['Shape']['size']//self.latent_dims['Shape']['divider'])+10
elif 'E' in strategy[3]:
self.latent_dims['Shape']['test_set_size_sample_from_end'] = int(strategy[3][1:])
elif 'S' in strategy[3]:
self.latent_dims['Shape']['test_set_size_sample_from_start'] = int(strategy[3][1:])
else:
self.latent_dims['Shape']['test_set_divider'] = int(strategy[3])
# 4: Color
self.latent_dims['Color'] = {'size': self.nb_colors}
self.latent_dims['Color']['nbr_fillers'] = 0
self.latent_dims['Color']['primitive'] = ('FP' in strategy[4])
if self.latent_dims['Color']['primitive']:
self.latent_dims['Color']['nbr_fillers'] = int(strategy[4].split('FP')[0])
'''
self.latent_dims['Color']['image_wise_primitive'] = ('IWP' in strategy[4])
if self.latent_dims['Color']['image_wise_primitive']:
self.latent_dims['Color']['nbr_fillers'] = int(strategy[4].split('IWP')[0])
assert self.latent_dims['Color']['nbr_fillers'] < self.latent_dims['Color']['size']//self.latent_dims['Color']['divider'], \
"It seems that the test dataset will be empty."
'''
self.latent_dims['Color']['position'] = 0
#(color, shape, X, Y)
# 5: divider (default:1) : specify how dense the data are along that dimension
# e.g. : divider=4 => effective size = 8
if 'RemainderToUse' in strategy[5]:
strategy[5] = strategy[5].split('RemainderToUse')
self.latent_dims['Color']['remainder_use'] = int(strategy[5][1])
strategy[5] = strategy[5][0]
else:
self.latent_dims['Color']['remainder_use'] = 0
self.latent_dims['Color']['divider'] = int(strategy[5])
# 6: test_set_divider (default:4) : out of the effective samples, which indices
# will be used solely in test, when combined with another latent's test indices.
# e.g. ~ 80%/20% train/test ==> test_set_divider=4 => effective indices 4 and 8 will only be used in the test set,
# in combination with the other latent dims test set indices.
if 'N' in strategy[6]:
self.latent_dims['Color']['untested'] = True
self.latent_dims['Color']['test_set_divider'] = (self.latent_dims['Color']['size']//self.latent_dims['Color']['divider'])+10
elif 'E' in strategy[6]:
self.latent_dims['Color']['test_set_size_sample_from_end'] = int(strategy[6][1:])
elif 'S' in strategy[6]:
self.latent_dims['Color']['test_set_size_sample_from_start'] = int(strategy[6][1:])
else:
self.latent_dims['Color']['test_set_divider'] = int(strategy[6])
# 7: Sample
self.latent_dims['Sample'] = {'size': self.nb_samples}
self.latent_dims['Sample']['nbr_fillers'] = 0
self.latent_dims['Sample']['primitive'] = ('FP' in strategy[7])
if self.latent_dims['Sample']['primitive']:
self.latent_dims['Sample']['nbr_fillers'] = int(strategy[7].split('FP')[0])
'''
self.latent_dims['Sample']['image_wise_primitive'] = ('IWP' in strategy[7])
if self.latent_dims['Sample']['image_wise_primitive']:
self.latent_dims['Sample']['nbr_fillers'] = int(strategy[7].split('IWP')[0])
assert self.latent_dims['Sample']['nbr_fillers'] < self.latent_dims['Sample']['size']//self.latent_dims['Sample']['divider'], \
"It seems that the test dataset will be empty."
'''
self.latent_dims['Sample']['position'] = 2
#(color, shape, sample)
# 8: divider (default:1) : specify how dense the data are along that dimension
# e.g. : divider=4 => effective size = 10
if 'RemainderToUse' in strategy[8]:
strategy[8] = strategy[8].split('RemainderToUse')
self.latent_dims['Sample']['remainder_use'] = int(strategy[8][1])
strategy[8] = strategy[8][0]
else:
self.latent_dims['Sample']['remainder_use'] = 0
self.latent_dims['Sample']['divider'] = int(strategy[8])
# 9: test_set_divider (default:5) : out of the effective samples, which indices
# will be used solely in test, when combined with another latent's test indices.
# e.g. ~ 80%/20% train/test ==> test_set_divider=5 => effective indices 5 and 10 will only be used in the test set,
# in combination with the other latent dims test set indices.
if 'N' in strategy[9]:
self.latent_dims['Sample']['untested'] = True
self.latent_dims['Sample']['test_set_divider'] = (self.latent_dims['Sample']['size']//self.latent_dims['Sample']['divider'])+10
elif 'E' in strategy[9]:
self.latent_dims['Sample']['test_set_size_sample_from_end'] = int(strategy[9][1:])
elif 'S' in strategy[9]:
self.latent_dims['Sample']['test_set_size_sample_from_start'] = int(strategy[9][1:])
else:
self.latent_dims['Sample']['test_set_divider'] = int(strategy[9])
'''
nbr_primitives_and_tested = len([k for k in self.latent_dims
if self.latent_dims[k]['primitive'] \
or self.latent_dims[k]['image_wise_primitive'] \
or 'untested' not in self.latent_dims[k]])
assert nbr_primitives_and_tested==self.counter_test_threshold
'''
nbr_primitives_and_tested = len([k for k in self.latent_dims
if self.latent_dims[k]['primitive'] or 'untested' not in self.latent_dims[k]])
#assert(nbr_primitives_and_tested==self.counter_test_threshold)
elif 'compositional' in self.split_strategy:
shuffle_seed = int(self.split_strategy.split('-')[1])
self.train_nb_possible_colors = int(self.split_strategy.split('_')[-1])
assert self.train_nb_possible_colors < self.nb_colors
# From shape to colors:
shapes = {
shape_id:np.roll(np.arange(0,self.nb_colors), shift=idx)
for idx, shape_id in enumerate(range(self.nb_shapes))
}
test_nb_possible_colors = self.nb_colors-self.train_nb_possible_colors
self.training_shape_2_possible_colors = {
shape_id:possible_colors[test_nb_possible_colors:]
for shape_id, possible_colors in shapes.items()
}
self.testing_shape_2_possible_colors = {
shape_id:possible_colors[:test_nb_possible_colors]
for shape_id, possible_colors in shapes.items()
}
else:
self.divider = 1
self.offset = 0
self.indices = []
if self.split_strategy is None or 'divider' in self.split_strategy:
for idx in range(len(self.latents_values)):
if idx % self.divider == self.offset:
self.indices.append(idx)
self.train_ratio = 0.8
# Shuffled:
np.random.shuffle(self.indices)
end = int(len(self.indices)*self.train_ratio)
if self.train:
self.indices = self.indices[:end]
else:
self.indices = self.indices[end:]
print(f"Split Strategy: {self.split_strategy} --> d {self.divider} / o {self.offset}")
print(f"Dataset Size: {len(self.indices)} out of {len(self.latents_values)}: {100*len(self.indices)/len(self.latents_values)}%.")
elif 'combinatorial' in self.split_strategy:
indices_latents = list(zip(range(self.latents_classes.shape[0]), self.latents_classes))
for idx, latent_class in indices_latents:
effective_test_threshold = self.counter_test_threshold
counter_test = {}
skip_it = False
filler_forced_training = False
for dim_name, dim_dict in self.latent_dims.items():
dim_class = latent_class[dim_dict['position']]
quotient = (dim_class+1)//dim_dict['divider']
remainder = (dim_class+1)%dim_dict['divider']
if remainder!=dim_dict['remainder_use']:
skip_it = True
break
if dim_dict['primitive']:
ordinal = quotient
if ordinal > dim_dict['nbr_fillers']:
effective_test_threshold -= 1
if 'test_set_divider' in dim_dict and quotient%dim_dict['test_set_divider']==0:
counter_test[dim_name] = 1
elif 'test_set_size_sample_from_end' in dim_dict:
max_quotient = dim_dict['size']//dim_dict['divider']
if quotient > max_quotient-dim_dict['test_set_size_sample_from_end']:
counter_test[dim_name] = 1
elif 'test_set_size_sample_from_start' in dim_dict:
max_quotient = dim_dict['size']//dim_dict['divider']
if quotient <= dim_dict['test_set_size_sample_from_start']:
counter_test[dim_name] = 1
if dim_name in counter_test:
self.test_latents_mask[idx, dim_dict['position']] = 1
if skip_it: continue
if self.train:
if len(counter_test) >= effective_test_threshold:#self.counter_test_threshold:
continue
else:
self.indices.append(idx)
else:
if len(counter_test) >= effective_test_threshold:#self.counter_test_threshold:
self.indices.append(idx)
else:
continue
print(f"Split Strategy: {self.split_strategy}")
print(self.latent_dims)
print(f"Dataset Size: {len(self.indices)} out of {len(self.latents_values)} : {100*len(self.indices)/len(self.latents_values)}%.")
assert len(self.indices),\
"No valid data, maybe try a smaller divider..."
elif 'compositional' in self.split_strategy:
self.indices = []
for idx in range(self.latents_classes.shape[0]):
shape_id = self.latents_classes[idx][1]
color_id = self.latents_classes[idx][0]
color_selection = self.training_shape_2_possible_colors
if not(self.train): color_selection = self.testing_shape_2_possible_colors
if color_id in color_selection[shape_id]:
self.indices.append(idx)
print(f"Dataset Size: {len(self.indices)} out of {len(self.latents_values)}: {100*len(self.indices)/len(self.latents_values)}%.")
"""
self.latents_values = self.latents_values[self.indices]
self.latents_classes = self.latents_classes[self.indices]
self.latents_one_hot = self.latents_one_hot[self.indices]
self.test_latents_mask = self.test_latents_mask[self.indices]
self.targets = self.targets[self.indices]
"""
#self._generate_all()
self.same_color_indices = {}
self.same_shape_indices = {}
self.latents_to_possible_indices = {}
for idx, trueidx in enumerate(self.indices):
latents = self.getlatentclass(idx)
# color, shape, sample
color_id = latents[0]
shape_id = latents[1]
if color_id not in self.same_color_indices:
self.same_color_indices[color_id] = []
self.same_color_indices[color_id].append(idx)
if shape_id not in self.same_shape_indices:
self.same_shape_indices[shape_id] = []
self.same_shape_indices[shape_id].append(idx)
if color_id not in self.latents_to_possible_indices:
self.latents_to_possible_indices[color_id] = {}
if shape_id not in self.latents_to_possible_indices[color_id]:
self.latents_to_possible_indices[color_id][shape_id] = []
self.latents_to_possible_indices[color_id][shape_id].append(idx)
print('Dataset loaded : OK.')
def _save_generated_dataset(self):
if self._check_exists():
filepath = os.path.join(self.root, self.file)
with open(filepath, 'rb') as f:
dataset, _, _, _, _, _ = pickle.load(f)
dataset["imgs"].update(self.imgs)
else:
dataset = {
"imgs":self.imgs,
"latents_values":self.latents_values,
"latents_classes":self.latents_classes,
"latents_one_hot":self.latents_one_hot,
}
print('saving datasets...')
filename = os.path.join(self.root,self.file)
with open(filename, 'wb') as f:
pickle.dump((dataset, self.nb_shapes, self.nb_colors, self.nb_samples, self.sampled_positions, self.sampled_orientation), f)
print('Datasets saved at {}'.format(filename))
def _generate_all(self):
pbar = tqdm(total=len(self.indices))
for idx in self.indices:
pbar.update(1)
if idx in self.imgs: continue
self._generate_datapoint(idx=idx)
def _generate_datapoint(self, idx):
latents_values = self.latents_values[idx]
latents_one_hot = self.latents_one_hot[idx]
latents_classes = self.latents_classes[idx]
if self.physicsClient is None:
self.physicsClient = pb.connect(pb.DIRECT)
rgb_img = generate_datapoint(
latent_one_hot=latents_one_hot,
latent_values=latents_values,
latent_classes=latents_classes,
img_size=self.img_size,
nb_shapes=self.nb_shapes,
nb_colors=self.nb_colors,
nb_samples=self.nb_samples,
sampled_positions=self.sampled_positions,
sampled_orientation=self.sampled_orientation,
physicsClient=self.physicsClient,
)
self.imgs[idx] = rgb_img
if all([(index in self.imgs) for index in self.indices]):
self._save_generated_dataset()
# will only be called once, when the last element has just been generated,
# since this whole function will never be called again after all elements
# are generated...
def __len__(self) -> int:
return len(self.indices)
def _check_exists(self):
return os.path.exists(os.path.join(self.root,self.file))
def _generate(self,
root,
img_size,
nb_shapes,
nb_colors,
nb_samples):
"""
Generate the 3DShapesPyBullet dataset if it doesn't exist already.
"""
if root is None:
root = self.root
os.makedirs(root, exist_ok=True)
return generate_dataset(
root=root,
img_size=img_size,
nb_shapes=nb_shapes,
nb_colors=nb_colors,
nb_samples=nb_samples,
)
def getclass(self, idx):
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
target = self.targets[trueidx]
return target
def getlatentvalue(self, idx):
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
latent_value = self.latents_values[trueidx]
return latent_value
def getlatentclass(self, idx):
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
latent_class = self.latents_classes[trueidx]
return latent_class
def getlatentonehot(self, idx):
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
latent_one_hot = self.latents_one_hot[trueidx]
return latent_one_hot
def gettestlatentmask(self, idx):
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
test_latents_mask = self.test_latents_mask[trueidx]
return test_latents_mask
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
dict: sample dict with keys 'experiences' (the image), 'exp_labels' (target class index), 'exp_latents', 'exp_latents_values', 'exp_latents_one_hot_encoded' and 'exp_test_latents_masks'.
"""
if idx >= len(self):
idx = idx%len(self)
trueidx = self.indices[idx]
latent_value = torch.from_numpy(self.getlatentvalue(idx))
latent_class = torch.from_numpy(self.getlatentclass(idx))
latent_one_hot = torch.from_numpy(self.getlatentonehot(idx))
test_latents_mask = torch.from_numpy(self.gettestlatentmask(idx))
if trueidx not in self.imgs:
self._generate_datapoint(idx=trueidx)
img = self.imgs[trueidx]
target = self.getclass(idx)
#img = (img*255).astype('uint8').transpose((2,1,0))
img = img.transpose((2,1,0))
img = Image.fromarray(img, mode='RGB')
if self.transform is not None:
img = self.transform(img)
sampled_d = {
"experiences":img,
"exp_labels":target,
"exp_latents":latent_class,
"exp_latents_values":latent_value,
"exp_latents_one_hot_encoded":latent_one_hot,
"exp_test_latents_masks":test_latents_mask,
}
return sampled_d
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers for fetching schema from DataCommons API.
"""
import ast
import copy
import json
import logging
import os
from absl import app
from absl import flags
from sys import path
FLAGS = flags.FLAGS
flags.DEFINE_string('dcid', None, 'dcid of the node to query')
flags.DEFINE_string('dc_output_path', './prefetched_outputs/',
'Path to store the output')
flags.DEFINE_boolean('force_fetch', False,
'forces api query and not return cached result')
_MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
path.insert(1, os.path.join(_MODULE_DIR, '../../../../../../'))
from tools.download_utils.requests_wrappers import request_post_json
# logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
# logging.getLogger().setLevel(logging.DEBUG)
# requests_log = logging.getLogger("urllib3")
# requests_log.setLevel(logging.DEBUG)
# HTTPConnection.debuglevel = 1
# requests_log.propagate = True
def dc_check_existence(dcid_list: list,
use_autopush: bool = True,
max_items: int = 450) -> dict:
"""Checks if a given list of dcids are present in DC.
REST API is used to query the data with retry on timeout.
Uses caching of responses to avoid repeated calls.
Args:
dcid_list: List of dcids to be queried for existence.
use_autopush: Boolean value to use autopush API and not public API.
max_items: Limit of items to be queried in a single POST request.
Returns:
Dict object with dcids as keys and boolean existence flags as values.
"""
data_ = {}
ret_dict = {}
if use_autopush:
url_prefix = 'autopush.'
else:
url_prefix = ''
chunk_size = max_items
dcid_list_chunked = [
dcid_list[i:i + chunk_size]
for i in range(0, len(dcid_list), chunk_size)
]
for dcid_chunk in dcid_list_chunked:
data_["dcids"] = dcid_chunk
req = request_post_json(
f'https://{url_prefix}api.datacommons.org/node/property-labels',
data_)
resp_dicts = req['payload']
resp_dicts = ast.literal_eval(resp_dicts)
for cur_dcid in resp_dicts:
if not resp_dicts[cur_dcid]:
ret_dict[cur_dcid] = False
elif not resp_dicts[cur_dcid]['inLabels'] and not resp_dicts[
cur_dcid]['outLabels']:
ret_dict[cur_dcid] = False
else:
ret_dict[cur_dcid] = True
return ret_dict
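# Illustrative usage (the dcids below are only examples; any list of node ids works):
#
#   existence = dc_check_existence(['Count_Person', 'not_a_real_dcid'])
#   # -> {'Count_Person': True, 'not_a_real_dcid': False}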
# fetch pvs from dc, enums from dc
def fetch_dcid_properties_enums(dcid: str,
cache_path: str = _MODULE_DIR +
'/prefetched_outputs',
use_autopush: bool = True,
force_fetch: bool = False):
"""Fetches all the properties and it's possible values for a given dcid.
Args:
dcid: DCID of the object whose properties and enum values need to be fetched.
cache_path: Path of the directory where previously fetched results are stored.
use_autopush: Boolean value to use autopush or not.
force_fetch: Boolean value to force API call and disregard the cache.
Returns:
Dict object with properties as keys and lists of possible enum values as values.
"""
cache_path = os.path.expanduser(cache_path)
if not os.path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
if use_autopush:
api_prefix = 'autopush.'
else:
api_prefix = ''
dc_props = {}
# get list of properties for each population type
if force_fetch or not os.path.isfile(
os.path.join(cache_path, f'{dcid}_dc_props.json')):
data_ = {}
data_["dcids"] = [dcid]
data_["property"] = "domainIncludes"
data_["direction"] = "in"
population_props = request_post_json(
f'https://{api_prefix}api.datacommons.org/node/property-values',
data_)
dc_population_pvs = population_props['payload']
dc_population_pvs = ast.literal_eval(dc_population_pvs)
if dc_population_pvs[dcid]:
dc_props = {}
for prop_dict in dc_population_pvs[dcid]['in']:
dc_props[prop_dict['dcid']] = []
with open(os.path.join(cache_path, f'{dcid}_dc_props.json'), 'w') as fp:
json.dump(dc_props, fp, indent=2)
else:
dc_props = json.load(
open(os.path.join(cache_path, f'{dcid}_dc_props.json'), 'r'))
# check if the list has enum type
if force_fetch or not os.path.isfile(
os.path.join(cache_path, f'{dcid}_dc_props_types.json')):
data_ = {}
data_['dcids'] = list(dc_props.keys())
data_['property'] = 'rangeIncludes'
data_['direction'] = 'out'
if data_['dcids']:
population_props_types = request_post_json(
f'https://{api_prefix}api.datacommons.org/node/property-values',
data_)
population_props_types = ast.literal_eval(
population_props_types['payload'])
for property_name in population_props_types:
if population_props_types[property_name]:
for temp_dict in population_props_types[property_name][
'out']:
dc_props[property_name].append(temp_dict['dcid'])
with open(os.path.join(cache_path, f'{dcid}_dc_props_types.json'),
'w') as fp:
json.dump(dc_props, fp, indent=2)
else:
dc_props = json.load(
open(os.path.join(cache_path, f'{dcid}_dc_props_types.json'), 'r'))
# get enum value list
if force_fetch or not os.path.isfile(
os.path.join(cache_path, f'{dcid}_dc_props_enum_values.json')):
new_dict = copy.deepcopy(dc_props)
for property_name in new_dict.keys():
dc_props[property_name] = []
for type_name in new_dict[property_name]:
if 'enum' in type_name.lower():
data_ = {}
data_['dcids'] = [type_name]
data_['property'] = 'typeOf'
data_['direction'] = 'in'
enum_values = request_post_json(
f'https://{api_prefix}api.datacommons.org/node/property-values',
data_)
enum_values = ast.literal_eval(enum_values['payload'])
if enum_values[type_name]:
for temp_dict in enum_values[type_name]['in']:
dc_props[property_name].append(temp_dict['dcid'])
with open(os.path.join(cache_path, f'{dcid}_dc_props_enum_values.json'),
'w') as fp:
json.dump(dc_props, fp, indent=2)
else:
dc_props = json.load(
open(os.path.join(cache_path, f'{dcid}_dc_props_enum_values.json'),
'r'))
return dc_props
def main(argv):
print(
json.dumps(fetch_dcid_properties_enums(FLAGS.dcid, FLAGS.dc_output_path,
FLAGS.force_fetch),
indent=2))
if __name__ == '__main__':
flags.mark_flags_as_required(['dcid'])
app.run(main)
|
from distutils.core import setup
setup(name='yamli',version='0.2',py_modules=['yamli'],
author='val314159',author_email='[email protected]',
install_requires='pyaml',url='https://github.com/val314159/yamli')
|
from PIL import Image
from PIL import ImageDraw, ImageFont
# Font location
BASE_PATH = "/Users/ruiweifang/softwares/images/"
# Directory for processed images
process_path = "images/process/"
im = Image.open('images/d.jpg')
print(im.format, im.size, im.mode)
draw = ImageDraw.Draw(im)
# Font: file path and text size
fnt = ImageFont.truetype(r'/Users/ruiweifang/softwares/images/STZHONGS.TTF', 100)
fnt2 = ImageFont.truetype(r'/Users/ruiweifang/softwares/images/SIMSUN.TTC', 50)
font = ImageFont.truetype(r'/Users/ruiweifang/softwares/images/STZHONGS.TTF', 40, encoding="unic") # set the font
draw.text((200, 100), u'中国加油\n\t武汉加油', fill='red', align="center", stroke_width=20, stroke_fill="black", font=fnt)
draw.text((700, 800), u'团车加油', fill='red', font=fnt2)
draw.text((100, 50), u'汉字水印测试', 'fuchsia', font)
# draw.ink = 0 + 0 * 256 + 255 * 256 * 256
draw.text((600, 500), "I love china!!!", fill="red", direction="ltr", language="en", stroke_width=10,
stroke_fill="yellow", font=fnt)
im.save(process_path + "d_watermark_text.jpg")
im.show()
# Thumbnail
# size = 128, 128
# im.thumbnail(size)
# im.save(process_path + "e_thumbnail_128x128.jpg")
# Batch-generate thumbnails; glob does filename pattern matching, so there is no need to walk the whole directory and test every file
# for infile in glob.glob("images/*.jpg"):
# path, filename = os.path.split(infile)
# im = Image.open(infile)
# im.thumbnail(size, Image.ANTIALIAS)
# im.save(process_path + "thumbnail_" + filename, "JPEG")
# Resize the image (resize)
# im.resize((200,100))
# im.save(process_path + "ie_resize.jpg")
# Blur effects: BLUR (basic blur), GaussianBlur (Gaussian blur), MedianFilter (median filter), FIND_EDGES (edge detection)
# im_blur = im.filter(ImageFilter.BLUR)
# im_blur.save(process_path + "d_blur.jpg", "jpeg")
# im_blur.show()
# Crop
# box = (100, 100, 400, 400)
# im_crop = im.crop(box)
# im_crop.save(process_path + "e_crop.jpg")
# Rotate; expand enlarges the canvas so the corners are not cropped and exactly touch the image edges
# im_rotate = im.rotate(45, expand=True)
# im_rotate.save(process_path + "d_rotate45_true.jpg")
# Open, rotate, show
# im.rotate(45).show()
# Horizontal flip
# im.transpose(Image.FLIP_LEFT_RIGHT).save(process_path + "flip_left_right.jpg", "JPEG")
# im.transpose(Image.FLIP_TOP_BOTTOM).save(process_path + "flip_top_bottom.jpg", "JPEG")
# im.transpose(Image.ROTATE_180).show()
# im.rotate(20).save(process_path + "d_rotate20.jpg", "JPEG")
# im.rotate(90).save(process_path + "d_rotate90.jpg", "JPEG")
# im.rotate(180).save(process_path + "d_rotate180.jpg", "JPEG")
# im.rotate(270).save(process_path + "d_rotate270.jpg", "JPEG")
# Image watermark
# mask = Image.open("images/watermark.png")
# layer = Image.new("RGBA", im.size, (0, 0, 0, 0))
# layer.paste(mask, (im.size[0] - 100, im.size[1] - 100))
# out = Image.composite(layer, im, layer).show()
# Paste
# im.paste(mask, (800, 800), None)
# im.show()
# Grayscale image
# im = im.convert("L")
# im.show()
# plt.imshow(im)
# plt.show()
#
# # Convert the original image to RGBA mode
# im = im.convert('RGBA')
# # Create a new image of the same size, fully transparent (alpha 0)
# txt = Image.new('RGBA', im.size, (0, 0, 0, 0))
# # Set the font for the text; note that some fonts cannot render Chinese characters (Microsoft YaHei can)
# fnt = ImageFont.truetype(r'/Users/ruiweifang/softwares/images/STZHONGS.TTF', 30)
# # Draw the Chinese text
# d = ImageDraw.Draw(txt)
# # Specify the position, content, font, and text transparency
# d.text((txt.size[0] - 385, txt.size[1] - 80), "中国加油,武汉加油\n团车加油", font=fnt, fill=(255, 255, 255, 150))
# # Composite the two images
# out = Image.alpha_composite(im, txt)
# out.show()
# Save the watermarked image
# out.save(BASE_PATH+"composite.jpg")
|
from flask import Blueprint
med = Blueprint('med', __name__)
from . import views
|
#!/usr/bin/env python3
from taiseilib.common import (
run_main,
update_text_file,
)
import argparse
import hashlib
import json
import re
from pathlib import Path
meta_re = re.compile(r'(.*loadPackage\()({.*?})(\);.*)', re.DOTALL)
def main(args):
parser = argparse.ArgumentParser(description='Change package UUID in JavaScript loader generated by Emscripten\'s file_packager.py', prog=args[0])
parser.add_argument('loader',
help='the .js loader file',
metavar='FILE',
type=Path,
)
parser.add_argument('--output', '-o',
help='write result to FILE (default: overwrite input)',
metavar='FILE',
type=Path,
)
g = parser.add_mutually_exclusive_group(required=True)
g.add_argument('--uuid',
help='manually specify a UUID',
metavar='UUID',
type=str,
)
g.add_argument('--sha1',
help='take the SHA1 of FILE and use that as a UUID',
metavar='FILE',
type=Path,
)
args = parser.parse_args(args[1:])
if args.uuid is None:
args.uuid = hashlib.sha1(args.sha1.read_bytes()).hexdigest()
if args.output is None:
args.output = args.loader
pre, meta, post = meta_re.match(args.loader.read_text()).groups()
meta = json.loads(meta)
meta['package_uuid'] = args.uuid
meta = json.dumps(meta, separators=(',', ':'), check_circular=False, ensure_ascii=False)
update_text_file(args.output, pre + meta + post)
if __name__ == '__main__':
run_main(main)
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import tempfile
from typing import List
from fastapi import APIRouter, File, HTTPException, UploadFile
from fastapi.background import BackgroundTasks
from fastapi.responses import FileResponse
from monailabel.interfaces.app import MONAILabelApp
from monailabel.interfaces.utils.app import app_instance
from monailabel.utils.others.generic import get_basename, get_mime_type, remove_file
from monailabel.utils.sessions import Sessions
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/session",
tags=["Session"],
responses={404: {"description": "Not found"}},
)
def get_session(session_id: str, update_ts: bool = False, image: bool = False):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
session_info = sessions.get_session(session_id, update_ts=update_ts)
if session_info:
if image:
return FileResponse(
session_info.image,
media_type=get_mime_type(session_info.image),
filename=get_basename(session_info.image),
)
return session_info.to_json()
raise HTTPException(status_code=404, detail=f"Session ({session_id}) Not Found")
def create_session(
background_tasks: BackgroundTasks,
uncompress: bool = False,
expiry: int = 0,
files: List[UploadFile] = File(...),
):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
logger.info(f"Uncompress: {uncompress}; Expiry: {expiry}")
logger.info(f"Request Files: {files}")
received_dir = tempfile.NamedTemporaryFile().name
os.makedirs(received_dir, exist_ok=True)
input_image = ""
total_files = 0
for f in files:
basename = get_basename(f.filename) if f.filename else tempfile.NamedTemporaryFile().name
input_image = os.path.join(received_dir, basename)
with open(input_image, "wb") as fb:
shutil.copyfileobj(f.file, fb)
total_files += 1
logger.info(f"{total_files} => {f} => {input_image}")
if total_files > 1:
logger.info(f"Input has multiple files; Saving ALL into: {received_dir}")
input_image = received_dir
session_id, session_info = sessions.add_session(input_image, expiry, uncompress)
background_tasks.add_task(remove_file, received_dir)
if total_files == 0:
raise HTTPException(status_code=404, detail="Image(s) Not Found")
logger.info(f"Session ID: {session_id}; Info: {session_info.to_str()}")
return {"session_id": session_id, "session_info": session_info.to_json()}
def remove_session(session_id: str):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
session_info = sessions.get_session(session_id)
if session_info:
sessions.remove_session(session_id)
return session_info.to_json()
raise HTTPException(status_code=404, detail="Session Not Found")
@router.get("/{session_id}", summary="Get Session ID")
async def api_get_session(session_id: str, update_ts: bool = False, image: bool = False):
return get_session(session_id, update_ts, image)
@router.put("/", summary="Create new session with Image")
async def api_create_session(
background_tasks: BackgroundTasks,
uncompress: bool = False,
expiry: int = 0,
files: List[UploadFile] = File(...),
):
return create_session(background_tasks, uncompress, expiry, files)
@router.delete("/{session_id}", summary="Delete Session")
async def api_remove_session(session_id: str):
return remove_session(session_id)
|
from rest_framework import status
from rest_framework.test import APITestCase
from django.urls import reverse
from validate_docbr import CPF
from users.models import User, Address
class AddressTestCase(APITestCase):
def setUp(self):
self.urls = reverse('address-list')
cpf = CPF()
user_cpf = cpf.generate()
self.user = User.objects.create_user(email='[email protected]', password='test123',
first_name='Test', last_name='TestCase', cpf=user_cpf)
Address.objects.create(id=1, owner=self.user, country='Brasil', state='SP', postal_code='18530-000', city='Tietê',
district='Loren ipsum', street='Loren ipsum ipsum', number=3)
def test_create_address_without_authentication(self):
""" Teste que verifica o que acontece se um usuário não autenticado
tentar cadastrar um endereço """
data = {
"country": "Brasil",
"state": "SP",
"postal_code": "18530-000",
"city": "Tietê",
"district": "Loren ipsum",
"street": "Loren ipsum ipsum",
"number": 3
}
response = self.client.post(self.urls, data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_address(self):
""" Teste para criar um endereço apartir de um usuário autenticado """
data = {
"country": "Brasil",
"state": "SP",
"postal_code": "18530-000",
"city": "Tietê",
"district": "Loren ipsum",
"street": "Loren ipsum ipsum",
"number": 3
}
self.client.force_authenticate(self.user)
response = self.client.post(self.urls, data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_get_all_addresses_without_authentication(self):
""" Teste que verifica o que acontece se um usuário não autenticado
tentar visualizar todos os endereços """
response = self.client.get(self.urls)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_all_addresses(self):
""" Teste para visualizar todos os endereços de um usuário autenticado """
self.client.force_authenticate(self.user)
response = self.client.get(self.urls)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_user_address(self):
""" Teste para atualizar um endereço de um usuário """
data = {
"country": "Brasil",
"state": "RJ",
"postal_code": "18530-000",
"city": "Tietê",
"district": "Loren ipsum",
"street": "Loren ipsum ipsum",
"number": 3
}
self.client.force_authenticate(self.user)
response = self.client.put('/api/users/address/1/', data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_one_address(self):
""" Teste para deletar um endereço de um determinado usuário """
self.client.force_authenticate(self.user)
response = self.client.delete('/api/users/address/1/')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
|
# -*- coding: utf-8 -*-
"""
/dms/usermanagementorg/views_group_delete_user.py
.. deletes users with the corresponding user name
Django content Management System
Hans Rauch
[email protected]
The programs of the dms system may be used freely and adapted
to specific needs.
0.01 21.04.2008 start of work
0.02 24.04.2008 show the list of deleted users
0.03 30.04.2008 delete_user
"""
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django import newforms as forms
from django.utils.safestring import mark_safe
from django.template.loader import get_template
from django.template import Context
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.queries import delete_user_by_username
from dms.queries import get_userfolder_org_id
from dms.queries import delete_group_by_id
from dms.queries import delete_users_in_group
from dms.queries import get_users_by_org_id
from dms.queries import get_users_by_group_id
from dms.queries import get_group_by_id
from dms.queries import get_user_by_id
from dms.queries import delete_user
from dms.roles import require_permission
from dms.utils import get_tabbed_form
from dms.utils import clean_data
from dms.utils_form import get_item_vars_add
from dms.usermanagementorg.utils import get_groups
from dms.usermanagementorg.help_form import help_form
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
@require_permission('perm_manage_user_new')
def usermanagementorg_group_delete_user(request, item_container):
""" loescht User einer Gruppe """
org_id = get_userfolder_org_id(item_container)
def get_group_list(org_id):
""" liefert die vorhandenen Gruppen der Organisation org_id """
tList = get_template('app/usermanagementorg/group_list.html')
groups = get_groups(org_id, True)
return tList.render(Context({ 'groups': groups, }))
def get_group_names(org_id):
""" liefert die vorhandenen Gruppen der Organisation org_id """
groups = get_groups(org_id)
ret = []
ret.append((-1, _(u'Alle Mitglieder')))
for group in groups:
ret.append((group['id'], group['description']))
return ret
group_items = get_group_list(org_id)
class DmsItemForm(forms.Form):
""" Elemente des Eingabeformulars """
group_names = forms.ChoiceField(choices=get_group_names(org_id), widget=forms.RadioSelect() )
app_name = 'usermanagementorg'
my_title = _(u'Mitglieder löschen')
show_errors = ( request.method == 'POST' )
if show_errors:
data = request.POST.copy()
else :
data = {}
    # --- fill the form with data and prepare it for display
f = DmsItemForm(data)
tabs = [ ('tab_group_user_delete', [ 'group_names', ]) ]
if request.method == 'POST':
        # --- offer users for deletion
if request.POST.has_key('group_names'):
this_group = int(request.POST['group_names'])
if this_group == -1:
group_name = _(u'alle Mitglieder')
group_users = get_users_by_org_id(org_id)
else:
group_name = get_group_by_id(this_group).description
group_users = get_users_by_group_id(this_group)
users = []
for u in group_users:
users.append( {'name': u.user.get_standard_name(), 'username': u.user.username,
'email': u.user.email, 'id': u.user.id} )
tUsers = get_template('app/usermanagementorg/base_delete.html')
context = Context({ 'users': users, 'title': my_title,
'user_count': len(users), 'group_name': group_name,
'next': './user_management.html?op=member_delete_user_by_group' })
content = tUsers.render(context)
vars = get_item_vars_add(request, item_container, app_name, my_title, content, show_errors)
vars['submit'] = ''
return render_to_response('app/base_edit.html', vars)
else:
data = clean_data(f.data)
users = []
if data.has_key('delete_id'):
ids = data['delete_id']
for id in ids:
user = get_user_by_id(id)
users.append({'name': user.get_standard_name(),})
delete_user(id)
tUsers = get_template('app/usermanagementorg/deleted_users.html')
context = Context({ 'users': users,
'next': './user_management.html?op=member_delete_user_by_group' })
content = tUsers.render(context)
vars = get_item_vars_add(request, item_container, app_name, my_title, content, show_errors)
vars['submit'] = ''
return render_to_response('app/base_edit.html', vars)
return HttpResponseRedirect(get_site_url(item_container, 'user_management.html?op=member_delete_user_by_group'))
else:
content = get_tabbed_form(tabs, help_form, app_name, f)
vars = get_item_vars_add(request, item_container, app_name, my_title, content, show_errors)
vars['next'] = './user_management.html?op=member_delete_user_by_group'
return render_to_response('app/base_edit.html', vars)
|
from io import BytesIO
import streamlit as st
from arc.arc import Index
from arc.viz import plot
from matplotlib import pyplot
from matplotlib.figure import Figure
def cached_plot(plot_idx: Index, attribute: str | None = None) -> BytesIO:
full_idx = (plot_idx, attribute)
_arc = st.session_state.arc
plot_cache = st.session_state.plot_cache
if full_idx in plot_cache:
return plot_cache[full_idx]
image_buffer = BytesIO()
if attribute is not None:
fig: Figure = plot(getattr(_arc[plot_idx], attribute))
else:
fig: Figure = plot(_arc[plot_idx])
fig.savefig(image_buffer, format="png")
pyplot.close(fig)
plot_cache[full_idx] = image_buffer
return image_buffer
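# --- Hedged usage note (added for illustration; not part of the original module) ---
# Inside a Streamlit page this helper would typically be used as below, assuming
# st.session_state.arc and st.session_state.plot_cache were initialised elsewhere;
# the index value, attribute name, and caption are placeholders:
#
#   buffer = cached_plot((2, 3), "output")
#   st.image(buffer, caption="Task 2, scene 3")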
|
import numpy as np
from scipy import optimize, stats
def distance_A(A, D):
"""Takes distance matrix D and material attribute/category
matrix A and finds the sum of the pairwise distances of
each entry compared to D. This function should be used
in an optimizer to generate an optimal A matrix.
Implements first term of Equation 4
Parameters:
D: (k x k) matrix
The D matrix generated using the d-cnn.
A: (k x m) matrix
The A matrix generated in the process of
executing create_A. This is not necessarily
the optimal version of A we are trying to seek.
Returns:
dist: float
The sum of L2-norm distances between the rows of
the A matrix and the D matrix.
"""
k = D.shape[0]
dist = 0.0
for m in range(k):
for n in range(k):
l2_A = np.linalg.norm(A[m] - A[n])
dist += (l2_A - D[m][n])**2
return dist
# TODO Need to look over this and rework the function to better fit the definition.
def kld_A(A, beta_a, beta_b):
"""Takes optimized material attribute/category matrix A and
finds the KL divergence between the A matrix distribution
and a beta distribution.
Implements second term (gamma-weighted) Equation 4,
and term for Equation 5
Parameters:
A: (k x m) matrix
The optimal A matrix generated in the process of
executing create_A.
beta_a: float
The beta distribution parameter a in (4)
beta_b: float
The beta distribution parameter b in (4)
Returns:
kld: float
The KL-divergence between the Gaussian KDE of A
and the beta distribution with a, b parameters.
"""
# Use a grid of points to approximate the Beta distribution.
# Points in range [0, 1] because that is the Beta distribution's range.
# Start at 0.02, end at 0.98 to avoid infinite values
points = np.mgrid[0.02: 0.98: 0.04]
# points = np.vstack(np.meshgrid(x, x, x)).reshape(3, -1).T
akde = stats.gaussian_kde(A) # Gaussian kernel density estimate for A -> Eq. 5
beta = stats.beta(beta_a, beta_b) # Beta distribution to compare akde to
beta_pts = [p for p in points] # Beta distribution is one-dimensional
beta_pts = [beta.pdf(p) for p in beta_pts]
akde_pts = [akde(p) for p in points]
akde_pts = np.squeeze(akde_pts)
kld = stats.entropy(beta_pts, akde_pts) # KL-divergence -> Eq. 4 term 2
return kld
def min_A(A, D, w_kld, beta_a, beta_b):
"""Uses distance_A and kld_A to implement the
minimization objetive introduced in equation 4
Parameters:
D: (k x k) matrix
The D matrix generated using the d-cnn.
w_kld: float
The hyperparameter / weight gamma in (4)
beta_a: float
The beta distribution parameter a in (4)
beta_b: float
The beta distribution parameter b in (4)
Returns:
dist: float
The distance to be minimized in the minimization
objective.
"""
return distance_A(A,D) + w_kld * kld_A(A, beta_a, beta_b)
def create_A(D, m, w_kld = 1e-1, beta_a = 0.5, beta_b = 0.5):
"""Takes a material distance matrix D and runs it
through the L-BFGS algorithm to create an optimal
A matrix.
Implements Equation 4
Parameters:
D: (k x k) matrix
The D matrix generated using the d-cnn.
m: int
The number of material attribute categories desired
w_kld: float
The hyperparameter / weight gamma in (4)
beta_a: float
The beta distribution parameter a in (4)
Default = 0.5 b/c it exhibits suitable distribution for
widely fitting or widely not fitting material category
beta_b: float
The beta distribution parameter b in (4)
Default = 0.5 b/c it exhibits suitable distribution for
widely fitting or widely not fitting material category
Returns:
A: (k x m) matrix
The **optimized** material attribute/category matrix
to be utilized by the MMAC-CNN.
"""
k = D.shape[0]
# Unoptimized A function constrained to the range [0, 1]
A = np.random.random((k, m))
print(f'\n--- Before optim: ---')
print(f'D matrix :\n{D}')
print(f'\nA matrix :\n{A}')
print(f'\nd(D; A) : {distance_A(A, D)}')
# NOTE Sometimes the KL divergence blows up to inf and
# the resulting A matrix is useless. Especially with less M classes.
# If this happens, try to run the optimization code again.
#
# Is there an issue with the A matrix random initialization?
result = optimize.minimize(min_A, A, args=(D, w_kld, beta_a, beta_b),
method = 'L-BFGS-B', callback = None)
dist = result.fun
A = result.x
A = np.reshape(A, (k, m))
# Normalize A matrix values to the range [0, 1] after optimization
A = (A - np.min(A))/np.ptp(A)
print(f'\n--- After optim: ---')
print(f'A matrix :\n{A}')
print(f'\nd(D; A) : {dist}')
return A
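# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Evaluates the two terms of Equation 4 on a small toy problem: a symmetric distance
# matrix D (standing in for the d-cnn output) and a random candidate A matrix. The
# toy data, seed, and sizes below are assumptions.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    raw = rng.random((4, 4))
    D_toy = (raw + raw.T) / 2.0          # symmetric pairwise distances
    np.fill_diagonal(D_toy, 0.0)         # zero self-distance
    A_toy = rng.random((4, 3))           # k = 4 materials, m = 3 attribute categories
    print('term 1, d(D; A):', distance_A(A_toy, D_toy))
    # kld_A is evaluated on the flattened A, matching how min_A receives it from the optimizer
    print('term 2, KL divergence:', kld_A(A_toy.ravel(), 0.5, 0.5))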
|
# Commonly used java library dependencies
def java_base_repositories():
native.maven_jar(
name = "jcip_annotations",
artifact = "net.jcip:jcip-annotations:1.0",
sha1 = "afba4942caaeaf46aab0b976afd57cc7c181467e",
)
native.maven_jar(
name = "com_google_code_gson",
artifact = "com.google.code.gson:gson:2.8.0",
sha1 = "c4ba5371a29ac9b2ad6129b1d39ea38750043eff",
)
native.maven_jar(
name = "com_google_j2objc_j2objc_annotations",
artifact = "com.google.j2objc:j2objc-annotations:1.3",
sha1 = "ba035118bc8bac37d7eff77700720999acd9986d",
)
native.maven_jar(
name = "org_codehaus_mojo_animal_sniffer_annotations",
artifact = "org.codehaus.mojo:animal-sniffer-annotations:1.15",
sha1 = "3f19b51588ad71482ce4c169f54f697b6181d1b4",
)
# Guava v21 adds Java 8 features that aren't supported on Android. As of v21
# there is no way to support Android, so until that happens we stick to v20.
native.new_git_repository(
name = "com_google_guava",
tag = "v20.0",
remote = "https://github.com/google/guava.git",
build_file = str(Label("//third_party:guava.BUILD")),
)
# grpc uses annotations provided by this library
native.git_repository(
name = "error_prone",
tag = "v2.0.18",
remote = "https://github.com/google/error-prone.git",
)
native.maven_jar(
name = "com_google_code_findbugs_jsr305",
artifact = "com.google.code.findbugs:jsr305:3.0.1",
sha1 = "f7be08ec23c21485b9b5a1cf1654c2ec8c58168d",
)
native.maven_jar(
name = "org_slf4j_slf4j_api",
artifact = "org.slf4j:slf4j-api:1.7.7",
sha1 = "2b8019b6249bb05d81d3a3094e468753e2b21311"
)
native.maven_jar(
name = "org_slf4j_slf4j_simple",
artifact = "org.slf4j:slf4j-simple:1.7.7",
sha1 = "8095d0b9f7e0a9cd79a663c740e0f8fb31d0e2c8"
)
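# --- Hedged usage note (added for illustration; not part of the original file) ---
# In a Bazel WORKSPACE file this macro would typically be loaded and invoked as
# below; the .bzl file label is an assumption:
#
#   load("//third_party:java_repositories.bzl", "java_base_repositories")
#   java_base_repositories()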
|
import os
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Sequence, Optional
def make_func_that_creates_cwd_and_out_root_before_running(
out_root: Path, func: Callable[[Path], None]
):
"""
When switching branches, the CWD or target out_root may no longer exist.
Pass a function to this function to create a function that
creates those directories as needed before running the logic
"""
cwd = Path(os.getcwd())
absolute_out_root = out_root.absolute()
def make_dirs_add_run_func(path: Path):
# Need to ensure that both cwd and out root exist on the template branch
for p in [cwd, absolute_out_root]:
if not p.exists():
p.mkdir(parents=True)
# If cwd was deleted when switching branches, need to navigate back there
        # or os.getcwd will throw a FileNotFoundError (which also breaks path.absolute())
os.chdir(cwd)
func(path)
return make_dirs_add_run_func
def make_all_dirs(paths: Sequence[Path]):
for path in paths:
absolute_path = path.resolve()
if not absolute_path.exists():
absolute_path.mkdir(parents=True)
def copy_flexlate_configs(src: Path, dst: Path, root: Path):
for path in src.absolute().iterdir():
if path.name in ("flexlate.json", "flexlate-project.json"):
shutil.copy(path, dst)
elif path.name == ".git":
continue
elif path.is_dir():
dst_dir = dst / path.name
if not dst_dir.exists():
dst_dir.mkdir()
copy_flexlate_configs(path, dst_dir, root)
def location_relative_to_new_parent(
path: Path,
orig_parent: Path,
new_parent: Path,
path_is_relative_to: Optional[Path] = None,
) -> Path:
if path_is_relative_to is None and not path.is_absolute():
raise ValueError(
f"must pass path_is_relative_to when passing relative path {path}"
)
abs_path: Path = path
if not path.is_absolute() and path_is_relative_to is not None:
abs_path = path_is_relative_to.absolute() / path
try:
result = new_parent / abs_path.relative_to(orig_parent)
return result
except ValueError as e:
# python >= 3.9: is not in the subpath of
# python <= 3.8: does not start with
if "is not in the subpath of" in str(e) or "does not start with" in str(e):
# Path is not in project, must be user path, return as is
return path
else:
raise e
@contextmanager
def change_directory_to(path: Path):
current_path = os.getcwd()
os.chdir(path)
yield
os.chdir(current_path)
def make_absolute_path_from_possibly_relative_to_another_path(
path: Path, possibly_relative_to: Path
) -> Path:
if path.is_absolute():
return path
else:
return (possibly_relative_to / path).resolve()
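# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# location_relative_to_new_parent re-roots a path from one project directory to
# another and falls back to the original path when it lies outside the project.
# The example paths below are assumptions.
if __name__ == "__main__":
    orig_root = Path("/tmp/project")
    new_root = Path("/tmp/project-copy")
    inside = location_relative_to_new_parent(
        Path("/tmp/project/templates/app"), orig_root, new_root
    )
    outside = location_relative_to_new_parent(Path("/etc/hosts"), orig_root, new_root)
    print(inside)   # /tmp/project-copy/templates/app
    print(outside)  # /etc/hosts (outside the original parent, returned unchanged)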
|
import os
import random
import sys
def usage():
print "{0} <height> [width [range [seed]]]".format(sys.argv[0])
sys.exit(0)
def generate(height, width, range):
map = [[random.randint(-range, range)
for _ in xrange(width)] for _ in xrange(height)]
total = sum([sum(row) for row in map])
avg = total // (height*width)
total -= avg * (height*width)
map = [[v - avg for v in row] for row in map]
for _ in xrange(total):
map[random.randint(0, height-1)][random.randint(0, width-1)] -= 1
assert(sum([sum(row) for row in map]) == 0)
return map
def dump(map):
print len(map), len(map[0])
for row in map:
print ' '.join([str(v) for v in row])
def int_arg(n, default=None):
try:
return int(sys.argv[n])
except:
return default
def main():
height = int_arg(1)
width = int_arg(2, height)
if not width:
usage()
range = int_arg(3, 100)
seed = int_arg(4)
random.seed(seed)
map = generate(height, width, range)
dump(map)
if __name__ == "__main__":
main()
|
import json
class Construct_Lacima_MQTT_Devices(object):
def __init__(self,bc,cd):
self.bc = bc
self.cd = cd
bc.add_header_node("MQTT_DEVICES")
cd.construct_package("MQTT_DEVICES_DATA")
cd.add_redis_stream("MQTT_INPUT_QUEUE",50000)
cd.add_redis_stream("MQTT_SENSOR_QUEUE",10000)
cd.add_redis_stream("MQTT_PAST_ACTION_QUEUE",300)
cd.add_hash("MQTT_SENSOR_STATUS")
cd.add_hash("MQTT_DEVICES")
cd.add_hash("MQTT_SUBSCRIPTIONS")
cd.add_hash("MQTT_CONTACT_LOG")
cd.add_hash("MQTT_UNKNOWN_DEVICES")
cd.add_hash("MQTT_UNKNOWN_SUBSCRIPTIONS")
cd.add_hash("MQTT_REBOOT_LOG")
cd.add_job_queue("MQTT_PUBLISH_QUEUE",depth= 50,forward = False)
cd.close_package_contruction()
properties = {}
properties["HOST"] = "farm_control.fios-router.home"
properties["PORT"] = 8883
properties["BASE_TOPIC"] = "/REMOTES"
self.bc.add_info_node( "MQTT_SERVER","MQTT_SERVER",properties=properties )
#self.add_security_monitor("GARAGE_MONITOR_1")
#self.add_current_monitor("CURRENT_MONITOR_1")
self.add_well_monitor("WELL_MONITOR_1")
self.irrigation_hash_fields()
self.add_minute_average_fields()
bc.end_header_node("MQTT_DEVICES")
def add_minute_average_fields(self):
properties = {}
properties["data"] = {}
properties["data"]["WELL_PRESSURE"] = ["WELL_MONITOR_1",'INPUT/AD1/VALUE/RESPONSE', "WELL_PRESSURE" ]
#properties["data"]["EQUIPMENT_CURRENT"] =["CURRENT_MONITOR_1",'INPUT/AD1/VALUE/RESPONSE',"EQUIPMENT_CURRENT"]
#properties["data"]["IRRIGATION_CURRENT"] = [ "CURRENT_MONITOR_1", 'INPUT/AD1/VALUE/RESPONSE', "IRRIGATION_CURRENT"]
properties["data"]["MAIN_FLOW_METER"] = ["WELL_MONITOR_1", 'INPUT/PULSE_COUNT/VALUE', "MAIN_FLOW_METER" ]
properties["data"]["CLEANING_FLOW_METER"] = ["WELL_MONITOR_1",'INPUT/PULSE_COUNT/VALUE' , "CLEANING_OUTLET" ]
properties["data"]["INPUT_PUMP_CURRENT"] = ["WELL_MONITOR_1",'INPUT/AD1/VALUE/RESPONSE' ,"INPUT_PUMP_CURRENT" ]
properties["data"]["OUTPUT_PUMP_CURRENT"] = ["WELL_MONITOR_1", 'INPUT/AD1/VALUE/RESPONSE', "OUTPUT_PUMP_CURRENT" ]
self.bc.add_info_node( "SENSOR_MINUTE_FIELDS","SENSOR_MINUTE_FIELDS",properties=properties )
def irrigation_hash_fields(self):
properties = {}
properties["data"] = {}
properties["data"]["WELL_PRESSURE"] = ["WELL_MONITOR_1",'INPUT/AD1/VALUE/RESPONSE', "WELL_PRESSURE" ]
#properties["data"]["EQUIPMENT_CURRENT"] =["CURRENT_MONITOR_1" ,'INPUT/AD1/VALUE/RESPONSE',"EQUIPMENT_CURRENT"]
#properties["data"]["IRRIGATION_CURRENT"] = [ "CURRENT_MONITOR_1", 'INPUT/AD1/VALUE/RESPONSE', "IRRIGATION_CURRENT"]
properties["data"]["MAIN_FLOW_METER"] = ["WELL_MONITOR_1", 'INPUT/PULSE_COUNT/VALUE', "MAIN_FLOW_METER" ]
properties["data"]["CLEANING_FLOW_METER"] = ["WELL_MONITOR_1",'INPUT/PULSE_COUNT/VALUE' , "CLEANING_OUTLET" ]
properties["data"]["INPUT_PUMP_CURRENT"] = ["WELL_MONITOR_1",'INPUT/AD1/VALUE/RESPONSE' ,"INPUT_PUMP_CURRENT" ]
properties["data"]["OUTPUT_PUMP_CURRENT"] = ["WELL_MONITOR_1", 'INPUT/AD1/VALUE/RESPONSE', "OUTPUT_PUMP_CURRENT" ]
properties["data"]["SLAVE_RELAY_STATE"] = ["CURRENT_MONITOR_1" ,"OUTPUT/MQTT_CURRENT/RELAY_STATE/RESPONSE",None]
properties["data"]["SLAVE_MAX_CURRENT"] = ["CURRENT_MONITOR_1" ,"INPUT/MQTT_CURRENT/MAX_CURRENTS/RESPONSE",None]
properties["data"]["INSTANT_CURRENT"] = ["CURRENT_MONITOR_1" ,"INPUT/MQTT_CURRENT/CURRENTS/RESPONSE",None]
self.bc.add_info_node( "IRRIGATION_HASH_FIELDS","IRRIGATION_HASH_FIELDS",properties=properties )
def add_well_monitor(self,mqtt_tag):
properties = {}
properties["type"] = "WELL_MONITOR"
properties["HEART_BEAT"] = "HEART_BEAT"
properties["HEART_BEAT_TIME_OUT"] = 120
properties["REBOOT_FLAG"] = True
properties["REBOOT_KEY"] = "REBOOT"
properties["topic"] = mqtt_tag
properties["null_commands"] = {}
properties["subscriptions"] = {}
properties["subscriptions"]["REBOOT"] = True
properties["subscriptions"]["HEART_BEAT"] = True
properties["subscriptions"]['INPUT/AD1/VALUE/RESPONSE'] = {"type":"analog_input","main_field":'MEASUREMENTS' ,"fields":
[ { "name":"WELL_PRESSURE","type":"pressure_gauge","reduction":2,"range":100,"channel_field":'CHANNEL',"channel_value":0},
{ "name":"INPUT_PUMP_CURRENT","type":"rms_current_transformer","range":50,"channel_field":'CHANNEL',"channel_value":6,"resistor":150},
{ "name":"OUTPUT_PUMP_CURRENT","type":"rms_current_transformer","range":20,"channel_field":'CHANNEL',"channel_value":7,"resistor":150} ]}
properties["subscriptions"]['INPUT/PULSE_COUNT/VALUE'] = {"type":"pulse_flow","main_field":"DATA", "fields": [
{"name":"MAIN_FLOW_METER", "GPIO_PIN":5,"data_field":"COUNTS","conversion":4./2./60./2.0 },
{"name":"CLEANING_OUTLET", "GPIO_PIN":18,"data_field":"COUNTS","conversion":4./300./3.78541 }
]}
self.bc.add_info_node( "MQTT_DEVICE",mqtt_tag,properties=properties )
'''
def add_current_monitor(self,mqtt_tag):
properties = {}
properties["type"] = "CURRENT_MONITOR"
properties["topic"] = mqtt_tag
properties["HEART_BEAT"] = "HEART_BEAT"
properties["HEART_BEAT_TIME_OUT"] = 120
properties["REBOOT_FLAG"] = True
properties["REBOOT_KEY"] = "REBOOT"
properties["null_commands"] = {}
properties["null_commands"]["INPUT/MQTT_CURRENT/GET_LIMIT_CURRENTS"] = True
properties["null_commands"]["INPUT/MQTT_CURRENT/GET_MAX_CURRENTS"] = True
properties["null_commands"]["INPUT/MQTT_CURRENT/READ_CURRENT"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/READ_RELAY_STATES"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/CLEAR_MAX_CURRENTS"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/ENABLE_EQUIPMENT_RELAY"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/ENABLE_IRRIGATION_RELAY"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/DISABLE_EQUIPMENT_RELAY"] = True
properties["null_commands"]["OUTPUT/MQTT_CURRENT/DISABLE_IRRIGATION_RELAY"] = True
properties["subscriptions"] = {}
properties["subscriptions"]["REBOOT"] = True
properties["subscriptions"]["HEART_BEAT"] = True
properties["subscriptions"]['INPUT/AD1/VALUE/RESPONSE'] = {"type":"analog_input","main_field":'MEASUREMENTS' ,"fields":[
{"name":"EQUIPMENT_CURRENT","type":"analog","channel_field":'CHANNEL',"channel_value":0},
{"name":"IRRIGATION_CURRENT","type":"analog","channel_field":'CHANNEL',"channel_value":3}]}
properties["subscriptions"]["OUTPUT/MQTT_CURRENT/EQUIPMENT_RELAY_TRIP/RESPONSE"] = True
properties["subscriptions"]["OUTPUT/MQTT_CURRENT/IRRIGATION_RELAY_TRIP/RESPONSE"] = True
properties["subscriptions"]["INPUT/MQTT_CURRENT/GET_LIMIT_CURRENTS/REPONSE"] = True
properties["subscriptions"]["INPUT/MQTT_CURRENT/MAX_CURRENTS/RESPONSE"] = {"type":"flat","main_field":None ,"fields":[
{"name":'MAX_EQUIPMENT_CURRENT',"field":'MAX_EQUIPMENT_CURRENT'},
{"name":'MAX_IRRIGATION_CURRENT',"field":'MAX_IRRIGATION_CURRENT'}]}
properties["subscriptions"]["INPUT/MQTT_CURRENT/CURRENTS/RESPONSE"] = {"type":"flat","main_field":None ,"fields":[
{"name":'EQUIPMENT_CURRENT',"field":'EQUIPMENT_CURRENT'},
{"name":'IRRIGATION_CURRENT',"field":'IRRIGATION_CURRENT'}]}
properties["subscriptions"]["OUTPUT/MQTT_CURRENT/RELAY_STATE/RESPONSE"] = {"type":"flat","main_field":None ,"fields":[
{"name":'EQUIPMENT_STATE',"field":'EQUIPMENT_STATE'},
{"name":'IRRIGATION_STATE',"field":'IRRIGATION_STATE'}]}
self.bc.add_info_node( "MQTT_DEVICE",mqtt_tag,properties=properties )
def add_security_monitor(self,mqtt_tag):
properties = {}
properties["REBOOT_FLAG"] = True
properties["REBOOT_KEY"] = "REBOOT"
properties["type"] = "SECURITY_MONITOR"
properties["HEART_BEAT"] = "HEART_BEAT"
properties["HEART_BEAT_TIME_OUT"] = 120
properties["topic"] = mqtt_tag
properties["null_commands"] = {}
properties["subscriptions"] = {}
properties["subscriptions"]["REBOOT"] = True
properties["subscriptions"]["HEART_BEAT"] = True
self.bc.add_info_node( "MQTT_DEVICE",mqtt_tag,properties=properties )
''' |
import numpy as np
import funcs
def normalize_and_make_homogeneous(x_unnormalized):
"""Modify x_unnormalized to normalize the vector according to standard DLT methods and make homogeneous. Normalization is used to stabilize calculation of DLT
x_unnormalized: 3 or 2 dimensional input data to be normalized
"""
mean, std_dev = np.mean(x_unnormalized, 0), np.std(x_unnormalized)
if x_unnormalized.shape[1] == 2:
transform = np.array([[std_dev, 0, mean[0]], [0, std_dev, mean[1]], [0, 0, 1]])
elif x_unnormalized.shape[1] == 3:
transform = np.array([[std_dev, 0, 0, mean[0]], [0, std_dev, 0, mean[1]], [0, 0, std_dev, mean[2]], [0, 0, 0, 1]])
else:
print("Please use number of dimensions equal to 2 or 3.")
assert False
transform = np.linalg.inv(transform)
x_unnormalized = np.dot(transform, np.concatenate((x_unnormalized.T, np.ones((1,x_unnormalized.shape[0])))))
x_unnormalized = x_unnormalized[0:x_unnormalized.shape[1], :].T
return transform, x_unnormalized
def calculate_calibration_matrix(points3D, points2D):
"""Use DLT to calculate the 11 params of the calibration matrix. Calibration matrix transforms from 3D to 2D."""
transform3D, points3D_norm = normalize_and_make_homogeneous(points3D)
transform2D, points2D_norm = normalize_and_make_homogeneous(points2D)
matrix = []
for i in range(points3D.shape[0]):
X, Y, Z = points3D_norm[i,0], points3D_norm[i,1], points3D_norm[i,2]
x, y = points2D_norm[i, 0], points2D_norm[i, 1]
matrix.append([-X, -Y, -Z, -1, 0, 0, 0, 0, x*X, x*Y, x*Z, x])
matrix.append([0,0,0,0,-X,-Y,-Z,-1,y*X,y*Y,y*Z, y])
matrix = np.array(matrix)
_,_,V = np.linalg.svd(matrix)
calibration_matrix = np.reshape(V[-1,:] / V[-1,-1], (3,4))
#Invert normalization with transform matrices
calibration_matrix = np.dot(np.linalg.pinv(transform2D), np.dot(calibration_matrix, transform3D))
calibration_matrix = calibration_matrix / calibration_matrix[-1,-1]
return calibration_matrix
def get_calibration_points(old_points, old_points3D, new_image, dist):
"""Match feature points for the new image to the point cloud."""
_, mask_a, points2D, mask_b = funcs.find_matches(old_points[0], old_points[1], new_image[0], new_image[1], dist)
points3D = old_points3D[mask_a]
return points3D, points2D
def perform_dlt(old_points, old_points3D, new_image, dist=0.7):
"""Perform dlt on the new image to get camera matrix.
old_points: 2D points in pointcloud from image 1 or 2. list with old_points[0] being 2d points and old_points[1] being description vectors for the points
old_points3D: same points as old_points but with depth. old_points3D are in the same order as old_points
new_image: 2D points from image 3. list with new_image[0] being 2d points and new_image[1] being description vectors for the points
"""
old_points3D = np.array(old_points3D)
points3D, points2D = get_calibration_points(old_points, old_points3D, new_image, dist)
return calculate_calibration_matrix(points3D, points2D)
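# --- Hedged self-check sketch (added for illustration; not part of the original module) ---
# Projects random 3D points with a known 3x4 camera matrix and verifies that
# calculate_calibration_matrix recovers a matrix that reprojects them closely.
# The toy camera matrix and point cloud below are assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    P_true = np.array([[800., 0., 320., 10.],
                       [0., 800., 240., 20.],
                       [0., 0., 1., 1.]])
    pts3d = rng.uniform(-1.0, 1.0, (20, 3)) + np.array([0.0, 0.0, 5.0])  # in front of the camera
    homog = np.hstack([pts3d, np.ones((20, 1))])
    proj = (P_true @ homog.T).T
    pts2d = proj[:, :2] / proj[:, 2:3]
    P_est = calculate_calibration_matrix(pts3d, pts2d)
    reproj = (P_est @ homog.T).T
    reproj = reproj[:, :2] / reproj[:, 2:3]
    print("max reprojection error:", np.abs(reproj - pts2d).max())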
|
import torch
import ipdb
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
import pytorch_lightning as pl
from sklearn.metrics import f1_score as compute_f1_score
MLP_HIDDEN_SIZE1 = 256
MLP_HIDDEN_SIZE2 = 256
RNN_HIDDEN_SIZE = 128
class MapPreloadModel(pl.LightningModule):
def __init__(self,
vocab_size,
max_seq_len,
mlp_hidden_size1=MLP_HIDDEN_SIZE1,
mlp_hidden_size2=MLP_HIDDEN_SIZE2,
rnn_hidden_size=RNN_HIDDEN_SIZE,
padding_idx=None,
transformer_input_size=768,
use_extra_feature=True,
rnn_num_layer=1,
lr=1e-3):
super().__init__()
if transformer_input_size is not None:
# Add projection layer
self.input_projection = nn.Linear(transformer_input_size, 768)
else:
self.input_projection = None
self.rnn = MapPreloadRnn(vocab_size, rnn_hidden_size, rnn_hidden_size, padding_idx,
num_layers=rnn_num_layer)
rnn_output_dim = 2 * rnn_hidden_size * max_seq_len
self.rnn_projection = nn.Linear(rnn_output_dim, 768)
if use_extra_feature:
mlp_input_size = self.rnn_projection.out_features + self.input_projection.out_features # bi directional
else:
mlp_input_size = self.rnn_projection.out_features
self.use_extra_feature = use_extra_feature
self.mlp = MapPreloadMLP(vocab_size, mlp_input_size, mlp_hidden_size1, mlp_hidden_size2)
self.loss_func = nn.CrossEntropyLoss()
self.lr = lr
print(self)
def forward(self, rnn_x=None, transformer_embedding=None):
rnn_x, x_lens = rnn_x
rnn_embedding = self.rnn_projection(self.rnn(rnn_x, x_lens))
if self.use_extra_feature:
embedding = torch.cat([rnn_embedding, self.input_projection(transformer_embedding)], dim=1)
else:
embedding = rnn_embedding
output = self.mlp(embedding)
return output
def forward_loss(self, batch, loss_msg):
rnn_x, transformer_embedding, y = batch
output = self(rnn_x=rnn_x, transformer_embedding=transformer_embedding)
loss = self.loss_func(output, y.flatten())
self.log(loss_msg, loss, prog_bar=True)
return loss, output
def training_step(self, batch, batch_idx):
loss, _ = self.forward_loss(batch, 'train_loss')
return loss
def validation_step(self, batch, batch_idx):
loss, _ = self.forward_loss(batch, 'val_loss')
return loss
def test_step(self, batch, batch_idx, label1_thresh=0.5):
loss, output = self.forward_loss(batch, 'test_loss')
_, _, y = batch
# compute the f1-score
predict_labels = torch.argmax(F.softmax(output, dim=1), dim=1)
# compute labels
true_labels = y.flatten().detach().cpu().tolist()
predict_labels = predict_labels.flatten().detach().cpu().tolist()
self.log('test_loss', loss, prog_bar=True)
return loss, (true_labels, predict_labels)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return {
'optimizer': optimizer,
'lr_scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
patience=5,
min_lr=1e-7,
verbose=True),
'monitor': 'train_loss'
}
class MapPreloadMLP(nn.Module):
def __init__(self,
vocab_size,
input_size,
hidden_size1,
hidden_size2
):
super().__init__()
self.output_layer = nn.Sequential(
nn.Linear(input_size, hidden_size1),
# nn.BatchNorm1d(hidden_size1),
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_size1, hidden_size2),
# nn.BatchNorm1d(hidden_size2),
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_size2, vocab_size)
)
def forward(self, x):
return self.output_layer(x)
class MapPreloadRnn(nn.Module):
def __init__(self,
vocab_size,
input_size,
hidden_size,
padding_idx,
num_layers=1,
):
super().__init__()
self.embeds = nn.Embedding(vocab_size, input_size, padding_idx=padding_idx)
self.hidden_size = hidden_size
self.rnn = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
def forward(self, x, x_lens):
x = self.embeds(x)
packed_x = pack_padded_sequence(x, batch_first=True, lengths=x_lens)
packed_output, _ = self.rnn(packed_x)
unpacked_output, output_lens = pad_packed_sequence(packed_output,
batch_first=True) # batch_size x max_seq_len x dim
        outputs = unpacked_output.reshape(unpacked_output.shape[0], -1)
return outputs
def main():
# Use toy data to check model
import sys
sys.path.append('..')
from game_toy_datasets import ToyDataset1, collate_fn_variable_len, MapPreloadDataset1
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
# Dataset Config
data_size = 1000
vocab_size = 5
max_seq_len = 64
padding_index = 0
transformer_input_size = 768
use_transformer_feature = False
dataset = MapPreloadDataset1(data_size, vocab_size, max_seq_len, padding_index,
transformer_input_size,
use_transformer_feature)
train, val = random_split(dataset, [int(0.9 * len(dataset)), int(0.1 * len(dataset))])
# Train config
epoch = 20
batch_size = 64
trainer = pl.Trainer(max_epochs=epoch,
deterministic=True,
progress_bar_refresh_rate=1,
num_sanity_val_steps=2,
log_every_n_steps=50,
reload_dataloaders_every_epoch=False)
# Init model
hidden_size = 64
mlp_hidden_size1 = hidden_size
mlp_hidden_size2 = hidden_size
rnn_hidden_size = hidden_size
rnn_num_layer = 2
model = MapPreloadModel(
vocab_size,
max_seq_len,
mlp_hidden_size1,
mlp_hidden_size2,
rnn_hidden_size=rnn_hidden_size,
padding_idx=padding_index,
        use_extra_feature=use_transformer_feature,
transformer_input_size=transformer_input_size,
rnn_num_layer=rnn_num_layer)
# Train
trainer.fit(model,
DataLoader(train, batch_size=batch_size, shuffle=True, collate_fn=collate_fn_variable_len),
DataLoader(val, batch_size=batch_size, collate_fn=collate_fn_variable_len))
# Test
trainer.test(model, DataLoader(val, batch_size=batch_size, collate_fn=collate_fn_variable_len))
if __name__ == '__main__':
main()
|
from urllib.parse import urlparse, urlunparse
from traverse_page import get_soup
class GoogleSearch:
def __init__(self, phrase):
self.phrase = phrase
self.phrase = self.phrase.replace(' ', '+')
self.url = f"https://google.pl/search?q={self.phrase}"
def get_soup(self, url=None, parser='html.parser', user_agent='desktop'):
if url is None:
url = self.url
return get_soup(url=url, parser=parser, user_agent=user_agent)
def get_external_links(self, bs_obj=None):
if bs_obj is None:
bs_obj = self.get_soup()
for link in bs_obj.find_all("div", {"class": "r"}):
link = link.find("a")
if 'href' in link.attrs:
page_url = link.attrs['href']
if 'google' not in page_url:
page_url = urlparse(page_url)
# yield urlunparse((page_url.scheme, page_url.netloc, page_url.path, '', '', ''))
yield urlunparse((page_url.scheme, page_url.netloc, '', '', '', ''))
def get_next_pages(self, bs_obj=None):
if bs_obj is None:
bs_obj = self.get_soup()
pages_dict = dict()
pages_links = bs_obj.find_all("a", {"class": "fl"})
for page_link in pages_links:
try:
key_value = str(page_link['aria-label'])
if key_value not in pages_dict.keys():
pages_dict[key_value] = f"https://google.com{page_link['href']}"
except ValueError:
pass
except KeyError:
pass
return pages_dict
|
# -*- coding: utf-8 -*-
import filecmp
import os.path
import unittest
from pseudol10nutil import POFileUtil, PseudoL10nUtil
import pseudol10nutil.transforms
class TestPOFileUtil(unittest.TestCase):
def setUp(self):
self.pofileutil = POFileUtil()
def test_generate_pseudolocalized_po(self):
input_file = "./testdata/locales/helloworld.pot"
expected_file = "./testdata/locales/eo/LC_MESSAGES/helloworld.po"
basename, ext = os.path.splitext(expected_file)
generated_file = basename + "_generated" + ext
self.pofileutil.pseudolocalizefile(input_file, generated_file)
self.assertTrue(filecmp.cmp(expected_file, generated_file))
os.remove(generated_file)
class TestPseudoL10nUtil(unittest.TestCase):
def setUp(self):
self.util = PseudoL10nUtil()
self.test_data = u"The quick brown fox jumps over the lazy dog"
def test_default(self):
expected = u"⟦Ťȟê ʠüıċǩ ƀȓøẁñ ƒøẋ ǰüɱƥš øṽêȓ ťȟê ĺàźÿ đøğ﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎Ѝ⟧"
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
def test_empty_string(self):
self.assertEqual(u"", self.util.pseudolocalize(u""))
self.assertEqual(u"", self.util.pseudolocalize(None))
def test_default_fmtspec(self):
test_data_fmtspec = u"The quick brown {0} jumps over the lazy {1}."
expected = u"⟦Ťȟê ʠüıċǩ ƀȓøẁñ {0} ǰüɱƥš øṽêȓ ťȟê ĺàźÿ {1}.﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎Ѝא⟧"
self.assertEqual(expected, self.util.pseudolocalize(test_data_fmtspec))
test_data_fmtspec = u"The quick brown {animal1} jumps over the lazy {animal2}."
expected = u"⟦Ťȟê ʠüıċǩ ƀȓøẁñ {animal1} ǰüɱƥš øṽêȓ ťȟê ĺàźÿ {animal2}.﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘⟧"
self.assertEqual(expected, self.util.pseudolocalize(test_data_fmtspec))
def test_default_printffmtspec(self):
test_data_printffmtspec = u"The quick brown %s jumps over the lazy %s."
expected = u"⟦Ťȟê ʠüıċǩ ƀȓøẁñ %s ǰüɱƥš øṽêȓ ťȟê ĺàźÿ %s.﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎Ѝ⟧"
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
test_data_printffmtspec = u"The quick brown %(animal1)s jumps over the lazy %(animal2)s."
expected = u"⟦Ťȟê ʠüıċǩ ƀȓøẁñ %(animal1)s ǰüɱƥš øṽêȓ ťȟê ĺàźÿ %(animal2)s.﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦⟧"
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
def test_transliterate_diacritic(self):
expected = u"Ťȟê ʠüıċǩ ƀȓøẁñ ƒøẋ ǰüɱƥš øṽêȓ ťȟê ĺàźÿ đøğ"
self.util.transforms = [pseudol10nutil.transforms.transliterate_diacritic]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
test_data_fmtspec = u"Source {0} returned 0 rows, source {1} returned 1 row."
expected = u"Șøüȓċê {0} ȓêťüȓñêđ 0 ȓøẁš, šøüȓċê {1} ȓêťüȓñêđ 1 ȓøẁ."
self.assertEqual(expected, self.util.pseudolocalize(test_data_fmtspec))
test_data_printffmtspec = u"Source %(source0)s returned 0 rows, source %(source1)s returned 1 row."
expected = u"Șøüȓċê %(source0)s ȓêťüȓñêđ 0 ȓøẁš, šøüȓċê %(source1)s ȓêťüȓñêđ 1 ȓøẁ."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
test_data_printffmtspec = u"Source %s returned %d rows."
expected = u"Șøüȓċê %s ȓêťüȓñêđ %d ȓøẁš."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
def test_transliterate_circled(self):
expected = u"Ⓣⓗⓔ ⓠⓤⓘⓒⓚ ⓑⓡⓞⓦⓝ ⓕⓞⓧ ⓙⓤⓜⓟⓢ ⓞⓥⓔⓡ ⓣⓗⓔ ⓛⓐⓩⓨ ⓓⓞⓖ"
self.util.transforms = [pseudol10nutil.transforms.transliterate_circled]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
test_data_fmtspec = u"Source {0} returned 0 rows, source {1} returned 1 row."
expected = u"Ⓢⓞⓤⓡⓒⓔ {0} ⓡⓔⓣⓤⓡⓝⓔⓓ ⓪ ⓡⓞⓦⓢ, ⓢⓞⓤⓡⓒⓔ {1} ⓡⓔⓣⓤⓡⓝⓔⓓ ① ⓡⓞⓦ."
self.assertEqual(expected, self.util.pseudolocalize(test_data_fmtspec))
test_data_printffmtspec = u"Source %(source0)s returned 0 rows, source %(source1)s returned 1 row."
expected = u"Ⓢⓞⓤⓡⓒⓔ %(source0)s ⓡⓔⓣⓤⓡⓝⓔⓓ ⓪ ⓡⓞⓦⓢ, ⓢⓞⓤⓡⓒⓔ %(source1)s ⓡⓔⓣⓤⓡⓝⓔⓓ ① ⓡⓞⓦ."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
test_data_printffmtspec = u"Source %s returned %d rows."
expected = u"Ⓢⓞⓤⓡⓒⓔ %s ⓡⓔⓣⓤⓡⓝⓔⓓ %d ⓡⓞⓦⓢ."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
def test_transliterate_fullwidth(self):
expected = u"The quick brown fox jumps over the lazy dog"
self.util.transforms = [pseudol10nutil.transforms.transliterate_fullwidth]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
test_data_fmtspec = u"Source {0} returned 0 rows, source {1} returned 1 row."
expected = u"Source {0} returned 0 rows, source {1} returned 1 row."
self.assertEqual(expected, self.util.pseudolocalize(test_data_fmtspec))
test_data_printffmtspec = u"Source %(source0)s returned 0 rows, source %(source1)s returned 1 row."
expected = u"Source %(source0)s returned 0 rows, source %(source1)s returned 1 row."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
test_data_printffmtspec = u"Source %s returned %d rows."
expected = u"Source %s returned %d rows."
self.assertEqual(expected, self.util.pseudolocalize(test_data_printffmtspec))
def test_angle_brackets(self):
expected = u"《The quick brown fox jumps over the lazy dog》"
self.util.transforms = [pseudol10nutil.transforms.angle_brackets]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
def test_curly_brackets(self):
expected = u"❴The quick brown fox jumps over the lazy dog❵"
self.util.transforms = [pseudol10nutil.transforms.curly_brackets]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
def test_square_brackets(self):
expected = u"⟦The quick brown fox jumps over the lazy dog⟧"
self.util.transforms = [pseudol10nutil.transforms.square_brackets]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
def test_pad_length(self):
expected = u"The quick brown fox jumps over the lazy dog﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎ЍאdžᾏⅧ㈴㋹퓛ﺏ𝟘🚦﹎Ѝ"
self.util.transforms = [pseudol10nutil.transforms.pad_length]
self.assertEqual(expected, self.util.pseudolocalize(self.test_data))
if __name__ == "__main__":
unittest.main()
|
'''
A situation where 'tab_line_stays_inside' needs to be False.
* If tab_line_stays_inside is True, the ends of the lines of the individual
  MyTabs widgets no longer join up, which makes them look bad.
'''
from kivy.app import runTouchApp
from kivy.lang import Builder
import kivyx.uix.behavior.tablikelooks
KV_CODE = '''
#:set LINE_WIDTH 2
<MyTab@ToggleButtonBehavior+Image>:
size_hint_min: self.texture.size if self.texture else (1, 1)
source: r'data/logo/kivy-icon-48.png'
group: 'test'
<MyTabs@KXTablikeLooksBehavior+BoxLayout>:
tab_line_color: '#AAAAFF'
tab_line_stays_inside: False
tab_line_width: LINE_WIDTH
spacing: 20
padding: 20
size_hint_min: self.minimum_size
GridLayout:
cols: 3
rows: 3
padding: LINE_WIDTH
Widget:
MyTabs:
orientation: 'horizontal'
tab_style_h: 'top'
MyTab:
MyTab:
MyTab:
MyTab:
Widget:
MyTabs:
orientation: 'vertical'
tab_style_v: 'left'
MyTab:
MyTab:
MyTab:
Widget:
size_hint: 1000, 1000
MyTabs:
orientation: 'vertical'
tab_style_v: 'right'
MyTab:
MyTab:
Widget:
MyTabs:
orientation: 'horizontal'
tab_style_h: 'bottom'
MyTab:
MyTab:
MyTab:
MyTab:
MyTab:
Widget:
'''
root = Builder.load_string(KV_CODE)
runTouchApp(root)
|
from statistics import mean
from math import floor, ceil
pos = list(map(int, input().split(",")))
# moving a distance d costs 1 + 2 + ... + d = d * (d + 1) // 2 fuel, and the optimum
# position lies within 0.5 of the mean, so evaluate both neighbouring integers
def fuel(target):
    return sum(abs(i - target) * (abs(i - target) + 1) // 2 for i in pos)
print(min(fuel(floor(mean(pos))), fuel(ceil(mean(pos)))))
|
class SearchingSorting:
def swap(self, A, x, y):
temp = A[x]
A[x] = A[y]
A[y] = temp
def linear_search(self, item, my_list):
self.found = False
self.position = 0
while self.position < len(my_list) and not self.found:
if my_list[self.position] == item:
self.found = True
self.position = self.position + 1
print('found??' + str(self.found))
return self.found
def selection_sort(self, alist):
for i in range(len(alist)):
self.least = i
for k in range(i + 1, len(alist)):
if int(alist[k]) < int(alist[self.least]):
self.least = k
SearchingSorting.swap(self, alist, self.least, i)
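# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# Shows the expected behaviour of linear_search and selection_sort on a small list.
if __name__ == '__main__':
    tools = SearchingSorting()
    numbers = [5, 3, 8, 1, 9]
    tools.linear_search(8, numbers)   # prints "found??True" and returns True
    tools.selection_sort(numbers)     # sorts the list in place
    print(numbers)                    # [1, 3, 5, 8, 9]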
|
from math import sqrt, log
class Node():
def __init__(self, game, move = None, parent = None):
self.player_to_move = game.player_to_move
self.remaining_moves = game.get_moves()
self.move = move
self.parent = parent
self.cumulative_score = 0.0
self.visits_count = 0
self.children = []
self.uct_score = 0.0
def has_children(self):
return len(self.children) > 0
def has_remaining_moves(self):
return len(self.remaining_moves) > 0
def update_uct_score(self):
self.uct_score = self.cumulative_score / self.visits_count \
+ sqrt(2.0 * log(self.parent.visits_count) / self.visits_count)
def select_child(self):
for child in self.children:
child.update_uct_score()
return max(self.children, key=lambda child: child.uct_score)
def create_child(self, game, move):
child = Node(game, move, self)
self.children.append(child)
self.remaining_moves = [m for m in self.remaining_moves if not m == move]
return child
def update(self, score):
self.cumulative_score += score
self.visits_count += 1
def get_expected_success_rate(self):
return (self.cumulative_score + 1) / (self.visits_count + 2)
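# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# One small MCTS loop (selection -> expansion -> backpropagation) driven by this Node
# class. DummyGame and the random rollout scores are assumptions; the real game object
# only needs to expose player_to_move and get_moves().
if __name__ == "__main__":
    import random

    class DummyGame:
        def __init__(self):
            self.player_to_move = 1

        def get_moves(self):
            return [0, 1, 2]

    game = DummyGame()
    root = Node(game)
    for _ in range(10):
        node = root
        # selection: descend through fully expanded nodes using the UCT score
        while not node.has_remaining_moves() and node.has_children():
            node = node.select_child()
        # expansion: add one child for an untried move
        if node.has_remaining_moves():
            move = random.choice(node.remaining_moves)
            node = node.create_child(game, move)
        # backpropagation of a (random) rollout result up to the root
        score = random.random()
        while node is not None:
            node.update(score)
            node = node.parent
    best = max(root.children, key=lambda child: child.visits_count)
    print("most visited move:", best.move, "success rate:", best.get_expected_success_rate())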
|
# frase = str(input('Digite uma frase para ver se é Palindromo: ')).strip().upper().replace(' ','')
# inverso = ''
# for i in range(len(frase)-1,-1,-1):
# inverso += frase[i]
# if inverso == frase:
# print(f'Sua frase {frase} é um palindromo')
# else:
# print(f'Sua frase {frase} não é um palindromo')
# it can also be done this way
frase = str(input('Digite a frase para saber se é um palindromo: ')).upper().strip()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for i in range(len(junto)-1,-1,-1):
    inverso += junto[i]
if inverso == junto:
print(f'Sua frase {frase} é um palindromo')
else:
print(f'Sua frase {frase} não é um palindromo') |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import darknet_tools
import functools
import json
import os
import settings
import subprocess
import sys
import cv2
from collections import defaultdict
from pythonapi import common_tools, eval_tools
from six.moves import cPickle
import glob
import imp
def read(file_paths):
all = defaultdict(list)
removal = (0., 0., 0.)
size_ranges = ((float('-inf'), float('inf')), (32., float('inf')), (64., float('inf')))
img_lists=glob.glob(settings.TEST_IMAGE_DIR+"/*.jpg")
levelmap=dict()
for img_path in img_lists:
img=cv2.imread(img_path)
imshape= img.shape
img_id = os.path.basename(img_path)[:-4]
for level_id, (cropratio, cropoverlap) in enumerate(settings.TEST_CROP_LEVELS):
cropshape = (int(round(settings.TEST_IMAGE_SIZE // cropratio)), int(round(settings.TEST_IMAGE_SIZE // cropratio)))
for o in darknet_tools.get_crop_bboxes(imshape, cropshape, (cropoverlap, cropoverlap)):
levelmap[img_id,level_id, o['name']] = (o['xlo'], o['ylo'], cropshape[1], cropshape[0])
if img_id=='4' and o['name']=='19_17' and level_id==0: print("yes")
def bounded_bbox(bbox):
x, y, w, h = bbox
x1, y1 = x + w, y + h
x0, y0 = max(0, x), max(0, y)
x1, y1 = min(imshape[1], x1), min(imshape[0], y1)
return (x0, y0, x1 - x0, y1 - y0)
def read_one(result_file_path):
with open(result_file_path) as f:
lines = f.read().splitlines()
one = []
for line in lines:
file_path, cate_id, prob, x, y, w, h = line.split()
image_id, level_id, crop_name = os.path.splitext(os.path.basename(file_path))[0].split('_', 2)
level_id = int(level_id)
cx, cy, cw, ch = levelmap[image_id,level_id, crop_name]
cate_id = settings.NUM_CHAR_CATES if proposal_output else int(cate_id) - 1
x, y, w, h, prob = float(x), float(y), float(w) - float(x), float(h) - float(y), float(prob)
longsize = max(w, h)
size_range = size_ranges[level_id]
if longsize < size_range[0] or size_range[1] <= longsize:
continue
rm = removal[level_id]
if (cx != 0 and x < rm) or (cy != 0 and y < rm) or (cx + cw != imshape[1] and x + w + rm >= cw) or (cy + ch != imshape[0] and y + h + rm >= ch):
continue
real_bbox = bounded_bbox((x + cx, y + cy, w, h))
if real_bbox[2] > 0 and real_bbox[3] > 0:
all[image_id].append({'image_id': image_id, 'cate_id': cate_id, 'prob': prob, 'bbox': real_bbox})
for file_path in file_paths:
read_one(file_path)
return all
def draw():
file = open("products/detections.jsonl")
lines = file.read().splitlines()
    if not os.path.isdir(os.path.dirname(settings.TEST_RESULTS_DIR)):
os.makedirs(os.path.dirname(settings.TEST_RESULTS_DIR))
for line in lines:
print(type(line))
line = eval(line)
img = cv2.imread(settings.TEST_IMAGE_DIR+'/'+line['image_id']+'.jpg')
detect = line['detections']
for i,det in enumerate(detect):
if det['score']>0.4:
tl = (int(det['bbox'][0]), int(det['bbox'][1]))
br = (int(det['bbox'][0] + det['bbox'][2]), int(det['bbox'][1] + det['bbox'][3]))
cv2.rectangle(img, tl, br, (0, 255, 0), 3)
cv2.putText(img, str(i), tl, 1, 1, (0,255,0))
cv2.imwrite(settings.TEST_RESULTS_DIR+'result_'+line['image_id']+'.jpg', img)
def main():
dn_merge = imp.load_source('dn_prepare', '../detection/merge_results.py')
file_paths = []
for split_id in range(settings.TEST_SPLIT_NUM):
result_file_path = darknet_tools.append_before_ext(settings.TEST_RESULTS_OUT, '.{}'.format(split_id))
file_paths.append(result_file_path)
print('loading ssd outputs')
unmerged = read(file_paths)
print('doing nms sort')
nms_sorted = dn_merge.do_nms_sort(unmerged, .5)
print('writing results')
dn_merge.write(nms_sorted, os.path.join(settings.PRODUCTS_ROOT, 'proposals.jsonl' if proposal_output else 'detections.jsonl'))
print('draw boundingbox')
draw()
if __name__ == '__main__':
proposal_output = 'proposal' in sys.argv[1:]
main()
|
import optparse
import textwrap
from herder.scripts import resolve_config, init_environment
def get_optparser(help):
"""Construct an OptionParser for the language scripts."""
parser = optparse.OptionParser(description=textwrap.dedent(help))
parser.add_option('-d', '--domain', dest='domain',
help='Name of the translation domain to manipulate.')
parser.add_option('-l', '--lang', dest='language',
help='Language code to manipulate.')
parser.add_option('-c', '--config', dest='config',
help='Path to application configuration to load.')
parser.set_defaults(domain=None,
language=None,
config='development.ini')
return parser
def add():
"""
Add a new language to a domain; if adding a refined language (ie,
"en-US", the base language will be copied. Otherwise English will be
used as the starting point."""
# parse the command line
opts, args = get_optparser(add.__doc__).parse_args()
# set up the environment
init_environment(resolve_config(opts.config))
from herder import model
if None in (opts.domain, opts.language):
raise Exception("You must specify the domain and language.")
domain = model.Domain.by_name(opts.domain)
new_language = domain.add_language(opts.language)
print "Added new language %s in %s." % (
new_language.name, new_language._message_store)
|
# Test the exploration module
import os
import numpy as np
import tempdir
from activepapers.storage import ActivePaper
from activepapers import library
from activepapers.exploration import ActivePaper as ActivePaperExploration
def make_local_paper(filename):
paper = ActivePaper(filename, "w")
paper.data.create_dataset("frequency", data=0.2)
paper.data.create_dataset("time", data=0.1*np.arange(100))
paper.add_module("my_math",
"""
import numpy as np
def my_func(x):
return np.sin(x)
""")
paper.close()
def check_local_paper(filename):
ap = ActivePaperExploration(filename)
from my_math import my_func
frequency = ap.data['frequency'][...]
time = ap.data['time'][...]
sine = my_func(2.*np.pi*frequency*time)
assert (sine == np.sin(2.*np.pi*frequency*time)).all()
ap.close()
def test_local_paper():
with tempdir.TempDir() as t:
filename = os.path.join(t, "test.ap")
make_local_paper(filename)
check_local_paper(filename)
if "NO_NETWORK_ACCESS" not in os.environ:
def test_published_paper():
with tempdir.TempDir() as t:
library.library = [t]
ap = ActivePaperExploration("doi:10.6084/m9.figshare.808595")
import time_series
ts = np.arange(10)
assert time_series.integral(ts, 1)[-1] == 40.5
ap.close()
|
# Copyright (c) 2015 Kurt Yoder
# See the file LICENSE for copying permission.
import os
import logging
from . import log_file
from . import json_file
class ParserHostException(Exception):
pass
class Host(object):
def __init__(self, path):
self.path = path
self.logger = logging.getLogger(__name__ + '.' + type(self).__name__)
self.parsers = []
for entry in os.listdir(self.path):
full = os.path.join(self.path, entry)
self.logger.debug('examining: %s', full)
if not os.path.isfile(full):
continue
self.logger.debug('it is a file')
parser = self.find_parser(full)
if parser is None:
continue
self.logger.debug('appending parser')
self.parsers.append(parser)
def find_parser(self, full_path):
# log file?
parser = log_file.get_parser(full_path)
if parser is not None:
self.logger.debug('retrieved log parser')
return parser
# json file?
parser = json_file.get_parser(full_path)
if parser is not None:
self.logger.debug('retrieved json parser')
return parser
return None
|
from django.apps import AppConfig
class SignupConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'signup'
|
from application import db
from store.models import Store
class Pet(db.Document):
external_id = db.StringField(db_field="ei")
name = db.StringField(db_field="n")
species = db.StringField(db_field="s")
breed = db.StringField(db_field="b")
age = db.IntField(db_field="a")
store = db.ReferenceField(Store, db_field="st")
price = db.DecimalField(db_field="p", precision=2, rounding='ROUND_HALF_UP')
sold = db.BooleanField(db_field="sl", default=False)
received_date = db.DateTimeField(db_field="rd")
sold_date = db.DateTimeField(db_field="sd")
live = db.BooleanField(db_field="l", default=True)
meta = {
'indexes': [('external_id', 'live'), ('species', 'breed', 'live'), ('store', 'live')]
}
|
import magicbot
import wpilib
from wpilib import VictorSP
from magicbot import tunable
from robotpy_ext.control.toggle import Toggle
from wpilib.buttons import JoystickButton
from common.ctre import WPI_TalonSRX, WPI_VictorSPX
from wpilib.kinematics import DifferentialDriveKinematics
from common.rev import IdleMode, MotorType
from components import Drive, Intake
r"""
/ \ / \
\ / (+) \ /
|
|X
(+) -------|--Y---- (-)
|
|
/ \ (-) / \
\ / \ /
Counter-Clockwise is Positive
/-\ ^
|X| | (+)
\-/ |
-->
"""
class Robot(magicbot.MagicRobot):
drive: Drive
intake: Intake
def createObjects(self):
# Joysticks
self.joystick_left = wpilib.Joystick(0)
self.joystick_right = wpilib.Joystick(1)
self.joystick_alt = wpilib.Joystick(2)
# Buttons
self.btn_intake_in = JoystickButton(self.joystick_alt, 2)
self.btn_intake_out = JoystickButton(self.joystick_alt, 1)
# Set up Speed Controller Groups
self.left_motors = wpilib.SpeedControllerGroup(
VictorSP(0),
VictorSP(1),
)
self.right_motors = wpilib.SpeedControllerGroup(
VictorSP(2),
VictorSP(3),
)
# Drivetrain
self.train = wpilib.drive.DifferentialDrive(self.left_motors, self.right_motors)
# Intake
self.intake_motor = VictorSP(4)
def teleopPeriodic(self):
self.drive.move(-self.joystick_left.getY(),
self.joystick_right.getX())
# Intake
if self.btn_intake_out.get():
if self.joystick_right.getX() >= -0.1 and self.joystick_right.getX() <= 0.1:
self.intake.spin(1)
elif self.btn_intake_in.get():
self.intake.spin(-1)
if __name__ == '__main__':
wpilib.run(Robot)
|
# Copyright 2020 Google LLC
# Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from airflow.models import BaseOperator, Variable
from osdu_api.libs.context import Context
from osdu_api.libs.refresh_token import AirflowTokenRefresher
from osdu_api.libs.search_record_ids import SearchId
logger = logging.getLogger()
class SearchRecordIdOperator(BaseOperator):
"""Operator to search files in SearchService by record ids.
Expects "record_ids" field in xcom.
"""
ui_color = '#10ECAA'
ui_fgcolor = '#000000'
FINISHED_STATUS = "finished"
RUNNING_STATUS = "running"
FAILED_STATUS = "failed"
def execute(self, context: dict):
"""Execute update workflow status.
If status assumed to be FINISHED then we check whether proceed files
are searchable or not.
If they are then update status FINISHED else FAILED
:param context: Airflow dagrun context
:type context: dict
"""
payload_context = Context.populate(context["dag_run"].conf)
record_ids = context["ti"].xcom_pull(key="record_ids", )
ids_searcher = SearchId(Variable.get("core__service__search__url"), record_ids, AirflowTokenRefresher(),
payload_context)
ids_searcher.check_records_searchable()
|
from pymtl import *
from lizard.util.rtl.interface import UseInterface
from lizard.msg.codes import ExceptionCode
from lizard.core.rtl.messages import FetchMsg, DecodeMsg, PipelineMsgStatus
from lizard.core.rtl.frontend.imm_decoder import ImmDecoderInterface, ImmDecoder
from lizard.core.rtl.frontend.sub_decoder import compose_decoders
from lizard.core.rtl.frontend.alu_decoder import AluDecoder
from lizard.core.rtl.frontend.csr_decoder import CsrDecoder
from lizard.core.rtl.frontend.branch_decoder import BranchDecoder
from lizard.core.rtl.frontend.jump_decoder import JumpDecoder
from lizard.core.rtl.frontend.mem_decoder import MemDecoder
from lizard.core.rtl.frontend.m_decoder import MDecoder
from lizard.core.rtl.frontend.system_decoder import SystemDecoder
from lizard.config.general import *
from lizard.util.rtl.pipeline_stage import gen_stage, StageInterface, DropControllerInterface
from lizard.util.arch import rv64g
ComposedDecoder = compose_decoders(AluDecoder, CsrDecoder, BranchDecoder,
JumpDecoder, MemDecoder, MDecoder,
SystemDecoder)
def DecodeInterface():
return StageInterface(FetchMsg(), DecodeMsg())
class DecodeStage(Model):
def __init__(s, decode_interface):
UseInterface(s, decode_interface)
s.imm_decoder = ImmDecoder(ImmDecoderInterface(DECODED_IMM_LEN))
s.decoder = ComposedDecoder()
s.connect(s.process_accepted, 1)
s.connect(s.decoder.decode_inst, s.process_in_.inst)
s.connect(s.imm_decoder.decode_inst, s.process_in_.inst)
s.connect(s.imm_decoder.decode_type_, s.decoder.decode_imm_type)
@s.combinational
def handle_decode():
s.process_out.v = 0
s.process_out.hdr.v = s.process_in_.hdr
if s.process_in_.hdr_status == PipelineMsgStatus.PIPELINE_MSG_STATUS_VALID:
if s.decoder.decode_success:
s.process_out.pc_succ.v = s.process_in_.pc_succ
s.process_out.serialize.v = s.decoder.decode_serialize
s.process_out.speculative.v = s.decoder.decode_speculative
s.process_out.store.v = s.decoder.decode_store
s.process_out.rs1_val.v = s.decoder.decode_rs1_val
s.process_out.rs1.v = s.process_in_.inst_rs1
s.process_out.rs2_val.v = s.decoder.decode_rs2_val
s.process_out.rs2.v = s.process_in_.inst_rs2
s.process_out.rd_val.v = s.decoder.decode_rd_val
s.process_out.rd.v = s.process_in_.inst_rd
s.process_out.imm_val.v = s.decoder.decode_imm_val
s.process_out.imm.v = s.imm_decoder.decode_imm
s.process_out.op_class.v = s.decoder.decode_op_class
s.process_out.pipe_msg.v = s.decoder.decode_result
else:
s.process_out.hdr_status.v = PipelineMsgStatus.PIPELINE_MSG_STATUS_EXCEPTION_RAISED
s.process_out.exception_info_mcause.v = ExceptionCode.ILLEGAL_INSTRUCTION
s.process_out.exception_info_mtval.v = zext(s.process_in_.inst, XLEN)
else:
s.process_out.exception_info.v = s.process_in_.exception_info
def line_trace(s):
return '{:<25}'.format(rv64g.isa.disassemble_inst(s.process_in_.inst))
RedirectDropControllerInterface = DropControllerInterface
class RedirectDropController(Model):
def __init__(s, interface):
UseInterface(s, interface)
s.connect(s.check_out, s.check_in_)
@s.combinational
def handle_check_keep():
s.check_keep.v = not s.check_msg
def DecodeRedirectDropController():
return RedirectDropController(
RedirectDropControllerInterface(DecodeMsg(), DecodeMsg(), 1))
Decode = gen_stage(DecodeStage, DecodeRedirectDropController)
|
# This file is auto-generated by /codegen/x86_64_test_encoding.py
# Reference opcodes are generated by:
# GNU assembler (GNU Binutils) 2.25
from peachpy.x86_64 import *
import unittest
class TestKADDB(unittest.TestCase):
def runTest(self):
pass
class TestKADDW(unittest.TestCase):
def runTest(self):
pass
class TestKADDD(unittest.TestCase):
def runTest(self):
pass
class TestKADDQ(unittest.TestCase):
def runTest(self):
pass
class TestKANDB(unittest.TestCase):
def runTest(self):
pass
class TestKANDW(unittest.TestCase):
def runTest(self):
pass
class TestKANDD(unittest.TestCase):
def runTest(self):
pass
class TestKANDQ(unittest.TestCase):
def runTest(self):
pass
class TestKANDNB(unittest.TestCase):
def runTest(self):
pass
class TestKANDNW(unittest.TestCase):
def runTest(self):
pass
class TestKANDND(unittest.TestCase):
def runTest(self):
pass
class TestKANDNQ(unittest.TestCase):
def runTest(self):
pass
class TestKORB(unittest.TestCase):
def runTest(self):
pass
class TestKORW(unittest.TestCase):
def runTest(self):
pass
class TestKORD(unittest.TestCase):
def runTest(self):
pass
class TestKORQ(unittest.TestCase):
def runTest(self):
pass
class TestKXNORB(unittest.TestCase):
def runTest(self):
pass
class TestKXNORW(unittest.TestCase):
def runTest(self):
pass
class TestKXNORD(unittest.TestCase):
def runTest(self):
pass
class TestKXNORQ(unittest.TestCase):
def runTest(self):
pass
class TestKXORB(unittest.TestCase):
def runTest(self):
pass
class TestKXORW(unittest.TestCase):
def runTest(self):
pass
class TestKXORD(unittest.TestCase):
def runTest(self):
pass
class TestKXORQ(unittest.TestCase):
def runTest(self):
pass
class TestKMOVB(unittest.TestCase):
def runTest(self):
pass
class TestKMOVW(unittest.TestCase):
def runTest(self):
pass
class TestKMOVD(unittest.TestCase):
def runTest(self):
pass
class TestKMOVQ(unittest.TestCase):
def runTest(self):
pass
class TestKNOTB(unittest.TestCase):
def runTest(self):
pass
class TestKNOTW(unittest.TestCase):
def runTest(self):
pass
class TestKNOTD(unittest.TestCase):
def runTest(self):
pass
class TestKNOTQ(unittest.TestCase):
def runTest(self):
pass
class TestKUNPCKBW(unittest.TestCase):
def runTest(self):
pass
class TestKUNPCKWD(unittest.TestCase):
def runTest(self):
pass
class TestKUNPCKDQ(unittest.TestCase):
def runTest(self):
pass
class TestKTESTB(unittest.TestCase):
def runTest(self):
pass
class TestKTESTW(unittest.TestCase):
def runTest(self):
pass
class TestKTESTD(unittest.TestCase):
def runTest(self):
pass
class TestKTESTQ(unittest.TestCase):
def runTest(self):
pass
class TestKORTESTB(unittest.TestCase):
def runTest(self):
pass
class TestKORTESTW(unittest.TestCase):
def runTest(self):
pass
class TestKORTESTD(unittest.TestCase):
def runTest(self):
pass
class TestKORTESTQ(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTLB(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTLW(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTLD(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTLQ(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTRB(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTRW(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTRD(unittest.TestCase):
def runTest(self):
pass
class TestKSHIFTRQ(unittest.TestCase):
def runTest(self):
pass
|
"""
Description: This test is to run onos Teston VTN scripts
List of test cases:
CASE1 - Northbound NBI test network/subnet/ports
CASE2 - Ovsdb test&Default configuration&Vm go online
[email protected]
"""
from adapters.client import client
if __name__=="__main__":
main = client()
main.getdefaultpara()
#scripts to run
runhandle = main.onosstart()
main.RunScript(runhandle, "FUNCvirNetNB")
main.RunScript(runhandle, "FUNCovsdbtest")
main.onosclean( runhandle )
|