# pink/cogs/images/ocr.py (from Fogapod/pink)
from __future__ import annotations
import os
import math
import itertools
from io import BytesIO
from typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence
import PIL
from PIL import ImageDraw, ImageFont, ImageFilter
from pink_accents import Accent
from pink.context import Context
from pink.cogs.utils.errorhandler import PINKError
from .types import StaticImage
_VertexType = Dict[str, int]
_VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType]
OCR_API_URL = "https://content-vision.googleapis.com/v1/images:annotate"
# avoid making this a hard dependency by not reading it in constants.py
# since it is not used anywhere else now
PINK_PROXY = os.environ["PINK_PROXY"]
PINK_PROXY_TOKEN = f"Bearer {os.environ['PINK_PROXY_TOKEN']}"
FONT = ImageFont.truetype("DejaVuSans.ttf")
class GoogleOCRError(PINKError):
KNOWN_HINTS = {
None: "The world is on fire, something really bad happened. I have no idea.",
        14: "This means Google cannot access the image URL. Try using a different one.",
}
def __init__(self, code: Optional[int], message: str):
self.code = code
self.message = message
super().__init__(str(self))
@classmethod
def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError:
error = response.get("error", {})
code = error.get("code")
message = error.get("message", "unknown")
return cls(code, message)
def __str__(self) -> str:
base = f"**{type(self).__name__}**[{self.code}]: {self.message}"
if (hint := self.KNOWN_HINTS.get(self.code)) is not None:
base += f"\n\nHint: {hint}"
return base
class TROCRException(Exception):
pass
class AngleUndetectable(TROCRException):
pass
class TextField:
    def __init__(self, full_text: str, src: PIL.Image.Image, padding: int = 3):
self.text = full_text
self.left: Optional[int] = None
self.upper: Optional[int] = None
self.right: Optional[int] = None
self.lower: Optional[int] = None
self.angle = 0
self._src_width, self._src_height = src.size
self._padding = padding
def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None:
if not self.initialized:
# Get angle from first word
self.angle = self._get_angle(vertices)
left, upper, right, lower = self._vertices_to_coords(
vertices, src_size, self.angle
)
self.left = left if self.left is None else min((self.left, left))
self.upper = upper if self.upper is None else min((self.upper, upper))
self.right = right if self.right is None else max((self.right, right))
self.lower = lower if self.lower is None else max((self.lower, lower))
@staticmethod
def _vertices_to_coords(
vertices: _VerticesType, src_size: Tuple[int, int], angle: int
) -> Tuple[int, int, int, int]:
"""Returns Pillow style coordinates (left, upper, right, lower)."""
# A - 0
# B - 1
# C - 2
# D - 3
#
# A----B
# | | angle = 360/0
# D----C
#
# A
# / \
# D B angle = 315
# \ /
# C
#
# D----A
# | | angle = 270
# C----B
#
# D
# / \
# C A angle = 225
# \ /
# B
#
# C---D
# | | angle = 180
# B---A
#
# C
# / \
# B D angle = 135
# \ /
# A
#
# B---C
# | | angle = 90
# A---D
#
# B
# / \
# A C angle = 45
# \ /
# D
        # vertices may lack coordinates (each .get can return None) and an angle
        # outside 0-360 would skip every branch below, so default all four bounds
        # to None and fall back to the image edges afterwards
        left = upper = right = lower = None
        if 0 <= angle <= 90:
left = vertices[0].get("x")
upper = vertices[1].get("y")
right = vertices[2].get("x")
lower = vertices[3].get("y")
elif 90 < angle <= 180:
left = vertices[1].get("x")
upper = vertices[2].get("y")
right = vertices[3].get("x")
lower = vertices[0].get("y")
elif 180 < angle <= 270:
left = vertices[2].get("x")
upper = vertices[3].get("y")
right = vertices[0].get("x")
lower = vertices[1].get("y")
elif 270 < angle <= 360:
left = vertices[3].get("x")
upper = vertices[0].get("y")
right = vertices[1].get("x")
lower = vertices[2].get("y")
if left is None:
left = 0
if upper is None:
upper = 0
if right is None:
right = src_size[0]
if lower is None:
lower = src_size[1]
return (left, upper, right, lower)
@staticmethod
def _get_angle(vertices: _VerticesType) -> int:
def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]:
return vertex.get("x"), vertex.get("y")
cycle = itertools.cycle(vertices)
x, y = get_coords(next(cycle))
for i in range(4):
next_x, next_y = get_coords(next(cycle))
# Any vertex coordinate can be missing
if None in (x, y, next_x, next_y):
x, y = next_x, next_y
continue
# algo: https://stackoverflow.com/a/27481611
# mypy literally does not see previous statement
delta_y = y - next_y # type: ignore
delta_x = next_x - x # type: ignore
degrees = math.degrees(math.atan2(delta_y, delta_x))
if degrees < 0:
degrees += 360
# compensate missing vertices
degrees += 90 * i
break
else:
raise AngleUndetectable
        # OCR often returns text tilted by 1-2 degrees; ignore that noise by
        # snapping the angle to the nearest multiple of 90 degrees
        # TEMPORARY: arbitrary angles are not supported yet
        return 90 * round(degrees / 90)
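    # A quick sanity check of the angle math above (hypothetical vertices, not
    # real API output; note that image y grows downward): for an upright word
    # with A=(0, 0) and B=(10, 0), atan2(0 - 0, 10 - 0) = 0 degrees -> angle 0;
    # for a word rotated 90 degrees with A=(0, 10) and B=(0, 0),
    # atan2(10 - 0, 0 - 0) = 90 degrees -> angle 90.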
@property
def coords(self) -> Tuple[int, int, int, int]:
return (self.left, self.upper, self.right, self.lower) # type: ignore
@property
def coords_padded(self) -> Tuple[int, int, int, int]:
return (
max((0, self.left - self._padding)), # type: ignore
max((0, self.upper - self._padding)), # type: ignore
min((self._src_width, self.right + self._padding)), # type: ignore
min((self._src_height, self.lower + self._padding)), # type: ignore
)
# TODO: implement w/h detection ASAP, this is temporary
# solutions:
# 1) https://stackoverflow.com/a/9972699
# text surrounding box dimensions are known, but i had no success implementing this
# 2) try to keep track of full coords and just calculate distance
# a lot of coordinates might be missing, 1st solution is more reliable if it worked
@property
def width(self) -> int:
if self.angle in (0, 180, 360):
return self.right - self.left # type: ignore
if self.angle in (90, 270):
return self.lower - self.upper # type: ignore
assert False # noqa
@property
def height(self) -> int:
if self.angle in (0, 180, 360):
return self.lower - self.upper # type: ignore
if self.angle in (90, 270):
return self.right - self.left # type: ignore
assert False # noqa
@property
def font_size(self) -> int:
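        # 1.3333333 is presumably the 96/72 pixel-to-point ratio, converting the
        # box height in pixels to a font size, minus a small margin (assumption)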
return max((1, int(1.3333333 * self.height) - 2))
@property
def stroke_width(self) -> int:
return max((1, round(self.font_size / 12)))
@property
def initialized(self) -> bool:
return None not in self.coords
def __repr__(self) -> str:
return f"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>"
def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]:
"""Extracts language for each paragraph in Google OCR output"""
def extract_language(data: Any) -> Optional[str]:
if (properties := data.get("property")) is None:
return None
if (languages := properties.get("detectedLanguages")) is None:
return None
return sorted(languages, key=lambda l: l.get("confidence", 1))[-1][
"languageCode"
]
for block in blocks:
block_language = extract_language(block)
for paragraph in block["paragraphs"]:
paragraph_language = extract_language(paragraph)
yield paragraph_language or block_language
# line grouping differs between simple annotations and paragraph grouping in
# full annotations. "EOL_SURE_SPACE" indicates line break matching simple
# annotations
for word in paragraph["words"]:
last_symbol = word["symbols"][-1]
if (symbol_properties := last_symbol.get("property")) is None:
continue
if (detected_break := symbol_properties.get("detectedBreak")) is None:
continue
if detected_break["type"] != "EOL_SURE_SPACE":
continue
yield paragraph_language or block_language
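# A minimal sketch of the `blocks` structure this iterator walks (the keys follow
# the Google Vision fullTextAnnotation format used above; the values are made up):
#
# blocks = [{
#     "property": {"detectedLanguages": [{"languageCode": "en", "confidence": 0.9}]},
#     "paragraphs": [{
#         "words": [{"symbols": [
#             {"property": {"detectedBreak": {"type": "EOL_SURE_SPACE"}}},
#         ]}],
#     }],
# }]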
async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]:
    async with ctx.session.post(
        PINK_PROXY,
headers=dict(authorization=PINK_PROXY_TOKEN),
json=dict(url=image_url, ttl=3600),
) as r:
if r.status != 200:
            await ctx.reply(
                f"Unable to reach proxy: {r.status}\n"
                "Will try the raw URL, but it will most likely fail"
            )
else:
json = await r.json()
image_url = f"{PINK_PROXY}/{json['id']}"
async with ctx.session.post(
OCR_API_URL,
params={
"key": os.environ["OCR_API_TOKEN"],
},
json={
"requests": [
{
"features": [{"type": "TEXT_DETECTION"}],
"image": {
"source": {
"imageUri": image_url,
}
},
}
]
},
headers={
"x-origin": "https://explorer.apis.google.com",
"x-referer": "https://explorer.apis.google.com",
},
) as r:
if r.status != 200:
if r.content_type.lower() != "application/json":
reason = await r.text()
if reason.count("\n") > 1:
# we got some garbage HTML response
reason = "unknown error"
raise PINKError(
f"Something really bad happened with underlying API[{r.status}]: {reason}"
)
json = await r.json()
raise PINKError(
f"Error in underlying API[{r.status}]: "
f'{json.get("message", "unknown error")}'
)
json = await r.json()
if len((responses := json["responses"])) == 0:
return {}
maybe_annotations = responses[0]
if "textAnnotations" not in maybe_annotations:
if "error" in maybe_annotations:
raise GoogleOCRError.from_response(maybe_annotations)
else:
raise PINKError("no text detected", formatted=False)
return maybe_annotations
def _draw_trocr(src: PIL.Image.Image, fields: Sequence[TextField]) -> BytesIO:
FIELD_CAP = 150
fields = fields[:FIELD_CAP]
src = src.convert("RGBA")
for field in fields:
cropped = src.crop(field.coords_padded)
# NOTE: next line causes segfaults if coords are wrong, debug from here
blurred = cropped.filter(ImageFilter.GaussianBlur(10))
# Does not work anymore for some reason, black stroke is good anyway
# field.inverted_avg_color = ImageOps.invert(
# blurred.resize((1, 1)).convert("L")
# ).getpixel((0, 0)) # ugly!!!
src.paste(blurred, field.coords_padded)
for field in fields:
# TODO: figure out how to fit text into boxes with Pillow without creating
# extra images
font = FONT.font_variant(size=field.font_size)
text_im = PIL.Image.new(
"RGBA",
size=font.getsize(field.text, stroke_width=field.stroke_width),
)
ImageDraw.Draw(text_im).text(
(0, 0),
text=field.text,
font=font,
spacing=0,
stroke_width=field.stroke_width,
stroke_fill=(0, 0, 0),
)
src.alpha_composite(
text_im.resize(
(
min((text_im.width, field.width)),
min((text_im.height, field.height)),
),
).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC),
field.coords_padded[:2],
)
result = BytesIO()
src.save(result, format="PNG")
return BytesIO(result.getvalue())
def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]:
if (accent_cog := ctx.bot.get_cog("Accents")) is None:
raise RuntimeError("No accents cog loaded")
return [
# trocr fully depends on newlines, apply accents to each line separately and
# replace any newlines with spaces to make sure text order is preserved
accent_cog.apply_accents_to_text(line, [accent]).replace("\n", " ")
for line in lines
]
async def _apply_translation(
ctx: Context,
lines: List[str],
language: str,
block_annotations: Any,
) -> List[str]:
if (translator_cog := ctx.bot.get_cog("Translator")) is None:
raise RuntimeError("No translator cog loaded")
# TODO: group by input languages to improve translation?
    need_translation = {}
    paragraph_languages = _language_iterator(block_annotations)
    for i, line in enumerate(lines):
        if next(paragraph_languages) is not None:
            need_translation[i] = line
    if not need_translation:
        raise PINKError(
            "nothing to translate on image "
            "(either entire text is in target language or language is undetected)",
            formatted=False,
        )
    translated = await translator_cog.translate(
        "\n".join(need_translation.values()), language
    )
    translated_lines = translated.split("\n")
    if len(translated_lines) != len(need_translation):
        raise RuntimeError(
            f"expected {len(need_translation)} translated lines, got {len(translated_lines)}"
        )
    new_lines = lines.copy()
    for idx, translated_line in zip(need_translation.keys(), translated_lines):
        new_lines[idx] = translated_line
    return new_lines
async def ocr_translate(
ctx: Context, image: StaticImage, language: Union[str, Accent]
) -> Tuple[BytesIO, str]:
src = await image.to_pil_image(ctx)
annotations = await ocr(ctx, image.url)
word_annotations = annotations["textAnnotations"][1:]
block_annotations = annotations["fullTextAnnotation"]["pages"][0]["blocks"]
    # The Google OCR API returns a separate entry for each word, but words can be
    # joined back into lines via the full image description: there, words are
    # already combined into lines, lines are separated by newlines, and there is
    # a trailing newline. Coordinates of words in the same line can then be merged.
    lines = annotations["fullTextAnnotation"]["text"][:-1].split("\n")
if isinstance(language, Accent):
new_lines = _apply_accents(ctx, lines, language)
else:
new_lines = await _apply_translation(ctx, lines, language, block_annotations)
# error reporting
notes = ""
current_word = 0
fields = []
for original_line, line in zip(lines, new_lines):
field = TextField(line, src)
remaining_line = original_line
# TODO: sane iterator instead of this
for word in word_annotations[current_word:]:
text = word["description"]
if remaining_line.startswith(text):
current_word += 1
remaining_line = remaining_line[len(text) :].lstrip()
# TODO: merge multiple lines into box
try:
field.add_word(word["boundingPoly"]["vertices"], src.size)
except AngleUndetectable:
notes += f"angle for `{word}` is undetectable\n"
else:
break
if field.initialized:
if line.casefold() != original_line.casefold():
fields.append(field)
if not fields:
raise PINKError("could not translate anything on image", formatted=False)
result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields)
stats = f"Words: {current_word}\nLines: {len(fields)}"
if notes:
stats += f"\nNotes: {notes}"
return result, stats
# search.py (from davehadley/conan-root-recipe)
#!/usr/bin/env python
from argparse import ArgumentParser
from subprocess import check_call
parser = ArgumentParser()
parser.add_argument("search", type=str)
args = parser.parse_args()
check_call(
f'grep {args.search} $(find /tmp/tmpbuild/ -name "CMakeLists.txt" -or -name "*.cmake")',
shell=True,
)
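# Example invocation (hypothetical search term):
#   ./search.py add_library
# which greps every CMakeLists.txt and *.cmake file under /tmp/tmpbuild.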
# saleor/lib/python3.7/site-packages/django_prices_openexchangerates/apps.py (from cxsper/saleor)
from django.apps import AppConfig
class DjangoPricesOpenExchangeRatesConfig(AppConfig):
name = 'django_prices_openexchangerates'
verbose_name = "Django prices openexchangerates integration"
# scripts/detector.py (from asafch/komodo)
#!/usr/bin/python
import roslib
import rospy
import cv2
import numpy as np
import cv_bridge
import time
from sensor_msgs.msg import Image
from std_msgs.msg import String
from common import *
from jupiter.msg import BallPosition
class Detector:
current_camera = None
camera_subscription = None
bridge = None
processed_image_publisher = None
processed_image_bw_publisher = None
offset = 100
wheel_publisher = None
state = ""
ball_at_middle_X_of_Asus_Camera = False
ball_positioned = False
front_camera_x_reference = 0
front_camera_y_reference = 0
move_robot_or_arm = ""
ball_position = None
def __init__(self):
init_arguments(self)
self.state = "NO_SEARCH"
rospy.Subscriber("/jupiter/detector/current_camera", String, self.camera_change)
rospy.Subscriber("/jupiter/detector/state_change", String, self.state_change)
self.robot_movement_publisher = rospy.Publisher("/jupiter/robot_movement/command", String, queue_size = 10)
self.state_machine_publisher = rospy.Publisher("/jupiter/robot_movement/result", String, queue_size = 10)
self.bridge = cv_bridge.CvBridge()
self.processed_image_publisher = rospy.Publisher("/jupiter/processed_image", Image, queue_size = 10)
self.processed_image_bw_publisher = rospy.Publisher("/jupiter/processed_image_bw", Image, queue_size = 10)
self.ball_position_publisher = rospy.Publisher("/jupiter/ball_position", BallPosition, queue_size = 10)
self.ball_position = BallPosition()
self.ball_position.detected = False
def camera_change(self, command):
self.current_camera = command.data
rospy.loginfo("Detector: current camera changed to %s", self.current_camera)
if self.camera_subscription:
self.camera_subscription.unregister()
if self.current_camera == "ASUS_CAMERA":
self.ball_at_middle_X_of_Asus_Camera = False
self.ball_at_bottom_message_sent = False
self.ball_positioned = False
self.offset = 100
self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, "/Asus_Camera/rgb/image_raw"), Image, self.process_image)
elif self.current_camera == "ARM_CAMERA":
self.camera_subscription = rospy.Subscriber("/Creative_Camera/rgb/image_raw" if self.is_simulation else "/komodo_1/arm_cam_node/image_raw", Image, self.process_image)
self.move_robot_or_arm = "MOVE_ROBOT"
def state_change(self, command):
if command.data == "SEARCH":
self.state = "SEARCH"
rospy.loginfo("Detector: starting to search for ball")
elif command.data == "NO_SEARCH":
self.state = "NO_SEARCH"
rospy.loginfo("Detector: stopped searching for ball")
def process_image(self, image):
if self.state == "NO_SEARCH":
return
image_cv = self.bridge.imgmsg_to_cv2(image, "bgr8")
blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0)
# The two cameras have different sensors, so their color rendition varies. Adjust for this issue when trying to filter the red colors in the image.
if self.current_camera == "ASUS_CAMERA":
(lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark red
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(blurred_image, lower, upper)
output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)
else: # ARM_CAMERA
blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0)
(lower, upper) = ([0, 0, 100], [70, 100, 255])
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(blurred_image, lower, upper)
output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)
(lower2, upper2) = ([65, 50, 170], [100, 70, 255])
lower2 = np.array(lower2, dtype = "uint8")
upper2 = np.array(upper2, dtype = "uint8")
mask2 = cv2.inRange(blurred_image2, lower2, upper2)
output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2)
output = output_light_orange
cv2.bitwise_or(output_dark_orange, output_light_orange, output)
image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
(thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
params = cv2.SimpleBlobDetector_Params()
params.filterByInertia = False
params.filterByConvexity = True
params.filterByColor = False
params.filterByCircularity = True
params.filterByArea = True
params.minArea = 30 if self.current_camera == "ASUS_CAMERA" else 15
params.maxArea = 2500 if self.current_camera == "ASUS_CAMERA" else 38400
params.minConvexity = 0.2
params.maxConvexity = 1.0
params.minCircularity = 0.25
params.maxCircularity = 1.0
if self.current_camera == "FRONT_CAMERA":
params.minDistBetweenBlobs = 20.0
        # Create a detector with the parameters, according to the installed
        # OpenCV version (2.x exposes the constructor directly; 3.x and later
        # use the _create factory)
        ver = cv2.__version__.split('.')
        if int(ver[0]) < 3:
            detector = cv2.SimpleBlobDetector(params)
        else:
            detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs
keypoints = detector.detect(image_binary)
circles = []
for keypoint in keypoints:
x = keypoint.pt[0]
y = keypoint.pt[1]
r = keypoint.size / 2.0
circles.append([x, y, r])
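        # e.g. a keypoint at pt=(120.0, 80.0) with size=30.0 becomes the circle
        # [120.0, 80.0, 15.0] (x, y, radius) -- OpenCV keypoint size is a diameter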
target = None
if circles:
circles = np.uint16(np.around(circles))
max_r = 0.0
target = circles[0]
for circle in circles:
if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == "ASUS_CAMERA" else True):
max_r = circle[2]
target = circle
        if target is not None:
processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
center = (target[0], target[1])
cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0)
processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0)
# publish the keypoints and target circle superimposed on the source image from the camera and on the b&w image
self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, "bgr8"))
self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, "bgr8"))
if target[2]:
rospy.loginfo("x: %d, y: %d, radius: %d", target[0], target[1], target[2])
if self.current_camera == "ASUS_CAMERA" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera:
self.ball_at_middle_X_of_Asus_Camera = True
self.robot_movement_publisher.publish("STOP-BALL_FOUND")
rospy.loginfo("Detector: ball found")
            elif target is not None and self.current_camera == "ASUS_CAMERA" and abs(target[1] - image.height) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent:
self.ball_at_bottom_message_sent = True
self.robot_movement_publisher.publish("STOP-BALL_AT_BOTTOM_OF_FRAME")
rospy.loginfo("Detector: ball is at bottom of Asus Camera frame")
            elif target is not None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ROBOT":
if self.is_simulation: # the real arm cam emits an upside-down image, so adjust for orientation
if target[1] < 10:
if target[0] < image.width * 0.45:
self.robot_movement_publisher.publish("FORWARD-LEFT")
elif target[0] > image.width * 0.55:
self.robot_movement_publisher.publish("FORWARD-RIGHT")
else:
self.robot_movement_publisher.publish("FORWARD_ARM")
else:
self.move_robot_or_arm = "MOVE_ARM"
self.robot_movement_publisher.publish("STOP-READY_TO_GRAB")
else:
if target[1] > 10:
if target[0] < image.width * 0.45:
self.robot_movement_publisher.publish("FORWARD-RIGHT")
elif target[0] > image.width * 0.55:
self.robot_movement_publisher.publish("FORWARD-LEFT")
else:
self.robot_movement_publisher.publish("FORWARD_ARM")
else:
self.move_robot_or_arm = "MOVE_ARM"
self.robot_movement_publisher.publish("STOP-READY_TO_GRAB")
            elif target is not None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ARM":
rospy.loginfo("Detector: publishing ball position")
self.ball_position.detected = True
self.ball_position.x = target[0]
self.ball_position.y = target[1]
self.ball_position.radius = target[2]
self.ball_position.img_width = image.width
self.ball_position.img_height = image.height
self.ball_position_publisher.publish(self.ball_position)
self.state = "NO_SEARCH"
def asus_ballpark(self, x, image):
        return (image.width * 0.65) <= x <= (image.width * 0.85)
if __name__ == "__main__":
rospy.init_node("detector")
detector = Detector()
    rospy.spin()
# LeetCode/1365_How_Many_Numbers_Are_Smaller_Than_the_Current_Number.py (from Achyut-sudo/PythonAlgorithms)
class Solution:
def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
ans = []
for i in range(0, len(nums)):
soln = 0
for j in range(0, len(nums)):
if(nums[j] < nums[i] and j != i):
soln += 1
                ans.append(soln)
        return ans
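# A faster alternative sketch (not part of the original repo): per the problem's
# stated constraints, nums[i] is bounded to 0..100, so a prefix sum over value
# counts answers each element in O(1), giving O(n) overall instead of the O(n^2)
# double loop above. The LeetCode-style List annotation follows the original.
class CountingSolution:
    def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
        counts = [0] * 102                 # index v + 1 holds the count of value v
        for v in nums:
            counts[v + 1] += 1
        for i in range(1, 102):
            counts[i] += counts[i - 1]     # now counts[v] = how many values are < v
        return [counts[v] for v in nums]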
# merger.py (from Roshan0204/Flipshope_Coupons_Scrap)
"""
Python Script:
Combine/merge multiple CSV files by concatenating their lines.
(pandas is imported but unused here; see the library-based sketch at the end)
"""
from os import chdir
import glob
import pandas as pdlib
# Produce a single CSV after combining all files
def produceOneCSV(list_of_files, csv_merge):
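    # note: the input files are assumed to be header-less; if each CSV carried
    # its own header row, those rows would be duplicated in the merged output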
for file in list_of_files:
csv_in = open(file)
for line in csv_in:
csv_merge.write(line)
csv_in.close()
# List all CSV files in the working dir
chdir("./csv_data")
extension = 'csv'
list_of_files = [i for i in glob.glob('*.{}'.format(extension))]
# print(list_of_files)
file_out = "coupons.csv"
csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note'
csv_merge = open(file_out, 'w')
csv_merge.write(csv_header)
csv_merge.write('\n')
produceOneCSV(list_of_files, csv_merge)
csv_merge.close()
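# A pandas-based sketch of the same merge (the otherwise-unused pdlib import above
# suggests this was the original intent); it assumes each input file carries the
# same header row, unlike the plain concatenation performed here:
#
# frames = (pdlib.read_csv(f) for f in list_of_files)
# pdlib.concat(frames).to_csv(file_out, index=False)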
# AppImageBuilder/app_dir/runtimes/classic/helpers/test_dynamic_loader.py (from gouchi/appimage-builder)
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import unittest
from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader
class DynamicLoaderTestCase(unittest.TestCase):
def setUp(self) -> None:
self.app_dir_files = [
'AppDir/lib/',
'AppDir/lib/ld-linux-aarch64.so.1',
'AppDir/lib/aarch64-linux-gnu',
'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so',
'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so',
'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2',
'AppDir/lib/aarch64-linux-gnu/libmemusage.so',
'AppDir/lib/aarch64-linux-gnu/ld-2.27.so',
'AppDir/lib/aarch64-linux-gnu/libpthread.so.0',
'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0',
'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1',
'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1',
'AppDir/lib/aarch64-linux-gnu/libutil.so.1',
'AppDir/lib/aarch64-linux-gnu/libnsl.so.1',
]
def test_get_binary_path(self):
dl = DynamicLoader('AppDir', self.app_dir_files)
self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so')
def test_list_libs(self):
dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1'])
        self.assertEqual(dl._list_libs(), ['path/to/shared_lib.so', 'path/to/shared_lib.so.1'])
# wallet/errors.py (from iesteban/bitcoin_bazaar_backend)
from django.db import IntegrityError
class InsufficientBalance(IntegrityError):
"""Raised when a wallet has insufficient balance to
run an operation.
We're subclassing from :mod:`django.db.IntegrityError`
so that it is automatically rolled-back during django's
transaction lifecycle.
"""
# model/training_utils.py (from dsosnoski/irvideo-classification)
import json
import matplotlib.pyplot as plt
import numpy as np
import pickle
import tensorflow as tf
import traceback
from support.data_model import TAG_CLASS_MAP, CLASSES
def load_raw_tracks(path):
    tracks = []
    with open(path, 'rb') as f:
        try:
            while True:
                tracks.append(pickle.load(f))
        except EOFError:
            # normal end of the pickle stream, nothing to report
            pass
        except Exception:
            traceback.print_exc()
    return tracks
def tracks_by_tag(tracks):
tag_tracks = {t: [] for t in CLASSES}
for track in tracks:
if track.tag in TAG_CLASS_MAP:
track.tag = TAG_CLASS_MAP[track.tag]
tag_tracks[track.tag].append(track)
return tag_tracks
def flatten_tag_tracks(tag_tracks):
flat_tracks = []
for tracks in tag_tracks.values():
flat_tracks += tracks
return flat_tracks
def print_tag_track_info(infos):
for k in infos:
tracks = infos[k]
fcount = np.sum([t.frame_count for t in tracks])
print(f'{k}: {len(tracks)} tracks with {fcount} frames')
def split_training_validation(tag_tracks, validate_frame_counts):
train_tracks = {}
validate_tracks = {}
for tag in tag_tracks.keys():
if tag in CLASSES:
tracks = tag_tracks[tag]
np.random.shuffle(tracks)
vcount = 0
train_use = []
validate_use = []
for track_info in tracks:
if vcount < validate_frame_counts[tag]:
validate_use.append(track_info)
vcount += track_info.frame_count
else:
train_use.append(track_info)
train_tracks[tag] = train_use
validate_tracks[tag] = validate_use
return train_tracks, validate_tracks
def first_time_model(model, training_config_text, model_config_text, save_directory):
    # model.summary() prints directly and returns None, so wrapping it in
    # print() would emit a stray "None"
    model.summary()
    with open(f'{save_directory}/model.txt', 'w') as f:
        def summary_print(s):
            print(s, file=f)
        f.write('\nTraining configuration:\n' + training_config_text + '\n')
        f.write('\nModel configuration:\n' + model_config_text + '\n')
        model.summary(print_fn=summary_print)
    tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True)
def frame_count(tracks):
return int(np.sum([t.frame_count for t in tracks]))
def all_frame_counts(tag_tracks):
return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES]))
def print_track_information(training_tracks, validation_tracks):
details = f'\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\n'
print(details)
print(' Train Validate')
for key in CLASSES:
print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}')
def dense_norm_relu(n, x):
x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
return tf.keras.layers.Activation("relu")(x)
def compute_scores(tp, fp, fn):
if tp != 0:
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2. * precision * recall / (precision + recall)
return precision, recall, fscore
else:
return 0.0, 0.0, 0.0
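# e.g. compute_scores(tp=8, fp=2, fn=2) gives precision 0.8, recall 0.8, F1 0.8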
def build_callback(config, save_directory):
callback_name = config['name']
config_copy = config.copy()
del config_copy['name']
if callback_name == 'checkpoint_callback':
checkpoint_filename = config_copy['filepath']
config_copy['filepath'] = save_directory + '/' + checkpoint_filename
print(f'saving checkpoints to {config_copy["filepath"]}')
return tf.keras.callbacks.ModelCheckpoint(**config_copy)
elif callback_name == 'lr_callback':
return tf.keras.callbacks.ReduceLROnPlateau(**config_copy)
elif callback_name == 'stopping_callback':
return tf.keras.callbacks.EarlyStopping(**config_copy)
else:
raise Exception(f'Unknown callback type {callback_name}')
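# Hypothetical config entry this expects (keys other than 'name' are passed
# straight through to the Keras callback constructor), e.g.:
# build_callback({'name': 'checkpoint_callback', 'filepath': 'best.h5',
#                 'save_best_only': True}, save_directory='runs/exp1')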
def draw_figures(history, plots, save_directory):
plt.figure(figsize=(8, 6 * len(plots)))
plt_position = len(plots) * 100 + 11
for i, plot in enumerate(plots):
plt.subplot(plt_position + i)
plt.title(plot['title'])
legends = []
for value in plot['values']:
plt.plot(history.history[value])
legend = value.replace('_', ' ').title()
legends.append('Training ' + legend)
value = 'val_' + value
plt.plot(history.history[value])
legends.append('Validation ' + legend)
plt.xlim(left=1)
plt.ylim(0.0,1.0)
plt.ylabel(plot['y-label'])
plt.xlabel('Epoch')
plt.legend(legends, loc=plot['caption-loc'], framealpha=.5)
plt.savefig(f'{save_directory}/history.png')
plt.close()
# lighting/integration_tests/tests/main.py (from ovaar/reactive-testing)
import pytest
from pytest_bdd import scenario
@scenario(
feature_name='features/lighting.feature',
scenario_name='The lights are controlled',
example_converters=dict(
light_id=str,
light_begin_state=str,
light_function=str,
light_final_state=str
))
def test_turn_on_the_lights():
pass
# misc/python/etc/cropping_project/crop_images.py (from mcqueenjordan/learning_sandbox)
from PIL import Image
import os
old_directory = 'old'
new_directory = 'new'
new_origin = (36, 32)
for file in os.listdir(old_directory):
filename = "{}/{}".format(old_directory, file)
img = Image.open(filename)
width = img.size[0]
height = img.size[1]
if height != 1040:
print(file)
continue
cropped_img = img.crop(
(
new_origin[0],
new_origin[1],
675 + new_origin[0],
976 + new_origin[1],
)
)
save_location = "{}/{}".format(new_directory, file)
cropped_img.save(save_location)
# clairvoyant/backtest.py (from uclatommy/Clairvoyant)
"""Backtest provides a way of exploring and testing various parameterizations.
This module provides classes that allow clients to experiment with different
machine learning parameterizations and test those on historical stock data.
"""
from numpy import meshgrid, arange, c_
from sklearn.preprocessing import StandardScaler
from numpy import vstack, hstack
from pytz import timezone
from clairvoyant import Clair
import matplotlib
matplotlib.use('Agg')
class Backtest(Clair):
"""Backtest is a type of machine learning classifier.
The purpose of ``Backtest`` is to collect statistics on the performance of
learned classifications while providing a quick and easy way to vary
parameters for rapid experimentation. Backtest also provides some
convenience functions for visualizing collected statistics.
:param variables: A list of columns that represent learning features.
:param trainStart: A datetime as a string that should be consistent with
the ``tz`` parameter. Defines the start date for model
training.
:param trainEnd: A datetime as a string that should be consistent with the
``tz`` parameter. Defines the end date for model training.
:param testStart: A datetime as a string that should be consistent with the
``tz`` parameter. Defines the start date for model
testing.
:param testEnd: A datetime as a string that should be consistent with the
``tz`` parameter. Defines the end date for model testing.
    :param buyThreshold: Defines the confidence level at which Clair will
                         recommend a buy. Default 0.65.
:param sellThreshold: Defines the confidence level at which Clair will
recommend a sell. Default 0.65.
:param C: A penalty parameter for false positives. See scikit-learn
documentation for more details. Default 1.
:param gamma: The kernel coefficient for machine learning. See scikit-learn
documentation for more details. Default 10.
:param continuedTraining: Determine if data from the testing period should
be used to continue training the model during the
testing phase. Default False.
:param tz: The timezone associated with the datetime parameters. Default
UTC.
:ivar debug: A boolean value that determines if debug strings will be
printed as backtesting is run. Warning: may result in a lot of
output.
"""
def __init__(
self, variables, trainStart, trainEnd, testStart, testEnd,
buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10,
continuedTraining=False, tz=timezone('UTC')
):
super().__init__(
variables, trainStart, trainEnd, testStart, testEnd,
buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C,
gamma=gamma, continuedTraining=continuedTraining, tz=tz
)
# Stats
self.stocks = []
self.dates = []
self.totalBuys = 0
self.correctBuys = 0
self.totalSells = 0
self.correctSells = 0
self.increases = 0
self.decreases = 0
self.periods = 0
self.debug = False
# Visualize
self.XX = None
self.yy = None
self.model = None
def runModel(self, data):
"""Run backtesting.
:param data: A ``History`` of stock data that includes observations in
both the training and test phases.
"""
# Learn and execute
model, X, y = self.learn(data)
self.execute(data, model, X, y)
# Save for vizualization purposes
self.dates.append([
self.trainStart.strftime('%m/%d/%Y'),
self.trainEnd.strftime('%m/%d/%Y'),
self.testStart.strftime('%m/%d/%Y'),
self.testEnd.strftime('%m/%d/%Y')
])
XX = vstack(X)
yy = hstack(y)
self.XX = XX
self.yy = yy
self.model = model
def buyLogic(self, *args, **kwargs):
"""Increment the buy count."""
self.totalBuys += 1
if self.debug:
super().buyLogic(*args, **kwargs)
def sellLogic(self, *args, **kwargs):
"""Increment the sell count."""
self.totalSells += 1
if self.debug:
super().sellLogic(*args, **kwargs)
def nextPeriodLogic(self, prediction, performance, *args, **kwargs):
"""Collect statistics on correct and incorrect buys and sells.
:param prediction: Value of 1 or -1 representing an up or down
performance.
:param performance: A positive or negative value representing the
actual observed performance.
"""
self.periods += 1
if performance > 0:
self.increases += 1
if prediction == 1:
self.correctBuys += 1
elif performance < 0:
self.decreases += 1
if prediction == -1:
self.correctSells += 1
if self.debug:
super().nextPeriodLogic(prediction, performance, *args, **kwargs)
def clearStats(self):
"""Reset all collected statistics."""
self.dates = []
self.totalBuys = 0
self.correctBuys = 0
self.totalSells = 0
self.correctSells = 0
self.increases = 0
self.decreases = 0
self.periods = 0
def buyStats(self):
"""Return the collected buy statistics."""
try:
return round((float(self.correctBuys)/self.totalBuys)*100, 2)
except ZeroDivisionError:
return float(0)
def sellStats(self):
"""Return the collected sell statistics."""
try:
return round((float(self.correctSells)/self.totalSells)*100, 2)
except ZeroDivisionError:
return float(0)
def displayConditions(self):
"""Print the learning and testing parameters."""
bld, end = '\033[1m', '\033[0m'
print(f'{bld}Conditions{end}')
i = 1
for var in self.variables:
print(f"X{i}: {var}")
i += 1
print(f"Buy Threshold: {self.buyThreshold*100}%")
print(f"Sell Threshold: {self.sellThreshold*100}%")
print(f"C: {self.C}")
print(f"gamma: {self.gamma}")
print(f"Continued Training: {self.continuedTraining}")
print(f"Total Testing Periods: {self.periods}")
print(f"Total Price Increases: {self.increases}")
print(f"Total Price Decreases: {self.decreases}")
def displayStats(self):
"""Print the collected backtesting statistics."""
bld, gre, red, end = '\033[1m', '\033[92m', '\033[91m', '\033[0m'
if len(self.dates) == 0:
print("Error: Please run model before displaying stats")
return
print(f'{bld}Stats{end}')
print("Stock(s):")
i = 0
for stock in self.stocks:
print(f'{stock} | ',
f"Training: {self.dates[i][0]}-{self.dates[i][1]}",
f"Testing: {self.dates[i][2]}-{self.dates[i][3]}")
i += 1
print(f"\nTotal Buys: {self.totalBuys}")
prnt = None
if self.buyStats() > 50:
prnt = f"{gre}{self.buyStats()}%{end}"
elif self.buyStats() < 50:
prnt = f"{red}{self.buyStats()}%{end}"
else:
prnt = f"{self.buyStats()}%"
print(f"Buy Accuracy: {prnt}")
print(f"Total Sells: {self.totalSells}")
if self.sellStats() > 50:
prnt = f'{gre}{self.sellStats()}%{end}'
elif self.sellStats() < 50:
prnt = f'{red}{self.sellStats()}%{end}'
else:
prnt = f'{self.sellStats()}%'
print(f"Sell Accuracy: {prnt}")
def visualizeModel(self, width=5, height=5, stepsize=0.02):
"""Output a visualization of the backtesting results.
The diagram overlays training and testing observations on top of
a color coded representation of learned recommendations. The color
intensity represents the distribution of probability.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
if len(self.variables) != 2:
print("Error: Plotting is restricted to 2 dimensions")
return
if (self.XX is None or self.yy is None or self.model is None):
print("Error: Please run model before visualizing")
return
X, y = self.XX, self.yy
X = StandardScaler().fit_transform(X)
self.model.fit(X, y)
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
xx, yy = meshgrid(
arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize)
)
plt.figure(figsize=(width, height))
cm = plt.cm.RdBu
RedBlue = ListedColormap(['#FF312E', '#6E8894'])
Axes = plt.subplot(1, 1, 1)
Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
stock = self.stocks[len(self.stocks)-1]
Axes.set_title(stock)
Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75)
Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue)
Axes.set_xlim(xx.min(), xx.max())
Axes.set_ylim(yy.min(), yy.max())
plt.savefig(stock+'.svg', format='svg')
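# A minimal usage sketch (the feature columns and `history` data source are
# hypothetical; `history` must cover both the training and testing date ranges):
#
# bt = Backtest(['EMA', 'SSO'], '2013-03-01', '2015-07-15',
#               '2015-07-16', '2016-07-16')
# bt.stocks.append('AAPL')
# bt.runModel(history)
# bt.displayStats()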
# assignment2/src/photogallery/tests/exporter_test.py (from rahulraj/web_projects)
import unittest
from ..utils.inject import assign_injectables
from ..utils.immutabledict import ImmutableDict
from ..generator.exporter import Exporter
directory_values = ['title', 'images']
picture_values = ['alt_text', 'src', 'caption_data']
class MockJinja2Template(object):
def __init__(self, required_values):
assign_injectables(self, locals())
def render(self, template_arguments):
for argument in template_arguments:
assert (argument in self.required_values)
class StubJpegPicture(object):
def __init__(self, alt_text, src, caption_data):
assign_injectables(self, locals())
def get_contents(self):
return []
def as_view(self):
return ImmutableDict.of(alt_text=self.alt_text, src=self.src,
caption_data=self.caption_data)
def get_exporter(self):
return Exporter(MockJinja2Template(picture_values))
def get_name(self):
return self.src
def get_output_file_name(self):
return self.src
class StubJpegDirectory(object):
def __init__(self, title, images):
assign_injectables(self, locals())
def get_contents(self):
return self.images
def as_view(self):
return ImmutableDict.of(title=self.title, images=self.images)
def get_exporter(self):
return Exporter(MockJinja2Template(directory_values))
def get_name(self):
return self.title
def get_output_file_name(self):
return self.title
class SimpleExporterTest(unittest.TestCase):
def setUp(self):
self.mock_template = MockJinja2Template(picture_values)
self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption')
self.exporter = Exporter(self.mock_template)
def test_it_should_populate_the_jinja2_template(self):
self.exporter.export(self.picture)
class DirectoryExporterTest(unittest.TestCase):
def setUp(self):
self.pictures_in_dir = [
StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'),
StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')]
self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir)
self.mock_template = MockJinja2Template(directory_values)
self.exporter = Exporter(self.mock_template)
def test_it_should_populate_the_jinja2_template(self):
self.exporter.export(self.stub_directory)
if __name__ == '__main__':
unittest.main()
# ag/sorting/common_subsequence.py (from justyre/jus)
# Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Longest common subsequence. The subsequence does not need to be contiguous in the original sequence."""
from typing import Sequence, Tuple
from tests import jovian
import functools
##########################################
### Test cases
tests = []
# List
tests.append({
'input': {
'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3],
'seq2': [6, 2, 4, 7, 1, 5, 6, 2, 3]
},
'output': ([1, 5, 6, 2, 3], 5)
})
# Tuple
tests.append({
'input': {
'seq1': (1, 3, 5, 6, 7, 2, 5, 2, 3),
'seq2': (6, 2, 4, 7, 1, 5, 6, 2, 3)
},
'output': ((1, 5, 6, 2, 3), 5)
})
# String
tests.append({
'input': {
'seq1': 'serendipitous',
'seq2': 'precipitation'
},
'output': ('reipito', 7)
})
# One is a subseq of the other
tests.append({
'input': {
'seq1': 'dense',
'seq2': 'condensed'
},
'output': ('dense', 5)
})
# Multiple subseqs with same length
# In this case, return the first common subseq (the first from the left of seq1).
tests.append({
'input': {
'seq1': 'abcdef',
'seq2': 'badcfe'
},
'output': ('ace', 3)
})
# No common subseq
tests.append({
'input': {
'seq1': 'a',
'seq2': 'bb'
},
'output': ('', 0)
})
# One is empty
tests.append({
'input': {
'seq1': '',
'seq2': 'stone'
},
'output': ('', 0)
})
##########################################
### Methods
def memoize(obj):
"""Cache a function's return value each time it is called. If called later with the same arguments, the cached value is directly returned rather than reevaluated."""
# Initialize cache and obj.cache as an empty dict
cache = obj.cache = {}
# The decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`,
# ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped is the orig func,
# while wrapper is the func to be updated.) So obj's attributes will be copied to
# memoizer. memoizer() is returned as the replacement for the orig `obj`
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
# When args are not present in cache's keys, add them
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
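# e.g. any pure function whose arguments stringify consistently can be cached
# the same way:
#
# @memoize
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)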
# The decorator executes memoize(lcs_recursive) and binds the returned memoizer
# in its place. Without memoization the plain recursion is far too slow
# (impractical once len(seq) > 7).
@memoize
def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]:
"""Find the longest common subsequence (both itself and its length) of two sequences recursively.
Note
----
If there are multiple subseqs with same length, return the first common subseq from the left of `seq1`.
"""
# Time complexity: O(2 ^ (len(seq1) + len(seq2)))
if type(seq1) != type(seq2):
raise TypeError("Both input sequences should be of the same type.")
# Consider all subclasses of generic type `Sequence`
if isinstance(seq1, list):
empty = []
elif isinstance(seq1, str):
empty = ''
elif isinstance(seq1, tuple):
empty = ()
else:
raise TypeError("This type of sequence is not supported; try list, str, tuple.")
if not seq1 or not seq2:
# If any one of the seqs is empty, then return the empty seq-type
return empty, 0
if seq1[0] == seq2[0]:
if isinstance(seq1, list):
add_elem = [seq1[0]]
elif isinstance(seq1, str):
add_elem = seq1[0]
elif isinstance(seq1, tuple):
# A one-elem tuple can only be shown as (3,) but not (3)
add_elem = (seq1[0],)
return (
add_elem + lcs_recursive(seq1[1:], seq2[1:])[0],
1 + lcs_recursive(seq1[1:], seq2[1:])[1]
)
else:
# max(s1, s2, key=len) means to get from s1, s2 the one with bigger len()
return (
max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len),
max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1])
)
def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int:
"""Find the longest common subsequence (both itself and its length) of two sequences by dynamic programming.
Note
----
If there are multiple subseqs with same length, return the first common subseq from the left of `seq1`.
"""
# Time complexity: O(len1 * len2). Space complexity: O(len1 * len2).
# Step 1: find the lcs's length
if type(seq1) != type(seq2):
raise TypeError("Both input sequences should be of the same type.")
# Consider all subclasses of generic type `Sequence`
if isinstance(seq1, list):
empty = []
elif isinstance(seq1, str):
empty = ''
elif isinstance(seq1, tuple):
empty = ()
else:
raise TypeError("This type of sequence is not supported; try list, str, tuple.")
if not seq1 or not seq2:
# If any one of the seqs is empty, then return the empty seq-type
return empty, 0
len1, len2 = len(seq1), len(seq2)
# Use nested lists to make a (len1+1) * (len2+1) 2D array (ie a table).
# table[i][j] is the lcs length of seq1[0:i] and seq2[0:j]
table = [[0] * (len2 + 1) for _ in range(len1 + 1)]
for i in range(1, len1 + 1):
for j in range(1, len2 + 1):
# We start from range(1,) since seq[0:0] is empty, so its lcs w/any seq is 0
if seq1[i - 1] == seq2[j - 1]:
table[i][j] = table[i - 1][j - 1] + 1
else:
table[i][j] = max(table[i - 1][j], table[i][j - 1])
# The next two lines are equivalent; use either
# lcs_length = table[len1][len2]
lcs_length = table[-1][-1]
# Step 2: find the lcs ITSELF
lcs = empty
# Note: The vital idea here is, now that we know the length of lcs to be index,
# ie the elem at the lower right corner of `table`, we should travel from it
# BACKWARDS (ie going up and right `table`) to find the feasible lcs.
i, j = len1, len2
while i > 0 and j > 0:
if seq1[i-1] == seq2[j-1]:
if isinstance(seq1, list):
add_elem = [seq1[i-1]]
elif isinstance(seq1, str):
add_elem = seq1[i-1]
elif isinstance(seq1, tuple):
# A one-elem tuple can only be shown as (3,) but not (3)
add_elem = (seq1[i-1],)
lcs = add_elem + lcs
i -= 1
j -= 1
elif table[i-1][j] < table[i][j-1]:
# If the current elem of seq1 & seq2 are not the same, then find the larger
# of the two predecessors and go in that direction (ie in search of lcs).
# Note: Putting this `elif <` first is important; if we swap this elif with
# the next `else`, the resulting lcs will be the 1st common subseq from the
# left of seq2, instead of the left of seq1.
j -= 1
else:
i -= 1
return lcs, lcs_length
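# e.g. lcs_dynamic('abcdef', 'badcfe') fills a 7x7 table, reads length 3 from
# table[-1][-1], then walks back to recover 'ace' (matching the test case above)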
##########################################
### Test client
jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests)
# From the next two tests, we can see that memoized recursion is faster than plain-
# vanilla dynamic programming
jovian.evaluate_test_cases_justyre(func=lcs_recursive, tests=tests)
jovian.evaluate_test_cases_justyre(func=lcs_dynamic, tests=tests)
# source/StateManager.py (from mccartytim/csit104-term-project)
# This class manages the game's state
import pyglet
from pyglet import clock
from Entity import Asteroid, AsteroidDebris, Player
from Entity import ParticleSpawner, ParticleFactory, Bullet
from HUD import HUD
from pyglet.window import key
from Vect2 import Vect2
import math
# Target window size constant
WIDTH = 800
HEIGHT = 400
targetNo = 5 # number of asteroids to spawn
DEBOUNCE = 1
class StateManager(object):
def __init__(self):
self.quit = False
self._init_window()
self._init_game()
self.mode = "SPLASH"
# Prevent bouncing on switching game modes
self.debounce_timer = DEBOUNCE
# Create a window for the game
def _init_window(self):
# Window object represents the game's window
self.window = pyglet.window.Window(WIDTH, HEIGHT)
# Keys holds a handler that keeps track of keyboard state, part of pyglet
self.keys = pyglet.window.key.KeyStateHandler()
self.window.push_handlers(self.keys)
# Stage the game or return it to its initial state
def _init_game(self):
self.hud = HUD()
self.entities = []
self.spawn_player()
self.exhaust = ParticleSpawner(
self.player.pos.getCopy(),
self.player.angle + math.pi,
math.pi / 4, .01,
ParticleFactory(speed=20, color=(255, 0, 0)),
True)
self.entities.append(self.exhaust)
#Create a new instance of the Player class at the center of the screen
def spawn_player(self):
self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2))
self.entities.append(self.player)
# This function runs when the look is in game mode, and has all the updating/drawing logic
def game_loop(self, dt):
#Clear frame before looping
self.window.clear()
#print(pyglet.gl.get_current_context())
# On a proper engine the controller would probably be its own class.
# That level of abstraction makes it easier to use keyboards, mice, and
# other controllers the user may have
controller = {
'acc': self.keys[key.W],
'left': self.keys[key.A],
'right': self.keys[key.D],
'fire': self.keys[key.SPACE],
'quit': self.keys[key.ESCAPE],
'pause': self.keys[key.P]
}
self.quit = controller['quit']
if controller['pause'] and self.debounce_timer <= 0:
self.mode = "PAUSE"
self.debounce_timer = DEBOUNCE
self.player.input(controller)
#turn on thrust effect if ship is accelerating
self.exhaust.active = controller['acc']
self.exhaust.angle = (self.player.angle + math.pi)
self.exhaust.pos = self.player.pos.getCopy()
self.spawn_bullets()
self.spawn_asteroids()
self.detect_collisions()
for e in self.entities:
e.update(dt)
#for e in self.entities:
# print(e)
batch = pyglet.graphics.Batch()
for e in self.entities:
# batch.add expects a series of arguments
# most easily delivered as a tuple.
# * is the untuple argument.
batch.add(*e.draw())
# Filter out any dead objects
self.entities[:] = [e for e in self.entities if e.isAlive()]
# Draw objects to the frame
batch.draw()
self.hud.drawHUD()
# Determine if a bullet should be spawned, and then spawns a bullet
def spawn_bullets(self):
if self.player.isFiring():
self.entities.append(
Bullet(
self.player.pos.getCopy(),
self.player.angle
)
)
# Maintain a minimum asteroid population
def spawn_asteroids(self):
# Asteroid Spawning
asteroids = [e for e in self.entities if isinstance(e, Asteroid)]
if len(asteroids) < targetNo:
newAsteroid = Asteroid(3, Vect2(0, 0))
self.entities.append(newAsteroid)
# This function determines if any objects are colliding in a meaningful way for the game
def detect_collisions(self):
asteroids = [e for e in self.entities if isinstance(e, Asteroid)]
for asteroid in asteroids:
if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()):
self.player.kill()
# Check if player is actually dead, it may be in invuln
# period
                if not self.player.isAlive():
                    if self.hud.has_lives():
                        self.spawn_player()
                        self.hud.kill()
                    else:
                        self.mode = "GAMEOVER"
# Process asteroid/bullet collisions
for bullet in [e for e in self.entities if isinstance(e, Bullet)]:
for asteroid in asteroids:
if bullet.overlaps(
asteroid.hit_radius,
asteroid.pos.getCopy()):
asteroid.kill()
self.entities.append(
AsteroidDebris(
asteroid.pos.getCopy()))
if asteroid.size > 1:
# add two baby asteroids!
self.entities.append(
Asteroid(
asteroid.size - 1,
asteroid.pos.getCopy()))
self.entities.append(
Asteroid(
asteroid.size - 1,
asteroid.pos.getCopy()))
# Remove bullet
bullet.kill()
# Log the points
self.hud.hit()
# Inform the main function if the player requested to quit
def is_quit(self):
return self.quit
# Dispatch loop to the right function
def loop(self, dt):
if self.debounce_timer > 0:
self.debounce_timer -= dt
if self.mode == "GAME":
self.game_loop(dt)
elif self.mode == "PAUSE":
self.pause_loop(dt)
elif self.mode == "SPLASH":
self.splash_loop(dt)
elif self.mode == "GAMEOVER":
self.game_over_loop(dt)
        else:
            self.quit = True
            print("Error: Debug: state.mode == Invalid state!")
# Pause screen
def pause_loop(self, dt):
self.window.clear()
label = pyglet.text.Label("Game Paused: Press p to unpause, or ESC to quit", font_size=24,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.P] and self.debounce_timer <= 0:
self.mode = "GAME"
self.debounce_timer = DEBOUNCE
elif self.keys[key.ESCAPE]: self.quit = True
# Splash screen
def splash_loop(self, dt):
label = pyglet.text.Label("Rocks in Space: Press s to start", font_size=38,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.S]: self.mode = "GAME"
elif self.keys[key.ESCAPE]: self.quit = True
# Game over screen
def game_over_loop(self, dt):
self.window.clear()
label = pyglet.text.Label("Game over! Press S to restart, or ESC to quit", font_size=24,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.S]:
self.mode = "GAME"
self._init_game()
elif self.keys[key.ESCAPE]: self.quit = True
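# Typical driver for this class (hypothetical; the project's main module is not
# shown in this file):
#
# state = StateManager()
# pyglet.clock.schedule_interval(state.loop, 1 / 60.0)
# pyglet.app.run()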
# lab2.2/highlevel.py (from etozhekimm/lab2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def intsin(x):
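    # Taylor series for the sine integral Si(x) = integral from 0 to x of
    # sin(t)/t dt: the sum of (-1)^n * x^(2n+1) / ((2n+1) * (2n+1)!), with each
    # term derived from the previous one, accumulated until terms fall below eps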
eps = 0.000000000000001
term = x
sum = x
n = 1
while (term * term) > (eps * eps):
k = 2 * n + 1
term *= -x * x * (k - 2) / (2 * k * k * n)
sum += term
n += 1
return sum
if __name__ == '__main__':
    print(intsin(float(input("Enter x: "))))
# PP4E-Examples-1.4/Examples/PP4E/Gui/Clock/clock.py (from AngelLiang/PP4E)
"""
###############################################################################
PyClock 2.1: a clock GUI in Python/tkinter.
With both analog and digital display modes, a pop-up date label, clock face
images, general resizing, etc. May be run both standalone, or embedded
(attached) in other GUIs that need a clock.
New in 2.0: s/m keys set seconds/minutes timer for pop-up msg; window icon.
New in 2.1: updated to run under Python 3.X (2.X no longer supported)
###############################################################################
"""
from tkinter import *
from tkinter.simpledialog import askinteger
import math, time, sys
###############################################################################
# Option configuration classes
###############################################################################
class ClockConfig:
# defaults--override in instance or subclass
size = 200 # width=height
bg, fg = 'beige', 'brown' # face, tick colors
hh, mh, sh, cog = 'black', 'navy', 'blue', 'red' # clock hands, center
picture = None # face photo file
class PhotoClockConfig(ClockConfig):
# sample configuration
size = 320
picture = '../gifs/ora-pp.gif'
bg, hh, mh = 'white', 'blue', 'orange'
###############################################################################
# Digital display object
###############################################################################
class DigitalDisplay(Frame):
def __init__(self, parent, cfg):
Frame.__init__(self, parent)
self.hour = Label(self)
self.mins = Label(self)
self.secs = Label(self)
self.ampm = Label(self)
for label in self.hour, self.mins, self.secs, self.ampm:
label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg)
label.pack(side=LEFT) # TBD: could expand, and scale font on resize
def onUpdate(self, hour, mins, secs, ampm, cfg):
mins = str(mins).zfill(2) # or '%02d' % x
self.hour.config(text=str(hour), width=4)
self.mins.config(text=str(mins), width=4)
self.secs.config(text=str(secs), width=4)
self.ampm.config(text=str(ampm), width=4)
def onResize(self, newWidth, newHeight, cfg):
pass # nothing to redraw here
###############################################################################
# Analog display object
###############################################################################
class AnalogDisplay(Canvas):
def __init__(self, parent, cfg):
Canvas.__init__(self, parent,
width=cfg.size, height=cfg.size, bg=cfg.bg)
self.drawClockface(cfg)
self.hourHand = self.minsHand = self.secsHand = self.cog = None
def drawClockface(self, cfg): # on start and resize
if cfg.picture: # draw ovals, picture
try:
self.image = PhotoImage(file=cfg.picture) # bkground
except:
self.image = BitmapImage(file=cfg.picture) # save ref
imgx = (cfg.size - self.image.width()) // 2 # center it
imgy = (cfg.size - self.image.height()) // 2 # 3.x // div
self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image)
originX = originY = radius = cfg.size // 2 # 3.x // div
for i in range(60):
x, y = self.point(i, 60, radius-6, originX, originY)
self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins
for i in range(12):
x, y = self.point(i, 12, radius-6, originX, originY)
self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours
self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg)
def point(self, tick, units, radius, originX, originY):
angle = tick * (360.0 / units)
radiansPerDegree = math.pi / 180
pointX = int( round( radius * math.sin(angle * radiansPerDegree) ))
pointY = int( round( radius * math.cos(angle * radiansPerDegree) ))
return (pointX + originX+1), (originY+1 - pointY)
def onUpdate(self, hour, mins, secs, ampm, cfg): # on timer callback
if self.cog: # redraw hands, cog
self.delete(self.cog)
self.delete(self.hourHand)
self.delete(self.minsHand)
self.delete(self.secsHand)
originX = originY = radius = cfg.size // 2 # 3.x div
hour = hour + (mins / 60.0)
hx, hy = self.point(hour, 12, (radius * .80), originX, originY)
mx, my = self.point(mins, 60, (radius * .90), originX, originY)
sx, sy = self.point(secs, 60, (radius * .95), originX, originY)
self.hourHand = self.create_line(originX, originY, hx, hy,
width=(cfg.size * .04),
arrow='last', arrowshape=(25,25,15), fill=cfg.hh)
self.minsHand = self.create_line(originX, originY, mx, my,
width=(cfg.size * .03),
arrow='last', arrowshape=(20,20,10), fill=cfg.mh)
self.secsHand = self.create_line(originX, originY, sx, sy,
width=1,
arrow='last', arrowshape=(5,10,5), fill=cfg.sh)
cogsz = cfg.size * .01
self.cog = self.create_oval(originX-cogsz, originY+cogsz,
originX+cogsz, originY-cogsz, fill=cfg.cog)
self.dchars(self.ampm, 0, END)
self.insert(self.ampm, END, ampm)
def onResize(self, newWidth, newHeight, cfg):
newSize = min(newWidth, newHeight)
#print('analog onResize', cfg.size+4, newSize)
if newSize != cfg.size+4:
cfg.size = newSize-4
self.delete('all')
self.drawClockface(cfg) # onUpdate called next
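# Editor's sketch (added; not from the PP4E book): AnalogDisplay.point() maps a
# tick index on a dial with `units` ticks to clockface coordinates. The same
# math in isolation, with 3 o'clock on a 12-tick dial landing at (radius, 0):
#
#     import math
#     def tick_xy(tick, units, radius, originX, originY):
#         ang = math.radians(tick * 360.0 / units)
#         return (originX + round(radius * math.sin(ang)),
#                 originY - round(radius * math.cos(ang)))
#
#     assert tick_xy(3, 12, 100, 0, 0) == (100, 0)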
###############################################################################
# Clock composite object
###############################################################################
ChecksPerSec = 10 # second change timer
class Clock(Frame):
def __init__(self, config=ClockConfig, parent=None):
Frame.__init__(self, parent)
self.cfg = config
self.makeWidgets(parent) # children are packed but
self.labelOn = 0 # clients pack or grid me
self.display = self.digitalDisplay
self.lastSec = self.lastMin = -1
self.countdownSeconds = 0
self.onSwitchMode(None)
self.onTimer()
def makeWidgets(self, parent):
self.digitalDisplay = DigitalDisplay(self, self.cfg)
self.analogDisplay = AnalogDisplay(self, self.cfg)
self.dateLabel = Label(self, bd=3, bg='red', fg='blue')
parent.bind('<ButtonPress-1>', self.onSwitchMode)
parent.bind('<ButtonPress-3>', self.onToggleLabel)
parent.bind('<Configure>', self.onResize)
parent.bind('<KeyPress-s>', self.onCountdownSec)
parent.bind('<KeyPress-m>', self.onCountdownMin)
def onSwitchMode(self, event):
self.display.pack_forget()
if self.display == self.analogDisplay:
self.display = self.digitalDisplay
else:
self.display = self.analogDisplay
self.display.pack(side=TOP, expand=YES, fill=BOTH)
def onToggleLabel(self, event):
self.labelOn += 1
if self.labelOn % 2:
self.dateLabel.pack(side=BOTTOM, fill=X)
else:
self.dateLabel.pack_forget()
self.update()
def onResize(self, event):
if event.widget == self.display:
self.display.onResize(event.width, event.height, self.cfg)
def onTimer(self):
secsSinceEpoch = time.time()
timeTuple = time.localtime(secsSinceEpoch)
hour, min, sec = timeTuple[3:6]
if sec != self.lastSec:
self.lastSec = sec
ampm = ((hour >= 12) and 'PM') or 'AM' # 0...23
hour = (hour % 12) or 12 # 12..11
self.display.onUpdate(hour, min, sec, ampm, self.cfg)
self.dateLabel.config(text=time.ctime(secsSinceEpoch))
self.countdownSeconds -= 1
if self.countdownSeconds == 0:
self.onCountdownExpire() # countdown timer
self.after(1000 // ChecksPerSec, self.onTimer) # run N times per second
# 3.x // trunc int div
def onCountdownSec(self, event):
secs = askinteger('Countdown', 'Seconds?')
if secs: self.countdownSeconds = secs
def onCountdownMin(self, event):
        secs = askinteger('Countdown', 'Minutes?')
if secs: self.countdownSeconds = secs * 60
def onCountdownExpire(self):
# caveat: only one active, no progress indicator
win = Toplevel()
msg = Button(win, text='Timer Expired!', command=win.destroy)
msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy')
msg.config(padx=10, pady=10)
msg.pack(expand=YES, fill=BOTH)
win.lift() # raise above siblings
if sys.platform[:3] == 'win': # full screen on Windows
win.state('zoomed')
###############################################################################
# Standalone clocks
###############################################################################
appname = 'PyClock 2.1'
# use new custom Tk, Toplevel for icons, etc.
from PP4E.Gui.Tools.windows import PopupWindow, MainWindow
class ClockPopup(PopupWindow):
def __init__(self, config=ClockConfig, name=''):
PopupWindow.__init__(self, appname, name)
clock = Clock(config, self)
clock.pack(expand=YES, fill=BOTH)
class ClockMain(MainWindow):
def __init__(self, config=ClockConfig, name=''):
MainWindow.__init__(self, appname, name)
clock = Clock(config, self)
clock.pack(expand=YES, fill=BOTH)
# b/w compat: manual window borders, passed-in parent
class ClockWindow(Clock):
def __init__(self, config=ClockConfig, parent=None, name=''):
Clock.__init__(self, config, parent)
self.pack(expand=YES, fill=BOTH)
title = appname
if name: title = appname + ' - ' + name
self.master.title(title) # master=parent or default
self.master.protocol('WM_DELETE_WINDOW', self.quit)
###############################################################################
# Program run
###############################################################################
if __name__ == '__main__':
def getOptions(config, argv):
for attr in dir(ClockConfig): # fill default config obj,
try: # from "-attr val" cmd args
ix = argv.index('-' + attr) # will skip __x__ internals
except:
continue
else:
if ix in range(1, len(argv)-1):
if type(getattr(ClockConfig, attr)) == int:
setattr(config, attr, int(argv[ix+1]))
else:
setattr(config, attr, argv[ix+1])
#config = PhotoClockConfig()
config = ClockConfig()
if len(sys.argv) >= 2:
getOptions(config, sys.argv) # clock.py -size n -bg 'blue'...
#myclock = ClockWindow(config, Tk()) # parent is Tk root if standalone
#myclock = ClockPopup(ClockConfig(), 'popup')
myclock = ClockMain(config)
myclock.mainloop()
| 3.21875 | 3 |
exercices/28.py | haxuyennt38/python-learning | 0 | 12793368 | ## Compute and display the factorial
# read n directly from the keyboard
n = int(input())
# Compute the factorial
if n < 0:
    print('no factorial exists')
elif n == 0:
    print('the factorial is equal to 1')
else:
    factorial = 1
    for i in range(1, n + 1):
        factorial *= i
print(factorial)
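# Editor's sanity check (added; not part of the original exercise): the loop
# above agrees with the standard library for small non-negative inputs.
import math
for k in range(6):
    check = 1
    for j in range(1, k + 1):
        check *= j
    assert check == math.factorial(k)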
| 4 | 4 |
core/transport/mapping.py | JohnBat26/dp-agent | 0 | 12793369 | from core.transport.gateways.rabbitmq import RabbitMQAgentGateway, RabbitMQServiceGateway, RabbitMQChannelGateway
from core.connectors import ServiceGatewayHTTPConnector
GATEWAYS_MAP = {
'AMQP': {
'agent': RabbitMQAgentGateway,
'service': RabbitMQServiceGateway,
'channel': RabbitMQChannelGateway
}
}
CONNECTORS_MAP = {
'AMQP': ServiceGatewayHTTPConnector
}
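# Editor's usage sketch (added; assumes callers select classes by transport
# name, mirroring how the maps above are keyed -- not verified against the
# rest of the dp-agent codebase):
if __name__ == '__main__':
    transport = 'AMQP'
    print(GATEWAYS_MAP[transport]['agent'])   # -> RabbitMQAgentGateway
    print(CONNECTORS_MAP[transport])          # -> ServiceGatewayHTTPConnector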
| 1.492188 | 1 |
Tests/Methods/Geometry/test_split_line.py | tobsen2code/pyleecan | 95 | 12793370 | from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
import pytest
line_list = list()
line_list.append(Segment(begin=-1j, end=1j))
line_list.append(Segment(begin=1j, end=1j + 1))
line_list.append(Segment(begin=1j + 1, end=-1j + 1))
line_list.append(Segment(begin=-1j + 1, end=-1j))
surf = SurfLine(line_list=line_list, label="test", point_ref=0.5)
split_test = list()
# Cut Square top
line_list = list()
line_list.append(Segment(begin=0, end=1j))
line_list.append(Segment(begin=1j, end=1j + 1))
line_list.append(Segment(begin=1j + 1, end=1))
line_list.append(Segment(begin=1, end=0))
exp_top_surf = SurfLine(line_list=line_list, label="test", point_ref=0.5 + 0.5j)
# Cut Square bottom
line_list = list()
line_list.append(Segment(begin=-1j, end=0))
line_list.append(Segment(begin=0, end=1))
line_list.append(Segment(begin=1, end=-1j + 1))
line_list.append(Segment(begin=-1j + 1, end=-1j))
exp_bot_surf = SurfLine(line_list=line_list, label="test", point_ref=0.5 - 0.5j)
split_test.append(
{
"surf": surf,
"exp_top_surf": exp_top_surf,
"exp_bot_surf": exp_bot_surf,
"Z1": 0,
"Z2": 2,
"is_join": True,
}
)
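# Geometry note (editor's addition): the square built above spans x in [0, 1]
# and y in [-1, 1]; the cutting line Z1=0 -> Z2=2 runs along the real axis, so
# the expected halves are the upper strip (point_ref 0.5 + 0.5j) and the lower
# strip (point_ref 0.5 - 0.5j) constructed just before this.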
@pytest.mark.parametrize("test_dict", split_test)
def test_split_line(test_dict):
res_top_surf, res_bot_surf = test_dict["surf"].split_line(
Z1=test_dict["Z1"],
Z2=test_dict["Z2"],
is_join=test_dict["is_join"],
)
assert res_top_surf == test_dict["exp_top_surf"], (
"Differente Top surface:\nResult:\n"
+ str(res_top_surf)
+ "\nExpected:\n"
+ str(test_dict["exp_top_surf"])
)
assert res_bot_surf == test_dict["exp_bot_surf"], (
"Differente Bot surface:\nResult:\n"
+ str(res_bot_surf)
+ "\nExpected:\n"
+ str(test_dict["exp_bot_surf"])
)
if __name__ == "__main__":
for test_dict in split_test:
test_split_line(test_dict)
print("Done")
| 2.421875 | 2 |
suitcase/nxsas/tests/test__build_bluesky_document_path.py | jklynch/suitcase-sas | 1 | 12793371 | from suitcase.nxsas.utils import _parse_bluesky_document_path
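# Path grammar exercised below (editor's note, inferred from the assertions
# rather than from separate documentation):
#     "#bluesky/<doc>[/<stream>][/<key>...][@<attribute>]"
# where a <stream> component appears only for "desc" paths.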
def test__build_bluesky_document_path():
parsed_path = _parse_bluesky_document_path("#bluesky/start@abc")
assert parsed_path["doc"] == "start"
assert parsed_path["attribute"] == "abc"
parsed_path = _parse_bluesky_document_path("#bluesky/start/abc")
assert parsed_path["doc"] == "start"
assert parsed_path["keys"] == ("abc",)
parsed_path = _parse_bluesky_document_path("#bluesky/start/abc/def")
assert parsed_path["doc"] == "start"
assert parsed_path["keys"] == ("abc", "def")
parsed_path = _parse_bluesky_document_path("#bluesky/start/abc/def@ghi")
assert parsed_path["doc"] == "start"
assert parsed_path["keys"] == ("abc", "def")
assert parsed_path["attribute"] == "ghi"
parsed_path = _parse_bluesky_document_path("#bluesky/desc/primary/abc/def@ghi")
assert parsed_path["doc"] == "desc"
assert parsed_path["stream"] == "primary"
assert parsed_path["keys"] == ("abc", "def")
assert parsed_path["attribute"] == "ghi"
parsed_path = _parse_bluesky_document_path("#bluesky/stop/abc/def@ghi")
assert parsed_path["doc"] == "stop"
assert parsed_path["keys"] == ("abc", "def")
assert parsed_path["attribute"] == "ghi"
| 2.1875 | 2 |
Topic model/tfidf.py | dsh651470774/dshAlgorithm | 0 | 12793372 | # -*- coding: utf-8 -*-
import csv
import jieba
import codecs
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import xlwt
from sklearn import metrics
import matplotlib.pyplot as plt
import math
import jieba.posseg as pseg
def read_tingyongci():
path = "123.txt" #停用词分析
stop_words = []
csv_reader = codecs.open(path,"r", "gbk")
for row in csv_reader:
        row = row.strip('\r\n').encode("utf-8")
stop_words.append(row)
return stop_words
stop_words = read_tingyongci()
# Re-encode each list item from GB18030 to utf-8 (keeping the original value on failure)
def changeListCode(b):
a = []
for i in b:
try:
i1 = i.decode('GB18030').encode('utf-8')
a.append(i.decode('GB18030').encode('utf-8'))
except:
a.append(i)
return a
def readexam():
with open('all.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
column1 = [row for row in reader]
        content_exam = [i[3] for i in column1[1:]]  # column index 3 holds the text; skip the header row
        print 'The test set has %s sentences' % len(content_exam)
return content_exam
# Segment each item with jieba POS tagging and join the kept tokens with spaces
def segmentWord(cont):
c = []
cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an']
patt1 = re.compile(r'\[.*?\]')
for i in cont:
s=[]
line1 = re.sub(patt1, '', i)
wordList = pseg.cut(line1)
for i in wordList:
if i.word >= u'\u4e00' and i.word <= u'\u9fa5':
if i.word.encode('utf8') in stop_words:
pass
else:
if i.flag in cixing:
s.append(i.word.encode('utf8'))
b=" ".join(s)
c.append(b)
return c
def callog(X):
return math.log(X+1)
def drawPWords(wordWeightList,splitNum=200):
minVal=min(wordWeightList)
maxVal=max(wordWeightList)
stepVal=(maxVal-minVal)/float(splitNum)
freqList=[0]*splitNum
for i in range(len(wordWeightList)):
if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal:
freqList[0]+=1
continue
for j in range(1,splitNum):
if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal:
freqList[j] += 1
X=range(1,splitNum+1)
    # first subplot: histogram of TF-IDF weights
plt.subplot(121)
plt.xlabel("TF-IDF")
plt.ylabel("Frequance")
plt.title("TF-IDF Explore")
plt.bar(X, freqList)
    # second subplot: log-log (power-law) view
plt.subplot(122)
plt.xlabel("log(TF-IDF bins)")
plt.ylabel("log(Frequance)")
plt.title("TF-IDF powerlaw Explore")
plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList)))
plt.show()
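# --- Editor's sketch (added; not in the original script) ---
# The repeated "split" blocks below all perform the same step: sort terms by
# accumulated tf-idf weight and keep those above the mean of the current set.
# A reusable version of that step, under the same assumptions:
def mean_threshold_filter(words, weights):
    order = np.argsort(-np.array(weights))
    threshold = np.mean(np.array(weights))
    kept_words, kept_weights = [], []
    for idx in order:
        if weights[idx] < threshold:
            break
        kept_words.append(words[idx])
        kept_weights.append(weights[idx])
    return kept_words, kept_weights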
# corpus = ["我 来到 北京 清华大学", "他 来到 了 网易 杭研 大厦", "小明 硕士 毕业 与 中国 科学院"]
stopwords = read_tingyongci()
data_exam = readexam()
exam = changeListCode(data_exam)
content = segmentWord(exam)
# Compute TF-IDF weights
vectorizer = CountVectorizer()
tfidftransformer = TfidfTransformer()
tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content))  # build the term-frequency matrix first, then compute TF-IDF
print tfidf.shape
word = vectorizer.get_feature_names()  # all terms in the bag-of-words vocabulary
weight = tfidf.toarray()  # tf-idf matrix; element a[i][j] is the weight of term j in document i
wordWeightList=[0]*len(word)
for i in range(len(weight)):  # accumulate each term's tf-idf weight across all documents (outer loop: documents; inner loop: terms)
for j in range(len(word)):
wordWeightList[j]+=weight[i][j]
# Inspect the term-weight distribution under repeated splits
# First split: keep terms whose accumulated weight is above the mean
orderwords=np.argsort(-np.array(wordWeightList))
threshold=np.mean(np.array(wordWeightList))
wordList2=[]
wordWeightList2=[]
for i in range(len(wordWeightList)):
if wordWeightList[orderwords[i]] < threshold:
break
wordList2.append(word[orderwords[i]])
wordWeightList2.append(wordWeightList[orderwords[i]])
# Second split
orderwords2=np.argsort(-np.array(wordWeightList2))
threshold2=np.mean(np.array(wordWeightList2))
# wordList3=[]
# wordWeightList3=[]
# for i in range(len(wordWeightList2)):
# if wordWeightList2[orderwords2[i]] < threshold2:
# break
# wordList3.append(wordList2[orderwords2[i]])
# wordWeightList3.append(wordWeightList2[orderwords2[i]])
# print len(wordList3)
f = codecs.open("keywordsflag3.txt", "w", "utf-8")
for i in range(len(wordWeightList2)):
if wordWeightList2[orderwords2[i]]<threshold2:
break
f.write(wordList2[orderwords2[i]])
f.write('\n')
f.close()
# # Third split
# orderwords3=np.argsort(-np.array(wordWeightList3))
# threshold3=np.mean(np.array(wordWeightList3))
# wordList4=[]
# wordWeightList4=[]
# for i in range(len(wordWeightList3)):
# if wordWeightList3[orderwords3[i]] < threshold3:
# break
# wordList4.append(wordList3[orderwords3[i]])
# wordWeightList4.append(wordWeightList3[orderwords3[i]])
# print len(wordList4)
# drawPWords(wordWeightList4,200) | 2.53125 | 3 |
mercury/plugin/service/__init__.py | greenlsi/mercury_mso_framework | 1 | 12793373 | from .request_profile import ServiceRequestProfile
from .session_duration import ServiceSessionDuration
from .session_profile import ServiceSessionProfile
| 1.109375 | 1 |
07_user_input_and_while_loops/7_6_tree_exits.py | simonhoch/python_basics | 0 | 12793374 | prompt = "\nHi, please enter the pizza toppings you want"
prompt += "\n(Write 'quit' to exit) "
toppings = []
while True:
topping = input(prompt)
toppings.append(topping)
if topping == 'quit':
break
print('You chose the following toppings: ')
for topping in toppings[:-1]:
print('\t- ' + topping)
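# Variant 2: same behaviour, but the loop is controlled by a boolean flag
# instead of `break`.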
toppings = []
flag = True
while flag:
topping = input(prompt)
toppings.append(topping)
if topping == 'quit':
flag = False
print('You chose the following toppings: ')
for topping in toppings[:-1]:
print('\t- ' + topping)
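# Variant 3: ask for a count up front and loop exactly that many times.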
n_toppings = input('Enter the number of toppings you would like to add: ')
n_toppings = int(n_toppings)
n = 0
toppings = []
prompt = "\n Please enter pizza toppings you want: "
while n < n_toppings:
topping = input(prompt)
toppings.append(topping)
n += 1
print('You chose the following ' + str(n_toppings) + ' toppings: ')
for topping in toppings:
print('\t- ' + topping)
| 4.09375 | 4 |
scripts/test_similarity.py | MOOC-Learner-Project/edx-extension-code-similarity | 0 | 12793375 | from compare_trajectories import get_similarity
def test_samples():
print("1-1")
print(get_similarity('for print if', '1-1', should_validate=False))
print(get_similarity('for print if if if for', '1-1', should_validate=False))
print(get_similarity('for print if if if if if', '1-1', should_validate=False))
print(get_similarity('for', '1-1', should_validate=False))
print(get_similarity('for print', '1-1', should_validate=False))
print(get_similarity('for print if if if', '1-1', should_validate=False))
print(get_similarity('a\na\np\na\nfor i in range(10):\n print(i)\nr=2', '1-1'))
print("1-2")
print(get_similarity('for print if', '1-2', should_validate=False))
print(get_similarity('for print', '1-2', should_validate=False))
print(get_similarity('for break continue if print', '1-2', should_validate=False))
print("1-3")
print(get_similarity('for print if if else', '1-3', should_validate=False))
print(get_similarity('for', '1-3', should_validate=False))
print(get_similarity('for print', '1-3', should_validate=False))
print(get_similarity('for print for if else if', '1-3', should_validate=False))
if __name__ == "__main__":
test_samples()
| 3.359375 | 3 |
multiview_gpu/tests/test_mds.py | dani-garcia/multiview_gpu | 5 | 12793376 | import numpy as np
import tensorflow as tf
from numpy.testing import assert_array_almost_equal as array_eq
from sklearn.utils.testing import assert_raises
import pytest
import multiview_gpu.mvmds as mvmds
from multiview_gpu.util import load_data_tf
def test_preprocess_mds(sess):
data = np.arange(25, dtype=float).reshape((5, 5))
data = tf.convert_to_tensor(data, dtype=tf.float32)
preprocessed_data = sess.run(mvmds.preprocess_mvmds(data))
sim = np.array([[40., 20., 0., -20., -40.],
[20., 10., 0., -10., -20.],
[0., 0., 0., 0., 0.],
[-20., -10., 0., 10., 20.],
[-40., -20., 0., 20., 40.]])
array_eq(preprocessed_data, sim, decimal=4)
def test_mvmds_error():
    # Data and is_distance do not have the same length.
one = np.arange(25, dtype=float).reshape((5, 5))
two = np.arange(25, 50, dtype=float).reshape((5, 5))
data = [one, two]
is_distance = [False, False, False]
mvmds_est = mvmds.MVMDS(k=2)
assert_raises(ValueError, mvmds_est.fit, data, is_distance)
# Sample data matrices do not have the same number of rows
one = np.arange(25, dtype=float).reshape((5, 5))
two = np.arange(25, 49, dtype=float).reshape((4, 6))
data = [one, two]
is_distance = [False, False]
mvmds_est = mvmds.MVMDS(k=2)
assert_raises(ValueError, mvmds_est.fit, data, is_distance)
# k value cannot be negative
one = np.arange(25, dtype=float).reshape((5, 5))
two = np.arange(25, 50, dtype=float).reshape((5, 5))
data = [one, two]
is_distance = [False, False]
mvmds_est = mvmds.MVMDS(k=-2)
assert_raises(ValueError, mvmds_est.fit, data, is_distance)
# These test results come from the original multiview library
test_names = "data, is_distance, k, real_result"
test_params = [
(
np.arange(50, dtype=float).reshape((2, 5, 5)),
[False] * 2,
2,
np.array([[-0.632455532, -0.1989703693],
[-0.316227766, -0.6963962924],
[-0., -0.3305190213],
[0.316227766, 0.0994851846],
[0.632455532, -0.5969111078]])
),
(
np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]],
[[1, 4, 7], [2, 5, 8], [3, 6, 9]]]),
[False] * 2,
3,
np.array([[-0.740466335, 0.344058532, 0.5773502692],
[0.0722697384, -0.8132919227, 0.5773502692],
[0.6681965966, 0.4692333907, 0.5773502692]])
),
(
np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]],
[[1, 4, 7], [2, 5, 8], [3, 6, 9]]]),
[False, False],
3,
np.array([[-0.740466335, 0.344058532, 0.5773502692],
[0.0722697384, -0.8132919227, 0.5773502692],
[0.6681965966, 0.4692333907, 0.5773502692]])
),
(
np.arange(108, dtype=float).reshape((3, 6, 6)),
[False] * 3,
2,
np.array([[0.5976143047, 0.6346897855],
[0.3585685828, 0.1020481616],
[0.1195228609, 0.0049779591],
[-0.1195228609, -0.0049779591],
[-0.3585685828, -0.1020481616],
[-0.5976143047, 0.759138763]])
),
(
np.arange(256, dtype=float).reshape((4, 8, 8)),
[False] * 4,
5,
np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507],
[0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226],
[0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582],
[0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084],
[-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857],
[-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591],
[-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644],
[-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]])
)
]
@pytest.mark.parametrize(test_names, test_params)
def test_mvmds_multiple(sess, data, is_distance, k, real_result):
data_tf = tf.convert_to_tensor(data, dtype=tf.float32)
    result = sess.run(mvmds.mvmds(data_tf, is_distance, k=k))
from multiview.mvmds import mvmds as mds_cpu
print("Multiview Result")
np.set_printoptions(precision=10, suppress=True)
print(mds_cpu(data, is_distance, k=k))
array_eq(np.abs(result[:, 0]), np.abs(real_result[:, 0]), decimal=4)
| 2.203125 | 2 |
timefred/action/edit.py | giladbarnea/timefred | 0 | 12793377 | import os
import subprocess
import tempfile
import yaml
from timefred.error import NoEditor, InvalidYAML
from timefred.store import store
def edit():
if "EDITOR" not in os.environ:
raise NoEditor("Please set the 'EDITOR' environment variable")
data = store.load()
yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True)
cmd = os.getenv('EDITOR')
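    # Round-trip (editor's comment, added): dump current state to a temp YAML
    # file, open it in $EDITOR, then parse the edited text back and persist it.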
fd, temp_path = tempfile.mkstemp(prefix='timefred.')
with open(temp_path, "r+") as f:
f.write(yml.replace('\n- ', '\n\n- '))
f.seek(0)
subprocess.check_call(cmd + ' ' + temp_path, shell=True)
yml = f.read()
f.truncate()
os.close(fd)
os.remove(temp_path)
try:
        data = yaml.safe_load(yml)
    except yaml.YAMLError:
raise InvalidYAML("Oops, that YAML doesn't appear to be valid!")
store.dump(data)
| 2.453125 | 2 |
LINKEDLIST_std.py | Furion1995/little_projects | 0 | 12793378 | class Node:
def __init__(self, d, n=None, p=None):
self.data = d
self.next_node = n
self.prev_node = p
def __str__(self):
return '(' + str(self.data) + ')'
class LinkedList:
def __init__(self, r=None):
self.root = r
self.size = 0
def add(self, d):
new_node = Node(d, self.root)
self.root = new_node
self.size += 1
def find(self, d):
this_node = self.root
while this_node is not None:
if this_node.data == d:
return d
else:
this_node = this_node.next_node
return None
def remove(self, d):
this_node = self.root
prev_node = None
while this_node is not None:
if this_node.data == d:
if prev_node is not None: # data is in non-root
prev_node.next_node = this_node.next_node
else: # data is in root node
self.root = this_node.next_node
self.size -= 1
return True # data removed
else:
prev_node = this_node
this_node = this_node.next_node
return False # data not found
def print_list(self):
this_node = self.root
while this_node is not None:
print(this_node, end='->')
this_node = this_node.next_node
print('None') | 3.984375 | 4 |
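# Editor's demo (added; not part of the original file): a quick exercise of the
# LinkedList API defined above.
if __name__ == '__main__':
    ll = LinkedList()
    for v in (3, 5, 8):
        ll.add(v)              # list is now (8)->(5)->(3)->None
    ll.print_list()
    assert ll.find(5) == 5 and ll.find(42) is None
    assert ll.remove(5) is True and ll.size == 2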
src/transformers/models/longt5/modeling_longt5.py | shangz-ai/transformers | 0 | 12793379 | # coding=utf-8
# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LongT5 model."""
import copy
import math
import warnings
from typing import Any, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
logging,
replace_return_docstrings,
)
from .configuration_longt5 import LongT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LongT5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
# TODO: Update before the merge
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/long-t5-local-base",
"google/long-t5-local-large",
"google/long-t5-tglobal-base",
"google/long-t5-tglobal-large",
]
def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:
"""Pad a tensor so that a sequence length will be a multiple of `block_len`"""
pad_len = -x.shape[dim] % block_len
# Handle cases when an empty input sequence is given
if not all(x.shape):
new_shape = list(x.shape)
new_shape[dim] += pad_len
return torch.zeros(new_shape, dtype=x.dtype)
pad = [(0, 0)] * x.ndim
pad[dim] = (0, pad_len)
pad = sum(pad[::-1], ())
x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
return x
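# Editor's worked example (added): with x of shape (2, 5), block_len=4 and
# dim=1, pad_len = -5 % 4 = 3, so the result has shape (2, 8), the last three
# positions along dim 1 holding `pad_value`.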
def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:
"""Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length
is not a multiple of `block_len`, it will be padded first with selected `pad_value`.
"""
# pad tensor to multiple of block_len
if x.shape[dim] % block_len != 0:
x = _pad_to_multiple(x, block_len, dim, pad_value=0)
num_blocks = x.shape[dim] // block_len
output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]
# If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion
if 0 in output_shape:
return torch.empty(output_shape, dtype=x.dtype, device=x.device)
return x.reshape(output_shape)
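# Editor's worked example (added): x of shape (batch=2, seq_len=10, dim=512)
# with block_len=4 is first padded to seq_len=12 and then reshaped to
# (2, num_blocks=3, block_len=4, 512).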
def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:
"""Concatenate three consecutive blocks for each input block for local attentiont.
For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
"""
num_blocks = x.shape[block_dim]
pad = [(0, 0)] * x.ndim
pad[block_dim] = (1, 1)
pad = sum(pad[::-1], ())
# [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
blocks_list: List[torch.Tensor] = []
for i in range(3):
# We use indexing approach here:
# https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
indices = [slice(0, None)] * x.ndim
indices[block_dim] = slice(i, i + num_blocks)
indices = tuple(indices)
blocks_list.append(x[indices])
# [batch_size, num_blocks, 3 * block_len, ...]
return torch.cat(blocks_list, dim=sequence_dim)
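# Editor's worked example (added): for blocked states of shape
# (2, num_blocks=3, block_len=4, 512), the block dimension is padded to 5 and
# three shifted views are concatenated along the sequence dimension, giving
# (2, 3, 12, 512); block i now sees [block i-1, block i, block i+1].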
def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:
"""Makes 3-blocked relative position ids for local attention."""
position_ids = torch.arange(3 * block_len, dtype=torch.int32)
center_position_ids = position_ids[block_len:-block_len]
# [block_len, 3 * block_len]
relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)
return relative_position_ids
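# Editor's worked example (added): for block_len=2 this returns
#     [[-2, -1,  0,  1,  2,  3],
#      [-3, -2, -1,  0,  1,  2]]
# i.e. a (block_len, 3 * block_len) grid of key offsets relative to each query
# position in the center block.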
def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:
"""Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius."""
relative_position_ids = _make_3block_relative_position_ids(block_len)
locality_mask = torch.abs(relative_position_ids) < block_len
locality_mask = locality_mask[None, None, :, :]
locality_mask = locality_mask.to(local_attention_mask.device)
return torch.logical_and(local_attention_mask, locality_mask)
def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:
"""Prepare attention mask to be applied for a local attention."""
# [batch_size, num_blocks, block_len]
_blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1)
# [batch_size, num_block, 3 * block_len]
_3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2)
_blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1)
_3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2)
# [batch_size, num_block, block_len, 3 * block_len]
local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
# [batch_size, 1, num_block, block_len, 3 * block_len]
return local_attention_mask.unsqueeze(1).to(device)
def _make_global_fixed_block_ids(
attention_mask: torch.Tensor, global_block_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Obtain the "fixed block" global id corresponding to each input token.
    This implementation is a simplified version of the original Flaxformer implementation adapted from:
    https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
    In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens that do not make up
    a whole fixed block, are assigned to the preceding block.
Padding tokens from the original sequence are represented by -1.
"""
batch_size, seq_len = attention_mask.shape[:2]
def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:
block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1
block_ends = block_ends.to(block_ids.device)
true_block_ends = torch.logical_and(block_ends, block_ids >= 0)
full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1
block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)
return block_ids
fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size
fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)
global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)
_global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)
global_block_ids = torch.where(
global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound
)
# set padding tokens to -1
global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
# [batch_size, seq_len]
global_block_ids = handle_orphan_tokens(global_block_ids)
num_globals = seq_len // global_block_size
# [batch_size, seq_len // global_block_size]
if num_globals > 0:
_sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)
else:
_sequence_block_ids_max = torch.zeros(
batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device
)
global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1
global_segment_ids = global_segment_ids.to(attention_mask.device)
global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)
def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:
"""Create the relative position tensor for local -> global attention."""
block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
global_seq_len = global_segment_ids.shape[-1]
global_positions = torch.arange(global_seq_len, device=block_ids.device)
side_relative_position = global_positions - block_ids[..., None]
return side_relative_position.type(torch.int64)
def _create_global_aggregates(
hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int
) -> torch.Tensor:
"""Compute individual block aggregates by summing over individual blocks."""
# (batch..., seq_len, global_seq_len))
block_ids = block_ids.where(
block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)
)
one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]
return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype))
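# Editor's worked example (added): for block_ids [0, 0, 1, 1, -1] and
# global_seq_len=2, tokens are one-hot assigned to their block, padding
# (id -1) is routed to a throw-away column that the [:, :, :-1] slice drops,
# and the einsum sums each block's hidden states into a single aggregate,
# yielding a (batch, 2, hidden_dim) tensor.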
# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5
class LongT5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
try:
from apex.normalization import FusedRMSNorm
LongT5LayerNorm = FusedRMSNorm # noqa
logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
except ImportError:
# using the normal LongT5LayerNorm
pass
except Exception:
logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm")
pass
# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
class LongT5DenseActDense(nn.Module):
def __init__(self, config: LongT5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5
class LongT5DenseGatedActDense(nn.Module):
def __init__(self, config: LongT5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5
class LongT5LayerFF(nn.Module):
def __init__(self, config: LongT5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = LongT5DenseGatedActDense(config)
else:
self.DenseReluDense = LongT5DenseActDense(config)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
class LongT5Attention(nn.Module):
def __init__(self, config: LongT5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
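    # Editor's worked example (added): with the defaults used here
    # (bidirectional=True, num_buckets=32, max_distance=128) the buckets split
    # into 16 for negative and 16 for positive offsets; e.g. a relative
    # position of -3 maps to bucket 3, +3 maps to bucket 16 + 3 = 19, and a
    # large offset such as +40 lands in a logarithmic bucket (28).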
def compute_bias(self, query_length, key_length, device=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class LongT5LocalAttention(nn.Module):
def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.local_radius = config.local_radius
self.block_len = self.local_radius + 1
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
# Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
# Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, block_length: int):
"""Compute binned relative position bias"""
memory_position = torch.arange(
3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)
context_position = memory_position[block_length:-block_length]
# (block_length, 3 * block_length)
relative_position = memory_position[None, :] - context_position[:, None]
relative_position_bucket = self._relative_position_bucket(
relative_position, # (block_length, 3 * block_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
# (block_length, 3 * block_length, num_heads)
values = self.relative_attention_bias(relative_position_bucket)
# (1, 1, num_heads, block_length, 3 * block_length)
values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
return values
def forward(
self,
hidden_states,
mask=None,
position_bias=None,
layer_head_mask=None,
output_attentions=False,
):
batch_size, seq_length = hidden_states.shape[:2]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
def unshape(states):
"""reshape"""
return states.contiguous().view(batch_size, -1, self.inner_dim)
# get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
query_states = shape(self.q(hidden_states))
key_states = shape(self.k(hidden_states))
value_states = shape(self.v(hidden_states))
# Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
query_states = _split_into_blocks(query_states, self.block_len, dim=1)
key_states = _split_into_blocks(key_states, self.block_len, dim=1)
value_states = _split_into_blocks(value_states, self.block_len, dim=1)
# Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
# Compute scores
scores = torch.einsum(
"...qhd,...khd->...hqk", query_states, key_states
) # (batch_size, num_block, n_heads, block_len, 3 * block_len)
if position_bias is None:
# position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len)
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(self.block_len)
if mask is not None:
# Replace masked positions with -1e10 (according to the original implementation)
mask = torch.where(mask > 0, 0.0, -1e10)
# We need to adjust position bias shape to be sum with mask
position_bias = position_bias + mask.transpose(1, 2)
scores += position_bias
# (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
# (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_weights = attn_weights.type(value_states.dtype)
attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
attn_output = attn_output[:, :seq_length, :]
attn_output = self.o(attn_output)
present_key_value_state = None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class LongT5TransientGlobalAttention(nn.Module):
def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.local_radius = config.local_radius
self.block_len = self.local_radius + 1
self.global_block_size = config.global_block_size
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
        # Relative attention bias & layer norm for global attention
if self.has_relative_attention_bias:
self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
# Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
# Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
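    # Worked example (illustrative note, not part of the original file): with the
    # defaults bidirectional=True, num_buckets=32, max_distance=128, each direction
    # gets 16 buckets and max_exact = 8, so:
    #   relative_position = -1   -> bucket 1  (exact bin on the "past" side)
    #   relative_position = +1   -> bucket 17 (offset 16 for the "future" side + exact bin 1)
    #   relative_position = -200 -> bucket 15 (log-spaced bins saturate at 15 per side)
    #   relative_position = +200 -> bucket 31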
def compute_bias(self, block_length: int):
"""Compute binned relative position bias"""
memory_position = torch.arange(
3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)
context_position = memory_position[block_length:-block_length]
# (block_length, 3 * block_length)
relative_position = memory_position[None, :] - context_position[:, None]
relative_position_bucket = self._relative_position_bucket(
relative_position, # (block_length, 3 * block_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
# (block_length, 3 * block_length, num_heads)
values = self.relative_attention_bias(relative_position_bucket)
# (1, 1, num_heads, block_length, 3 * block_length)
values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
return values
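    # Shape sketch (illustrative): for block_length = 3, memory_position is
    # arange(9) and context_position is [3, 4, 5], so relative_position spans
    # [-(2 * block_length - 1), 2 * block_length - 1] = [-5, 5]: each query in a
    # block can attend to its own block plus one full block on either side.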
def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
# (batch_size, 1, seq_len, global_seq_len)
side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
# (batch_size, seq_len, global_seq_len)
side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
side_relative_position_bucket = self._relative_position_bucket(
side_relative_position,
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
# (batch_size, seq_len, global_seq_len, num_heads)
side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
# (batch_size, num_heads, seq_len, global_seq_len)
side_bias = side_bias.permute([0, 3, 1, 2])
# (batch_size, num_heads, seq_len, global_seq_len)
attention_side_bias = attention_side_bias + side_bias
return attention_side_bias
def forward(
self,
hidden_states,
mask=None,
position_bias=None,
layer_head_mask=None,
output_attentions=False,
):
batch_size, seq_length = hidden_states.shape[:2]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
def unshape(states):
"""reshape"""
return states.contiguous().view(batch_size, -1, self.inner_dim)
# Prepare components for transient-global attention
# Obtain block_ids and global_segment_ids
# global_seq_len := seq_len // self.global_block_size
# shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
block_ids, global_segment_ids = _make_global_fixed_block_ids(
            mask if mask is not None else torch.ones(hidden_states.shape[:-1], device=hidden_states.device),
self.global_block_size,
)
# Create global inputs
_global_seq_len = global_segment_ids.shape[-1]
global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
global_inputs = self.global_input_layer_norm(global_inputs)
# get query states -> (batch_size, seq_length, n_heads, dim_per_head)
query_states = shape(self.q(hidden_states))
key_states = shape(self.k(hidden_states))
value_states = shape(self.v(hidden_states))
# Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
side_key_states = shape(self.k(global_inputs))
side_value_states = shape(self.v(global_inputs))
# Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
query_states = _split_into_blocks(query_states, self.block_len, dim=1)
key_states = _split_into_blocks(key_states, self.block_len, dim=1)
value_states = _split_into_blocks(value_states, self.block_len, dim=1)
# Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
# Tile side inputs across local key/value blocks
# New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
reps = [1] * (side_key_states.ndim + 1)
reps[1] = key_states.shape[1]
side_key_states = side_key_states.unsqueeze(1).repeat(reps)
side_value_states = side_value_states.unsqueeze(1).repeat(reps)
# Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
# New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
key_states = torch.cat([key_states, side_key_states], dim=2)
value_states = torch.cat([value_states, side_value_states], dim=2)
# Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len)
scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
if mask is not None:
            # We need to adjust the position bias shape so it can be summed with the mask
local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
# Replace masked positions with -10_000 (according to the original implementation)
local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
else:
local_attention_mask = None
if position_bias is None:
            # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, 1, self.n_heads, self.block_len, 3 * self.block_len),
device=scores.device,
dtype=scores.dtype,
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(self.block_len)
if local_attention_mask is not None:
# (batch_size, 1, n_heads, block_len, 3 * block_len)
position_bias = position_bias + local_attention_mask.transpose(1, 2)
position_bias = position_bias.type(scores.dtype)
        # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
        if mask is None:
            mask = torch.ones(batch_size, seq_length, device=hidden_states.device)
# (batch_size, num_heads, seq_len, global_seq_len)
side_position_bias = self.compute_side_bias(mask, global_segment_ids)
# (batch_size, num_blocks, num_heads, block_len, global_seq_len)
side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
# (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
scores += position_bias
# (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_weights = attn_weights.type(value_states.dtype)
attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
attn_output = attn_output[:, :seq_length, :]
attn_output = self.o(attn_output)
present_key_value_state = None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
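# Usage sketch (assumption, not from the original file): a minimal standalone
# smoke test of the transient-global layer, relying on LongT5Config defaults
# for the relative-attention hyperparameters:
#
#   from transformers import LongT5Config
#   cfg = LongT5Config(d_model=64, d_kv=8, num_heads=8, local_radius=3, global_block_size=4)
#   attn = LongT5TransientGlobalAttention(cfg, has_relative_attention_bias=True)
#   out, _, _ = attn(torch.randn(2, 16, 64))  # block_len = 4, global_seq_len = 4
#   assert out.shape == (2, 16, 64)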
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
class LongT5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class LongT5LayerLocalSelfAttention(nn.Module):
"""Local self attention used in encoder"""
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
output_attentions=False,
**kwargs: Any, # to accept past_key_value and use_cache kwargs
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.LocalSelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class LongT5LayerTransientGlobalSelfAttention(nn.Module):
"""Transient-Global self attention used in encoder"""
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
output_attentions=False,
**kwargs: Any, # to accept past_key_value and use_cache kwargs
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.TransientGlobalSelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
class LongT5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False)
self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class LongT5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
if config.is_decoder:
attention_layer = LongT5LayerSelfAttention
elif config.encoder_attention_type == "local":
attention_layer = LongT5LayerLocalSelfAttention
elif config.encoder_attention_type == "transient-global":
attention_layer = LongT5LayerTransientGlobalSelfAttention
else:
raise ValueError(
"For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
f"but got {config.encoder_attention_type}."
)
self.layer = nn.ModuleList()
self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(LongT5LayerCrossAttention(config))
self.layer.append(LongT5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
if not self.is_decoder:
logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class LongT5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LongT5Config
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
@property
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, LongT5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, LongT5DenseActDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, LongT5DenseGatedActDense):
module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
if isinstance(module, LongT5TransientGlobalAttention):
module.global_relative_attention_bias.weight.data.normal_(
mean=0.0, std=factor * ((d_model) ** -0.5)
)
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (LongT5Attention, LongT5Stack)):
module.gradient_checkpointing = value
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert decoder_start_token_id is not None, (
"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the"
" pad_token_id. See LongT5 docs for more information"
)
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
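    # Worked example (illustrative): with decoder_start_token_id = 0 and
    # pad_token_id = 0, labels [[5, -100, 6]] become [[0, 5, 0]]: the start token
    # is prepended, the last label is dropped, and the shifted -100 is replaced
    # by the pad token.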
class LongT5Stack(LongT5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.local_radius = config.local_radius
self.block_len = self.local_radius + 1
self.block = nn.ModuleList(
[LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
self.gradient_checkpointing = False
# Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings
def get_input_embeddings(self):
return self.embed_tokens
# Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
# We use local attention in encoder self-attention, otherwise standard self & cross attentions are used
if self.is_decoder:
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, inputs_embeds.device
)
elif self.config.encoder_attention_type == "local":
extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)
else: # we need to use both local attention mask and standard extended mask for transient-global attention
extended_attention_mask = attention_mask
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them
            # layer_outputs = hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
            # (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
LONGT5_START_DOCSTRING = r"""
The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
    Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
    generative setting. The LongT5 model is an extension of the T5 model, and it enables using one of two different
    efficient attention mechanisms: (1) Local attention, or (2) Transient-Global attention.
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
    etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LONGT5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
            Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
            To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
            Training](./longt5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5
Training](./longt5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
LONGT5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
            Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
            Training](./longt5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
@add_start_docstrings(
"The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
LONGT5_START_DOCSTRING,
)
class LongT5Model(LongT5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: LongT5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = LongT5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = LongT5Stack(decoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, LongT5Model
>>> tokenizer = T5Tokenizer.from_pretrained("google/long-t5-local-base")
>>> model = LongT5Model.from_pretrained("google/long-t5-local-base")
>>> # Let's try a very long encoder input.
>>> input_ids = tokenizer(
... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
class LongT5ForConditionalGeneration(LongT5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
r"lm_head.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: LongT5Config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = LongT5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = LongT5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
            labels in `[0, ..., config.vocab_size - 1]`
Returns:
Examples:
```python
>>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
>>> model = LongT5ForConditionalGeneration.from_pretrained(
... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
... )
>>> # Let's try a very long input.
>>> input_ids = tokenizer(
... "summarize: " + 100 * "studies have shown that owning a dog is good for you ", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
abstractthe aim of this article is to summarize the studies have shown that owning a dog
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
            # the batch dimension of `past` is dim 0, gathered by index_select below
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
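    # Example (illustrative): with beam_idx = tensor([1, 1, 0]), every cached
    # key/value tensor of shape (batch_size, n_heads, seq_len, dim_per_head) has
    # its batch rows gathered in that order, so the cache follows the beams that
    # survived the current beam-search step.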
@add_start_docstrings(
"The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
LONGT5_START_DOCSTRING,
)
class LongT5EncoderModel(LongT5PreTrainedModel):
authorized_missing_keys = [
r"encoder.embed_tokens.weight",
]
def __init__(self, config: LongT5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = LongT5Stack(encoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
r"""
Returns:
Example:
```python
        >>> from transformers import AutoTokenizer, LongT5EncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
>>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base")
>>> input_ids = tokenizer(
... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
| 1.632813 | 2 |
BGD_sample.py | AnitaPetzler/BayesGauss | 2 | 12793380 | import BGD
import pickle
'''
Sample code showing implementation of the Bayesian Gaussian Decomposition algorithm BGD
(https://github.com/AnitaPetzler/BayesGauss/)
'''
def FindVelIndex(min_vel, max_vel, vel_axis):
'''
Finds the min and max indices corresponding to
the min and max velocities given.
'''
dv = vel_axis[1] - vel_axis[0]
v_at_0 = vel_axis[0]
min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv))
max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv))
if min_v_index < 0:
min_v_index = 0
return (min_v_index, max_v_index)
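# Example (illustrative): for a velocity axis running from -100 to +100 km/s in
# 1 km/s steps, FindVelIndex(-85, 75, vel_axis) returns (15, 175).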
vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)}
for source_name in ['4C+25.14', 'ch002']:
for em in ['absorption', 'emission']:
save_as_name = source_name + '_' + em
# Loading main line data
        with open('pickles/' + source_name + '_1665_' + em + '.pickle', 'rb') as f:
            spectrum_1665 = pickle.load(f)
        with open('pickles/' + source_name + '_1665_vel.pickle', 'rb') as f:
            vel_axis_1665 = pickle.load(f)
        with open('pickles/rms_' + source_name + '_1665_' + em + '.pickle', 'rb') as f:
            rms_1665 = pickle.load(f)
        with open('pickles/' + source_name + '_1667_' + em + '.pickle', 'rb') as f:
            spectrum_1667 = pickle.load(f)
        with open('pickles/' + source_name + '_1667_vel.pickle', 'rb') as f:
            vel_axis_1667 = pickle.load(f)
        with open('pickles/rms_' + source_name + '_1667_' + em + '.pickle', 'rb') as f:
            rms_1667 = pickle.load(f)
        # Loading satellite lines. The current version of BGD requires both satellite
        # lines, so if only one is present, duplicate it for the other.
        try:
            with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'rb') as f:
                spectrum_1612 = pickle.load(f)
            with open('pickles/' + source_name + '_1612_vel.pickle', 'rb') as f:
                vel_axis_1612 = pickle.load(f)
            with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'rb') as f:
                rms_1612 = pickle.load(f)
        except IOError:
            with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'rb') as f:
                spectrum_1612 = pickle.load(f)
            with open('pickles/' + source_name + '_1720_vel.pickle', 'rb') as f:
                vel_axis_1612 = pickle.load(f)
            with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'rb') as f:
                rms_1612 = pickle.load(f)
            print('1612 replaced by 1720 for ' + save_as_name)
        try:
            with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'rb') as f:
                spectrum_1720 = pickle.load(f)
            with open('pickles/' + source_name + '_1720_vel.pickle', 'rb') as f:
                vel_axis_1720 = pickle.load(f)
            with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'rb') as f:
                rms_1720 = pickle.load(f)
        except IOError:
            with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'rb') as f:
                spectrum_1720 = pickle.load(f)
            with open('pickles/' + source_name + '_1612_vel.pickle', 'rb') as f:
                vel_axis_1720 = pickle.load(f)
            with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'rb') as f:
                rms_1720 = pickle.load(f)
            print('1720 replaced by 1612 for ' + save_as_name)
# Trim spectra so all 4 cover the same velocity range.
(min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612)
(min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665)
(min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667)
(min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720)
vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]]
spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]]
rms = [rms_1612, rms_1665, rms_1667, rms_1720]
expected_min_fwhm = 1.
# Run BGD
final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name)
# Print results to terminal
BGD.ResultsReport(final_parameters, save_as_name)
| 3.078125 | 3 |
alphago_zero_sim-master/tournament.py | shreshthtuli/AlphaGoZero | 1 | 12793381 | <gh_stars>1-10
import os
import numpy as np
import importlib
import sys
import time
class Tournament():
def __init__(self, student_list, num_matches, board_size, komi):
self.student_list = student_list
self.num_matches = num_matches
self.board_size = board_size
self.komi = komi
self.folder_name = 'Tournament'
self.module_folder = 'modules'
if not os.path.exists(self.folder_name):
os.makedirs(self.folder_name)
if not os.path.exists(self.module_folder):
os.makedirs(self.module_folder)
def run_tournament(self):
n = len(self.student_list)
for i in range(n):
for j in range(i + 1, n):
p1 = self.student_list[i]
p2 = self.student_list[j]
root_folder = self.folder_name + '/' + str(p1) + '_' + str(p2)
head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi)
head_to_head.run_matches()
class RunMatches():
def __init__(self, p1, p2, num_matches, root_folder, board_size, komi):
self.player1 = p1
self.player2 = p2
self.num_matches = num_matches
self.root_folder = root_folder
self.board_size = board_size
self.komi = komi
if not os.path.exists(self.root_folder):
os.makedirs(self.root_folder)
def run_matches(self):
for match_num in range(self.num_matches):
first_player = None
second_player = None
if match_num % 2 == 0:
first_player = self.player1
second_player = self.player2
else:
first_player = self.player2
second_player = self.player1
match_folder = self.root_folder + '/match' + str(match_num + 1)
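            # Build a throwaway module that binds Player_1/Player_2 to this
            # match's entrants and then appends the generic single_match.py
            # driver; importing the module below constructs and runs the match.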
with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num) + '.py', 'w') as fw:
fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\n')
fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\n')
                with open('single_match.py', 'r') as fr:
                    lines = fr.readlines()
                for line in lines:
                    fw.write(line)
time.sleep(1)
tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num))
match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder)
winner, final_score = match.run_match()
print(winner, final_score)
t = Tournament([1,5], 1, 13, 7.5)
t.run_tournament() | 2.765625 | 3 |
producer.py | CloudBreadPaPa/azure-eventhub-kafka-python | 0 | 12793382 | <reponame>CloudBreadPaPa/azure-eventhub-kafka-python
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Copyright 2016 Confluent Inc.
# Licensed under the MIT License.
# Licensed under the Apache License, Version 2.0
#
# Original Confluent sample modified for use with Azure Event Hubs for Apache Kafka Ecosystems
import os
import sys
from confluent_kafka import Producer
ssl_ca_location = os.environ['ssl_ca_location']
bootstrap_servers = os.environ['bootstrap_servers']
sasl_password = os.environ['sasl_password']
topic_name = os.environ['topic_name']
if __name__ == '__main__':
# Producer configuration
conf = {
'bootstrap.servers': bootstrap_servers, #replace
'security.protocol': 'SASL_SSL',
'ssl.ca.location': ssl_ca_location,
'sasl.mechanism': 'PLAIN',
'sasl.username': '$ConnectionString',
        'sasl.password': sasl_password, #replace
'client.id': 'python-example-producer'
}
# Create Producer instance
p = Producer(**conf)
def delivery_callback(err, msg):
if err:
sys.stderr.write('%% Message failed delivery: %s\n' % err)
else:
            sys.stderr.write('%% Message delivered to %s [%d] @ %d\n' % (msg.topic(), msg.partition(), msg.offset()))
# Write 1-10 to topic
for i in range(0, 10):
try:
p.produce(topic_name, str(i), callback=delivery_callback)
except BufferError as e:
sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery): try again\n' % len(p))
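        # Serve delivery reports queued by earlier produce() calls.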
p.poll(0)
# Wait until all messages have been delivered
sys.stderr.write('%% Waiting for %d deliveries\n' % len(p))
p.flush() | 2.03125 | 2 |
graph_construct/variable_table.py | eecshope/GraphPC | 0 | 12793383 | <reponame>eecshope/GraphPC
SPECIAL_TOKENS = ("cin", "cout", "endl", "fixed", "EOF", "stdin", "stdout", "N", "M", "L", "MAX", "MIN", "NUM", "MAXN")
UPDATE_MODE = {"ro", "wo", "rw"} # read-only, write-only, read-and-write
class VariableTable:
"""
There's one thing to be mention that all of the variables references follow the lazy strategy, which means only
inherent from the outer scope when truly mentioned
"""
def __init__(self, father=None):
"""
One basic unit is a key-value pair: (token, {"lr": set of "lr ref", "lw": set of "lw ref"}
token is a string, reference are cpp parser's node
One variable table is for one sequential process, for example, a compound statement, or a single statement
:param father: the direct father scope
"""
self.father = father
self.child = None
self.local_variables = dict({})
self.outer_variables = dict({})
def find_reference(self, token):
if token in self.local_variables:
return self.local_variables[token]
elif token in self.outer_variables:
return self.outer_variables[token]
elif self.father is None:
return None
else:
unit = self.father.find_reference(token)
if unit is not None: # be sure to deep copy the elements
self.outer_variables[token] = {"lr": set([]) | unit["lr"], "lw": set([]) | unit["lw"]}
unit = self.outer_variables[token]
return unit
def add_reference(self, token, node):
assert token == node.token
"""
if token in SPECIAL_TOKENS:
return 1
"""
self.local_variables[token] = {"lr": {node}, "lw": {node}}
return self.local_variables[token]
def find_and_update(self, token: str, node, mode: str):
assert token == node.token
unit = self.find_reference(token)
if unit is None:
if node.token in SPECIAL_TOKENS:
return 1
else:
raise ValueError(f"{token} not found")
if mode not in UPDATE_MODE:
raise ValueError(f"{mode} is not an available update mode")
node.last_read |= unit["lr"]
node.last_write |= unit["lw"]
if mode == "ro":
unit["lr"] = {node}
elif mode == "wo":
unit["lw"] = {node}
else:
unit["lr"] = {node}
unit["lw"] = {node}
return 0
"""
def find_or_add_reference(self, token):
unit = self.find_reference(token)
if unit is None:
unit = self.add_reference(token)
return unit
"""
def add_variable_table(self):
self.child = VariableTable(self)
return self.child
def pop_self(self):
self.father.child = None
for token in self.outer_variables:
if token in self.father.local_variables:
self.father.local_variables[token] = self.outer_variables[token]
            elif token in self.father.outer_variables:
self.father.outer_variables[token] = self.outer_variables[token]
else:
raise ValueError(f"Outer variable {token} is not found in outer tables")
def merge_and_pop_self(self):
self.father.child = None
for token in self.outer_variables:
if token in self.father.local_variables:
self.father.local_variables[token]["lr"] |= self.outer_variables[token]["lr"]
self.father.local_variables[token]["lw"] |= self.outer_variables[token]["lw"]
elif token in self.father.outer_variables:
self.father.outer_variables[token]["lr"] |= self.outer_variables[token]["lr"]
self.father.outer_variables[token]["lw"] |= self.outer_variables[token]["lw"]
else:
raise ValueError(f"Outer variable {token} is not found in outer tables")
def __str__(self):
return f" local: {str(self.local_variables)}\n global: {str(self.outer_variables)}"
| 3.03125 | 3 |
code_challenge/CC_29.py | ben-rd/Weebouo | 0 | 12793384 | <reponame>ben-rd/Weebouo
import string
print("type the input: ")
a = input(">")
b = list(a)
c = list(dict.fromkeys(b))                   # unique characters, in first-seen order
d = len([int(s) for s in c if s.isdigit()])  # count of distinct digits in the input
invalidChars = set(string.punctuation.replace("_", ""))
if any(char in invalidChars for char in a):  # reject punctuation (underscore allowed)
    print(a, "=>", "Error")
elif d > 0:                                  # reject inputs that contain digits
    print(a, "=>", "Error")
else:
    i = 1
    e = []
    for x in c:                              # number the unique characters 1..n
        e.append(i)
        i = i + 1
    dictionary = dict(zip(e, c))
    reverse_subs = {v: k for k, v in dictionary.items()}
    converted_list = [reverse_subs.get(item, item) for item in b]  # char -> its number
    s = [str(y) for y in converted_list]
    number = int("".join(s))                 # concatenate the numbers into one integer
print("".join(b), "->", number) | 3.5625 | 4 |
api_google/google_api_directory.py | Ragnaruk/api_integration | 0 | 12793385 | <filename>api_google/google_api_directory.py
"""
https://developers.google.com/admin-sdk/directory/v1/quickstart/python
https://developers.google.com/resources/api-libraries/documentation/admin/directory_v1/python/latest/index.html
https://developers.google.com/identity/protocols/googlescopes
https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members
"""
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from config.config import path_credentials_directory
def get_directory_service():
"""
Authorize in Google via OAuth Flow.
:return: Authenticated service object.
"""
google_api_scopes = [
'https://www.googleapis.com/auth/admin.directory.user',
'https://www.googleapis.com/auth/admin.directory.group'
]
credentials = None
if os.path.exists(path_credentials_directory / 'token_directory.pickle'):
with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token:
credentials = pickle.load(token)
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
path_credentials_directory / 'credentials.json',
google_api_scopes)
credentials = flow.run_local_server(port=0)
with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token:
pickle.dump(credentials, token)
service = build('admin', 'directory_v1', credentials=credentials)
return service
def get_groups_for_domain(service, domain):
"""
Get all groups for a specified domain via Google API.
:param service: Authenticated directory service object
:param domain: The domain for groups
:return: List of all groups
"""
results = service.groups().list(domain=domain, maxResults=500).execute()
groups = results.get('groups', [])
while 'nextPageToken' in results:
results = service.groups().list(domain=domain, maxResults=500,
pageToken=results['nextPageToken']).execute()
groups += results.get('groups', [])
return groups
def get_members_for_group(service, group):
"""
Get all members for a specified group via Google API.
:param service: Authenticated directory service object
:param group: The id for the group
:return: List of all members
"""
results = service.members().list(
groupKey=group,
maxResults=500
).execute()
direct_members = results.get('members', [])
while 'nextPageToken' in results:
results = service.members().list(
groupKey=group,
maxResults=500,
pageToken=results['nextPageToken']
).execute()
direct_members += results.get('members', [])
members = []
for member in direct_members:
if member['type'] == 'GROUP':
members.extend(get_members_for_group(service, member['email']))
else:
members.append(member)
return members
def get_users_for_domain(service, domain, query):
"""
Get all users for a specified domain via Google API.
:param service: Authenticated directory service object
:param domain: The domain for users
:return: List of all users
"""
results = service.users().list(
domain=domain,
maxResults=500,
query=query,
).execute()
users = results.get('users', [])
while 'nextPageToken' in results:
results = service.users().list(
domain=domain,
maxResults=500,
query=query,
pageToken=results['nextPageToken']
).execute()
users += results.get('users', [])
return users
def create_group(service, email, name, description):
"""
Create a Google Group via Google API.
Groups created en masse might appear after 6-72 hours pass.
:param service: Authenticated directory service object
:param name: Name of the group
:return: Results of the query
"""
results = service.groups().insert(
body={
"kind": "admin#directory#group",
"email": email,
"name": name,
"description": description,
}
).execute()
return results
def add_user_to_group(service, group_key, user_email, role):
"""
Add user to a Google Group.
:param service: Authenticated directory service object
:param group_key: Unique identifier of the group (string, email, or id)
:param user_email: Email of the user
:param role: Role of the member
:return: Results of the query
"""
results = service.members().insert(
groupKey=group_key,
body={
"email": user_email,
"role": role
}
).execute()
return results
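# Illustrative wiring (the domain/group/user values below are placeholders):
#
#   service = get_directory_service()
#   groups = get_groups_for_domain(service, "example.com")
#   members = get_members_for_group(service, "[email protected]")
#   add_user_to_group(service, "[email protected]", "[email protected]", "MEMBER")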
if __name__ == '__main__':
get_directory_service()
| 2.734375 | 3 |
p_ticker.py | z33pX/Poloniex-Ticker-Flask-WebApp | 0 | 12793386 | <gh_stars>0
import json
import logging
from multiprocessing.dummy import Process as Thread
import websocket
from poloniex import Poloniex
logger = logging.getLogger(__name__)
class PWSTicker(object):
def __init__(self, api=None):
self.api = api
if not self.api:
self.api = Poloniex(jsonNums=float)
self.tick = {}
iniTick = self.api.returnTicker()
self._ids = {market: iniTick[market]['id'] for market in iniTick}
for market in iniTick:
self.tick[self._ids[market]] = iniTick[market]
self._ws = websocket.WebSocketApp("wss://api2.poloniex.com/",
on_open=self.on_open,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close)
def on_message(self, ws, message):
message = json.loads(message)
if 'error' in message:
return logger.error(message['error'])
if message[0] == 1002:
if message[1] == 1:
return logger.info('Subscribed to ticker')
if message[1] == 0:
return logger.info('Unsubscribed to ticker')
data = message[2]
data = [float(dat) for dat in data]
self.tick[data[0]] = {'id': data[0],
'last': data[1],
'lowestAsk': data[2],
'highestBid': data[3],
'percentChange': data[4],
'baseVolume': data[5],
'quoteVolume': data[6],
'isFrozen': data[7],
'high24hr': data[8],
'low24hr': data[9]
}
def on_error(self, ws, error):
print(error)
def on_close(self, ws):
if self._t._running:
try:
self.stop()
except Exception as e:
print(e)
try:
self.start()
except Exception as e:
print(e)
self.stop()
else:
print("Websocket closed!")
def on_open(self, ws):
self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002}))
@property
def status(self):
"""
Returns True if the websocket is running, False if not
"""
try:
return self._t._running
except:
return False
def start(self):
""" Run the websocket in a thread """
self._t = Thread(target=self._ws.run_forever)
self._t.daemon = True
self._t._running = True
self._t.start()
print('Websocket thread started')
def stop(self):
""" Stop/join the websocket thread """
self._t._running = False
self._ws.close()
self._t.join()
print('Websocket thread stopped/joined')
def __call__(self, market=None):
""" returns ticker from mongodb """
if market:
return self.tick[self._ids[market]]
return self.tick
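# Minimal usage sketch (opens a live websocket, so network access is required;
# the market name is illustrative):
if __name__ == "__main__":
    import time
    ticker = PWSTicker()
    ticker.start()
    time.sleep(5)               # let a few ticker updates arrive
    print(ticker('USDT_BTC'))   # look up a single market by name
    ticker.stop()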
| 2.390625 | 2 |
crashbin_app/migrations/0009_unique_names.py | The-Compiler/crashbin | 0 | 12793387 | <filename>crashbin_app/migrations/0009_unique_names.py
# Generated by Django 2.2.1 on 2019-05-20 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("crashbin_app", "0008_create_mailbox")]
operations = [
migrations.AlterField(
model_name="bin",
name="name",
field=models.CharField(max_length=255, unique=True),
),
migrations.AlterField(
model_name="label",
name="name",
field=models.CharField(max_length=255, unique=True),
),
]
| 1.820313 | 2 |
docs/02.AI_ML/code-1905/day05/demo05_gridsearch.py | mheanng/PythonNote | 2 | 12793388 | """
demo05_gridsearch.py - grid search demo
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
data = np.loadtxt('../ml_data/multiple2.txt',
delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]
# Use an SVM for classification
train_x, test_x, train_y, test_y = \
ms.train_test_split(x, y, test_size=0.25,
random_state=5)
model = svm.SVC(probability=True)
# Select the best model via grid search
params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]},
{'kernel':['poly'], 'C':[1], 'degree':[2, 3]},
{'kernel':['rbf'], 'C':[1,10,100,1000],
'gamma':[1, 0.1, 0.01, 0.001]}]
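# Note: this grid expands to 4 (linear) + 2 (poly) + 16 (rbf) = 22 candidate
# models, so with cv=5 GridSearchCV fits 110 models before refitting the best.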
model = ms.GridSearchCV(model, params, cv=5)
model.fit(train_x, train_y)
print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)
# Print each hyperparameter combination and its mean test score
for param, score in zip(
model.cv_results_['params'],
model.cv_results_['mean_test_score']):
print(param, '->', score)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
# New samples to classify
prob_x = np.array([
[2, 1.5],
[8, 9],
[4.8, 5.2],
[4, 4],
[2.5, 7],
[7.6, 2],
[5.4, 5.9]])
pred_prob_y = model.predict(prob_x)
probs = model.predict_proba(prob_x)
print(probs)
# Plot the classification boundary
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
mp.figure('Probability', facecolor='lightgray')
mp.title('Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y,
cmap='jet_r', s=80, marker='D')
for i in range(len(probs)):
mp.annotate(
'{}% {}%'.format(
round(probs[i, 0] * 100, 2),
round(probs[i, 1] * 100, 2)),
xy=(prob_x[i, 0], prob_x[i, 1]),
xytext=(12, -12),
textcoords='offset points',
horizontalalignment='left',
verticalalignment='top',
fontsize=9,
bbox={'boxstyle': 'round,pad=0.6',
'fc': 'orange', 'alpha': 0.8})
mp.show() | 2.9375 | 3 |
proxy/buffer_io.py | CMA2401PT/Phoenix-Transfer | 3 | 12793389 | import struct
import numpy as np
from .nbt import NBTFile
import io
class BufferDecoder(object):
def __init__(self,bytes) -> None:
self.bytes=bytes
self.curr=0
def read_var_uint32(self):
        # Do we really need to pinch bytes this hard? A var-uint32 is at most 5 bytes anyway.
i,v=0,0
while i<35:
b=self.read_byte()
v|=(b&0x7f)<<i
if b&0x80==0:
return v
i+=7
assert False,f'read_var_uint32 fail i:{i} v:{v} {self}'
def read_var_int32(self):
v_=self.read_var_uint32()
v= np.int32(v_>>1)
if (v_&1)!=0:
v=~v
return int(v)
def read_var_uint64(self):
        # Same complaint as above, but for var-uint64 (at most 10 bytes).
i,v=0,0
while i<70:
b=self.read_byte()
v|=(b&0x7f)<<i
if b&0x80==0:
return v
i+=7
assert False,f'read_var_uint64 fail i:{i} v:{v} {self}'
def read_var_int64(self):
v_=self.read_var_uint64()
v= np.int64(v_>>1)
if (v_&1)!=0:
v=~v
return int(v)
def read_vec3(self):
self.curr+=12
return struct.unpack('fff',self.bytes[self.curr-12:self.curr])
def read_float32(self):
self.curr+=4
return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0]
def read_tail(self):
return self.bytes[self.curr:]
def read_byte(self):
self.curr+=1
return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]
def read_boolen(self):
return self.read_byte()==1
def read_str(self):
length=self.read_var_uint32()
self.curr+=length
return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8')
@staticmethod
def reverseUUIDBytes(bytes):
        # swap the two 8-byte halves of the UUID
        return bytes[8:] + bytes[:8]
def read_UUID(self):
self.curr+=16
uuid_bytes=self.bytes[self.curr-16:self.curr]
return self.reverseUUIDBytes(uuid_bytes)
def read_uint8(self):
self.curr+=1
return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]
def read_int16(self):
self.curr+=2
return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0]
def read_int32(self):
self.curr+=4
return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0]
def read_uint32(self):
self.curr+=4
return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0]
def read_bytes(self,_len):
self.curr+=_len
return self.bytes[self.curr-_len:self.curr]
def read(self,_len):
self.curr+=_len
return self.bytes[self.curr-_len:self.curr]
def read_nbt(self,_len=None):
if _len==None:
nbt=NBTFile(self)
return nbt.to_py()
else:
self.curr+=_len
bio=io.BytesIO(self.bytes[self.curr-_len:self.curr])
nbt=NBTFile(bio)
return nbt.to_py()
class BufferEncoder(object):
def __init__(self) -> None:
self._bytes_elements=[]
self._bytes_elements_count=0
self._bytes=b''
@property
def bytes(self):
if len(self._bytes_elements)!=self._bytes_elements_count:
self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:])
self._bytes_elements_count=len(self._bytes_elements)
return self._bytes
def append(self,bs:bytes):
self._bytes_elements.append(bs)
def write_float32(self,f):
self.append(struct.pack('f',f))
def write_byte(self,b):
self.append(struct.pack('B',b))
def write_boolen(self,b:bool):
self.append(struct.pack('B',b))
def write_uint32(self,i:int):
self.append(struct.pack('I',i))
def write_var_uint32(self,x):
while x>=0x80:
self.write_byte(int((x%128)+0x80))
x>>=7
self.write_byte(x)
def write_var_int32(self,x):
uv=np.uint32(np.uint32(x)<<1)
if x<0:
uv=~uv
self.write_var_uint32(uv)
def write_var_uint64(self,x):
while x>=0x80:
self.write_byte(int((x%128)+0x80))
x//=128
self.write_byte(int(x))
def write_var_int64(self,x):
uv=np.uint64(np.uint64(x)*2)
if x<0:
uv=~uv
self.write_var_uint64(uv)
def write_str(self,s:str):
es=s.encode(encoding='utf-8')
self.write_var_uint32(len(es))
self.append(es)
def write_UUID_bytes(self,uuid_bytes:bytes):
self.append(uuid_bytes)
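# Round-trip sanity check for the varint codecs (a minimal sketch; negative
# values would exercise the zigzag path but depend on numpy's uint wrapping):
if __name__ == "__main__":
    enc = BufferEncoder()
    enc.write_var_uint32(300)
    enc.write_var_int32(12345)
    enc.write_str("hello")
    dec = BufferDecoder(enc.bytes)
    assert dec.read_var_uint32() == 300
    assert dec.read_var_int32() == 12345
    assert dec.read_str() == "hello"
    print("varint round-trip ok")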
| 2.625 | 3 |
src/ufdl/annotations_plugin/image/object_detection/__init__.py | waikato-ufdl/ufdl-annotations-plugin | 0 | 12793390 | <gh_stars>0
"""
The image object-detection data domain.
"""
| 0.773438 | 1 |
web/frontend/hass_db/__init__.py | tcsvn/activity-assistant | 45 | 12793391 | from .hass_db import url_from_hass_config
| 1.09375 | 1 |
autotest/t036_test.py | hansonmcoombs/flopy | 0 | 12793392 | <filename>autotest/t036_test.py
"""
Test loading and preserving existing unit numbers
"""
import os
import pymake
from ci_framework import FlopyTestSetup, base_test_dir
import flopy
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
pth = os.path.join("..", "examples", "data", "mf2005_test")
cpth = os.path.join("temp", "t036")
exe_name = "mf2005"
v = flopy.which(exe_name)
run = True
if v is None:
run = False
def test_uzf_unit_numbers():
model_ws = f"{base_dir}_test_uzf_unit_numbers"
test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
mfnam = "UZFtest2.nam"
orig_pth = os.path.join("..", "examples", "data", "uzf_examples")
# copy files
pymake.setup(os.path.join(orig_pth, mfnam), model_ws)
m = flopy.modflow.Modflow.load(
mfnam,
verbose=True,
model_ws=model_ws,
forgive=False,
exe_name=exe_name,
)
assert m.load_fail is False, "failed to load all packages"
# reset the oc file
m.remove_package("OC")
output = ["save head", "print budget"]
spd = {}
for iper in range(1, m.dis.nper):
for istp in [0, 4, 9, 14]:
spd[(iper, istp)] = output
spd[(0, 0)] = output
spd[(1, 1)] = output
spd[(1, 2)] = output
spd[(1, 3)] = output
oc = flopy.modflow.ModflowOc(m, stress_period_data=spd)
oc.write_file()
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "base model run did not terminate successfully"
fn0 = os.path.join(model_ws, mfnam)
# change uzf iuzfcb2 and add binary uzf output file
m.uzf.iuzfcb2 = 61
m.add_output_file(m.uzf.iuzfcb2, extension="uzfcb2.bin", package="UZF")
# change the model work space
model_ws2 = os.path.join(model_ws, "flopy")
m.change_model_ws(model_ws2, reset_external=True)
# rewrite files
m.write_input()
# run and compare the output files
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "new model run did not terminate successfully"
fn1 = os.path.join(model_ws2, mfnam)
# compare budget terms
if run:
fsum = os.path.join(
model_ws, f"{os.path.splitext(mfnam)[0]}.budget.out"
)
try:
success = pymake.compare_budget(
fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
)
except:
success = False
print("could not perform ls" "budget comparison")
assert success, "budget comparison failure"
return
def test_unitnums_load_and_write():
model_ws = f"{base_dir}_test_unitnums_load_and_write"
test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
mfnam = "testsfr2_tab.nam"
# copy files
    pymake.setup(os.path.join(pth, mfnam), model_ws)
m = flopy.modflow.Modflow.load(
mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name
)
assert m.load_fail is False, "failed to load all packages"
msg = (
"modflow-2005 testsfr2_tab does not have "
"1 layer, 7 rows, and 100 columns"
)
v = (m.nlay, m.nrow, m.ncol, m.nper)
assert v == (1, 7, 100, 50), msg
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "base model run did not terminate successfully"
fn0 = os.path.join(model_ws, mfnam)
# rewrite files
model_ws2 = os.path.join(model_ws, "flopy")
m.change_model_ws(model_ws2, reset_external=True)
m.write_input()
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "base model run did not terminate successfully"
fn1 = os.path.join(model_ws2, mfnam)
if run:
fsum = os.path.join(
model_ws, f"{os.path.splitext(mfnam)[0]}.budget.out"
)
try:
success = pymake.compare_budget(
fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
)
except:
success = False
print("could not perform ls" "budget comparison")
assert success, "budget comparison failure"
return
if __name__ == "__main__":
test_uzf_unit_numbers()
test_unitnums_load_and_write()
| 2.25 | 2 |
miprometheus/grid_workers/grid_analyzer.py | vincentalbouy/mi-prometheus | 0 | 12793393 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
grid_analyzer.py:
- This script post-processes the output of the ``GridTrainers`` and ``GridTesters``. \
It gathers the test results into one `.csv` file.
"""
__author__ = "<NAME> & <NAME>"
import os
import csv
import yaml
import torch
import logging
from datetime import datetime
from miprometheus.grid_workers.grid_worker import GridWorker
class GridAnalyzer(GridWorker):
"""
Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`.
Post-processes the test results of a grid of experiments and gather them in a csv file.
This csv file will gather the training statistics (seeds, accuracies, terminal conditions,...), \
the validation statistics and the test statistics.
Inherits from :py:class:`miprometheus.grid_workers.GridWorker`.
"""
def __init__(self, name="GridAnalyzer"):
"""
Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`:
- Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker`
:param name: Name of the worker (DEFAULT: "GridAnalyzer").
:type name: str
"""
# call base constructor
super(GridAnalyzer, self).__init__(name=name, use_gpu=False)
@staticmethod
def check_if_file_exists(dir_, filename_):
"""
Checks if ``filename_`` exists in ``dir_``.
:param dir_: Path to file.
:type dir_: str
:param filename_: Name of the file to be opened and analysed.
:type filename_: str
:return: True if the file exists in the directory, else False
"""
return os.path.isfile(os.path.join(dir_, filename_))
def check_file_content(self, dir_, filename_):
"""
Checks if the number of lines in the file is > 1.
:param dir_: Path to file.
:type dir_: str
:param filename_: Name of the file to be opened and analyzed.
:type filename_: str
:return: True if the number of lines in the file is strictly greater than one.
"""
return self.get_lines_number(os.path.join(dir_, filename_)) > 1
@staticmethod
def get_lines_number(filename_):
"""
Returns the number of lines in ``filename_``.
:param filename_: Filepath to be opened and line-read.
:type filename_: str
:return: Number of lines in the file.
"""
with open(filename_) as f:
return sum(1 for _ in f)
def get_experiment_tests(self, experiment_path_):
"""
Returns a list of folders containing valid test experiments data:
- A configuration (`testing_configuration.yaml`),
- A csv file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`)
:param experiment_path_: Path to experiment (training) folder.
:type experiment_path_: str
:return: A list of valid test experiment folders.
"""
experiments_tests = []
for root, dirs, _ in os.walk(experiment_path_, topdown=True):
for name in dirs:
experiments_tests.append(os.path.join(root, name))
# Keep only the folders that contain a test configuration file and a csv statistics file.
experiments_tests = [elem for elem in experiments_tests if
self.check_if_file_exists(elem, 'testing_configuration.yaml') and
self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')]
# Check if the csv file contains at least one data point.
experiments_tests = [elem for elem in experiments_tests if
self.check_file_content(elem, 'testing_set_agg_statistics.csv')]
return experiments_tests
def setup_grid_experiment(self):
"""
Setups the overall experiment:
- Parses arguments and sets logger level,
- Checks the presence of experiments folder,
- Recursively traverses the experiment folders, cherry-picking subfolders containing:
- (a) 'training_configuration.yaml' (training configuration file),
- (b) 'models/model_best.pt' (checkpoint of the best saved model).
"""
# Parse arguments.
self.flags, self.unparsed = self.parser.parse_known_args()
# Set logger depending on the settings.
self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))
# Check if experiments directory was indicated.
if self.flags.expdir == '':
print('Please pass the experiments directory as --expdir')
exit(-1)
# Get experiment directory.
self.experiment_rootdir = self.flags.expdir
# Get all sub-directories paths in expdir.
self.experiments_list = []
for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True):
for name in dirs:
self.experiments_list.append(os.path.join(root, name))
# Keep only the folders that contain training_configuration.yaml, training_statistics.csv and
# training.csv and model (which contains aggregated validation statistics).
self.experiments_list = [elem for elem in self.experiments_list if
self.check_if_file_exists(elem, 'training_configuration.yaml') and
self.check_if_file_exists(elem, 'models/model_best.pt')]
# Check if there are some valid folders.
if len(self.experiments_list) == 0:
self.logger.error("There are no valid experiment folders in {} directory!".format(self.experiment_rootdir))
exit(-2)
# List folders with "valid" experiment data.
exp_str = "Found the following valid experiments in directory: {} \n".format(self.experiment_rootdir)
exp_str += '='*80 + '\n'
for exp in self.experiments_list:
exp_str += " - {}\n".format(exp)
exp_str += '='*80 + '\n'
self.logger.info(exp_str)
# Ask for confirmation - optional.
if self.flags.user_confirm:
try:
                input('Press <Enter> to confirm and start the grid analysis\n')
except KeyboardInterrupt:
exit(0)
def run_experiment(self, experiment_path: str):
"""
Collects the training / validation / test statistics for a given experiment path.
Analyzes whether the given training experiment folder contains subfolders with test experiments data:
- Loads and parses training configuration file,
- Loads checkpoint with model and training and validation statistics,
- Recursively traverses subdirectories looking for test experiments,
.. note::
We require that the test statistics csv files are valid, i.e. contain at least one line with \
collected statistics (excluding the header).
- Collects statistics from training, validation (from model checkpoint) and test experiments \
(from test csv files found in subdirectories).
:param experiment_path: Path to an experiment folder containing a training statistics.
:type experiment_path: str
:return: Four dictionaries containing:
- Status info (model, problem etc.),
- Training statistics,
- Validation statistics,
- Test statistics.
"""
self.logger.info('Analyzing experiments from: {}'.format(experiment_path))
# Create dictionaries.
status_dict = dict()
train_dict = dict()
valid_dict = dict()
# Load yaml file, to get model name, problem name and random seeds.
with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file:
params = yaml.load(yaml_file)
# Get problem and model names - from config.
status_dict['problem'] = params['testing']['problem']['name']
status_dict['model'] = params['model']['name']
# Load checkpoint from model file.
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'),
map_location=lambda storage, loc: storage)
status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp'])
status_dict['training_terminal_status'] = chkpt['status']
status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp'])
# Create "empty" equivalent.
status_dict_empty = dict.fromkeys(status_dict.keys(), ' ')
# Copy training status stats.
train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml')
train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path))
train_dict['training_seed_torch'] = params['training']['seed_torch']
train_dict['training_seed_numpy'] = params['training']['seed_numpy']
# Copy the training statistics from the checkpoint and add the 'train_' prefix.
for key, value in chkpt['training_stats'].items():
train_dict['training_{}'.format(key)] = value
# Create "empty" equivalent.
train_dict_empty = dict.fromkeys(train_dict.keys(), ' ')
# Copy the validation statistics from the checkpoint and add the 'valid_' prefix.
for key, value in chkpt['validation_stats'].items():
valid_dict['validation_{}'.format(key)] = value
# Create "empty" equivalent.
valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ')
# Get all tests for a given training experiment.
experiments_tests = self.get_experiment_tests(experiment_path)
list_test_dicts = []
if len(experiments_tests) > 0:
self.logger.info(' - Found {} test(s)'.format(len(experiments_tests)))
# "Expand" status, train and valid dicts by empty ones, prop. to the number of test folders.
list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]]
list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]]
list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]]
# Get tests statistics.
for experiment_test_path in experiments_tests:
self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path))
# Create test dict:
test_dict = dict()
test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml')
test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:]
# Load yaml file and get random seeds.
with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file:
test_params = yaml.load(yaml_file)
# Get seeds.
test_dict['test_seed_torch'] = test_params['testing']['seed_torch']
test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy']
# Load csv file and copy test statistics
with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f:
# Open file.
test_reader = csv.DictReader(f)
# Copy training statistics.
for row in test_reader:
for key, value in row.items():
test_dict['test_{}'.format(key)] = value
list_test_dicts.append(test_dict)
else:
self.logger.info(' - Could not find any valid tests')
list_status_dicts = [status_dict]
list_train_dicts = [train_dict]
list_valid_dicts = [valid_dict]
# Add "empty test entry"
list_test_dicts.append({})
# Return all dictionaries with lists
return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts
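    # Example: merge_list_dicts([{'a': 1}, {'b': 2}]) returns
    # {'a': (1, ' '), 'b': (' ', 2)} (up to key order): missing fields are
    # padded with single spaces so the csv columns stay aligned.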
@staticmethod
def merge_list_dicts(list_dicts):
"""
Merges a list of dictionaries by filling the missing fields with spaces into one dict.
:param list_dicts: List of dictionaries, potentially containing different headers, which will be merged.
:type list_dicts: list
:return: dict, resulting of the merge.
"""
# Create a "unified" header.
header = set(k for d in list_dicts for k in d)
# Create an "empty" dict from the unified header.
empty_dict = {k: ' ' for k in header}
# "Fill" all lists with empty gaps.
list_filled_dicts = []
for i, _ in enumerate(list_dicts):
list_filled_dicts.append({**empty_dict, **(list_dicts[i])})
# Zip lists of dicts.
final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts])))
# Return the result.
return final_dict
def run_grid_experiment(self):
"""
Collects four list of dicts from each experiment path contained in ``self.experiments_lists``.
Merges all them together and saves result to a single csv file.
"""
try:
# Go through the experiments one by one and collect data.
list_statuses = []
list_trains = []
list_valids = []
list_tests = []
for exp in self.experiments_list:
statuses, trains, valids, tests = self.run_experiment(exp)
list_statuses.extend(statuses)
list_trains.extend(trains)
list_valids.extend(valids)
list_tests.extend(tests)
# Merge lists.
statuses = self.merge_list_dicts(list_statuses)
trains = self.merge_list_dicts(list_trains)
valids = self.merge_list_dicts(list_valids)
tests = self.merge_list_dicts(list_tests)
# Merge everything into one big dictionary..
exp_values = {**statuses, **trains, **valids, **tests}
# create results file
results_file = os.path.join(self.experiment_rootdir,
"{0:%Y%m%d_%H%M%S}_grid_analysis.csv".format(datetime.now()))
with open(results_file, "w") as outfile:
writer = csv.writer(outfile, delimiter=',')
writer.writerow(exp_values.keys())
writer.writerows(zip(*exp_values.values()))
self.logger.info('Analysis finished')
self.logger.info('Results stored in {}.'.format(results_file))
except KeyboardInterrupt:
self.logger.info('Grid analysis interrupted!')
def main():
"""
Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`.
"""
grid_analyzer = GridAnalyzer()
# parse args, load configuration and create all required objects.
grid_analyzer.setup_grid_experiment()
# GO!
grid_analyzer.run_grid_experiment()
if __name__ == '__main__':
main()
| 2.421875 | 2 |
mmdet/datasets/coco_rgb_2.py | arthur801031/3d-multi-resolution-rcnn | 16 | 12793394 | import numpy as np
from pycocotools_local.coco import *
import os.path as osp
from .utils import to_tensor, random_scale
from mmcv.parallel import DataContainer as DC
import mmcv
from .custom import CustomDataset
class CocoDatasetRGB2(CustomDataset):
CLASSES = ('microbleed', 'full_bounding_box')
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(ann_info, self.with_mask)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, ann_info, with_mask=True):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
slices_ann_info = {'r': [], 'g': [], 'b': []}
for info in ann_info:
if info['slice_label'] == 'r':
slices_ann_info['r'].append(info)
elif info['slice_label'] == 'g':
slices_ann_info['g'].append(info)
elif info['slice_label'] == 'b':
slices_ann_info['b'].append(info)
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
if with_mask:
gt_masks = []
gt_mask_polys = []
gt_poly_lens = []
for key in slices_ann_info:
cur_ann_info = slices_ann_info[key]
cur_slice_bboxes = []
cur_slice_labels = []
cur_slice_bboxes_ignore = []
cur_masks = []
cur_mask_polys = []
cur_poly_lens = []
for i, ann in enumerate(cur_ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann['iscrowd']:
cur_slice_bboxes_ignore.append(bbox)
else:
cur_slice_bboxes.append(bbox)
cur_slice_labels.append(self.cat2label[ann['category_id']])
if with_mask:
cur_masks.append(self.coco.annToMask(ann))
mask_polys = [
p for p in ann['segmentation'] if len(p) >= 6
] # valid polygons have >= 3 points (6 coordinates)
poly_lens = [len(p) for p in mask_polys]
cur_mask_polys.append(mask_polys)
cur_poly_lens.extend(poly_lens)
if cur_slice_bboxes:
cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32)
cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64)
else:
cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32)
cur_slice_labels = np.array([], dtype=np.int64)
if cur_slice_bboxes_ignore:
cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32)
else:
cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
gt_bboxes.append(cur_slice_bboxes)
gt_labels.append(cur_slice_labels)
gt_bboxes_ignore.append(cur_slice_bboxes_ignore)
gt_masks.append(cur_masks)
gt_mask_polys.append(cur_mask_polys)
gt_poly_lens.append(cur_poly_lens)
ann = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
if with_mask:
ann['masks'] = gt_masks
# poly format is not used in the current implementation
ann['mask_polys'] = gt_mask_polys
ann['poly_lens'] = gt_poly_lens
return ann
def insert_to_dict(self, data, key, tensors):
if key in data:
data[key].append(tensors)
else:
data[key] = [tensors]
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
# load image
orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in concept.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes_list = ann['bboxes']
gt_labels_list = ann['labels']
# if self.with_crowd:
gt_bboxes_ignore_list = ann['bboxes_ignore']
gt_masks_list = ann['masks']
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
data = None
for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list):
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0:
return None
# extra augmentation
if self.extra_aug is not None:
img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes,
gt_labels)
else:
img = orig_img
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(
img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
if self.with_seg:
gt_seg = mmcv.imread(
osp.join(self.seg_prefix, img_info['file_name'].replace(
'jpg', 'png')),
flag='unchanged')
gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
gt_seg = mmcv.imrescale(
gt_seg, self.seg_scale_factor, interpolation='nearest')
gt_seg = gt_seg[None, ...]
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack(
[proposals, scores]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(gt_masks, pad_shape,
scale_factor, flip)
if data is None:
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip,
image_id=img_info['id'])
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True))
self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes)))
if self.proposals is not None:
self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals)))
if self.with_label:
self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels)))
if self.with_crowd:
self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore)))
if self.with_mask:
self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True))
if self.with_seg:
self.insert_to_dict(data, 'gt_semantic_seg', DC(to_tensor(gt_seg), stack=True))
return data | 2.046875 | 2 |
bokeh-app/main.py | Yoshinta/GWaviz | 0 | 12793395 | <reponame>Yoshinta/GWaviz<filename>bokeh-app/main.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) 2021 <NAME> <<EMAIL>>
# Visualization of SXS and analytic waveform model
# Use by executing: bokeh serve main.py
# command to run at your command prompt.
# Then navigate to the URL http://localhost:5006/main in your browser.
import numpy as np
import os
import h5py
import json
import glob
from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants
from pycbc import types
from bokeh.models import TableColumn,ColumnDataSource,DataTable
from bokeh.plotting import figure, output_file, show, output_notebook
from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider
from bokeh.io import curdoc
from bokeh.layouts import column, row, grid, layout
from scipy.interpolate import InterpolatedUnivariateSpline as unispline
# =============================================================================
# First panel
# =============================================================================
def open_sxs(sxs_data):
#https://data.black-holes.org/waveforms/documentation.html
#AhA=apparent horizon A
#AhB=apparent horizon B
#AhC=common apparent horizon
with h5py.File( os.path.join(sxs_data, "Horizons.h5" ), 'r') as f:
AhA = f['AhA.dir/CoordCenterInertial.dat'][:]
AhB = f['AhB.dir/CoordCenterInertial.dat'][:]
AhC = f['AhC.dir/CoordCenterInertial.dat'][:]
return AhA,AhB,AhC
def sep_time(horizon1, horizon2):
hor_times = horizon1[:,0]
dx=horizon1[:,1]-horizon2[:,1]
dy=horizon1[:,2] - horizon2[:,2]
sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]])
sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. )
return hor_times,sep,dx[:2000],dy[:2000]
def moving_average(t, x, seglen, nseg):
dt = t[1] - t[0]
means = []
times = []
st = 0
for i in range(int(len(x)/nseg)):
en = int(st + seglen/dt)
try:
times.append(t[st])
means.append(np.mean(x[st:en]))
except:
break
st = st + seglen
return times, means
def get_h22(sxs_data):
with h5py.File( os.path.join(sxs_data, "rhOverM_Asymptotic_GeometricUnits_CoM.h5" ), 'r') as f:
h22 = f["OutermostExtraction.dir/Y_l2_m2.dat"][:]
h2m2 = f["OutermostExtraction.dir/Y_l2_m-2.dat"][:]
times = h22[:,0]
for t1, t2 in zip(times, h2m2[:,0]):
assert t1 == t2
h22 = h22[:,1] + 1.j * h22[:,2]
h2m2 = h2m2[:,1] + 1.j * h2m2[:,2]
return times,h22, h2m2
def get_hlm(sxs_data):
with h5py.File( os.path.join(sxs_data, "rhOverM_Asymptotic_GeometricUnits_CoM.h5" ), 'r') as f:
h21 = f["OutermostExtraction.dir/Y_l2_m1.dat"][:]
h2m1 = f["OutermostExtraction.dir/Y_l2_m-1.dat"][:]
h20 = f["OutermostExtraction.dir/Y_l2_m0.dat"][:]
h22 = f["OutermostExtraction.dir/Y_l2_m2.dat"][:]
h2m2 = f["OutermostExtraction.dir/Y_l2_m-2.dat"][:]
times = h22[:,0]
for t1, t2 in zip(times, h2m2[:,0]):
assert t1 == t2
h21 = h21[:,1] + 1.j * h21[:,2]
h2m1 = h2m1[:,1] + 1.j * h2m1[:,2]
h20 = h20[:,1] + 1.j * h20[:,2]
h2m2 = h2m2[:,1] + 1.j * h2m2[:,2]
h22 = h22[:,1] + 1.j * h22[:,2]
norm_time=times-times[np.argmax(h22)]
return norm_time,h21, h2m1,h2m2, h22,h20
def get_data(which_data):
AhA,AhB,AhC=open_sxs(which_data)
hor_times,sep,dx,dy=sep_time(AhA,AhB)
mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10)
times,h22,h2m2=get_h22(which_data)
phase22 = np.unwrap(np.angle(h22))
freq22 = unispline(times, phase22).derivative()(times)
norm_time=times-times[np.argmax(h22)]
return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy
# =============================================================================
# Second panel
# =============================================================================
def q_to_masses(mass_rat,total_m):
mass1=mass_rat/(mass_rat+1)*total_m
mass2=total_m-mass1
return mass1,mass2
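# e.g. q_to_masses(3, 50) == (37.5, 12.5): mass1 = q/(q+1) * M, mass2 = M - mass1.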
def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1):
mass1,mass2=q_to_masses(mass_rat,total_m)
    hp, hc = get_fd_waveform(mass1=mass1, mass2=mass2, delta_f=delta_f, f_lower=f_lower, approximant=approximant, eccentricity=eccentricity)
hs=hp+hc*1j
amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f))
phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f))
return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data
freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0)
dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq}
sourcep2=ColumnDataSource(data=dic_p2)
pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)
n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}')
n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}')
pn21.toolbar.logo = None
pn21.legend.click_policy="hide"
lines=[n11,n12]
pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)
n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}')
n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}')
pn22.toolbar.logo = None
pn22.legend.click_policy="hide"
lines=[n21,n22]
pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)
pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3)
pn23.toolbar.logo = None
pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400)
pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3)
pn24.toolbar.logo = None
q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title="Mass ratio (q)")
e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title="Eccentricity (e)")
model_selectFD = Select(title="FD Models", options=fd_approximants())
def update_slider(attrname, old, new):
# Get the current slider values
q = q_sliderFD.value
e = e_sliderFD.value
approximant = model_selectFD.value
freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant)
sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq}
for w in [q_sliderFD,e_sliderFD,model_selectFD]:
w.on_change('value', update_slider)
layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD))
# =============================================================================
# Third panel
# =============================================================================
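# NOTE: this callback references source31/source32/data_path, which are not
# defined in this file; it stays unregistered while the NR panels (tab1/tab3)
# are commented out below.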
def update_table2(attrname, old, new):
try:
selected_index = source31.selected.indices[0]
sval=source31.data["Simulation"][selected_index]
which_data = data_path+sval # changed this to the dict
time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data)
source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag}
except IndexError:
pass
# =============================================================================
# Fourth panel
# =============================================================================
def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024):
nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2']
if approximant in nonspinning_models:
s1z=0
s2z=0
mass1,mass2=q_to_masses(mass_rat,total_m)
    hp, hc = get_td_waveform(mass1=mass1, mass2=mass2, spin1z=s1z, spin2z=s2z, delta_t=delta_t, f_lower=f_lower, approximant=approximant, eccentricity=eccentricity)
hs=hp+hc*1j
amp=abs(hs)
phase=np.unwrap(np.angle(hs))
return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase
timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.)
dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD}
sourcep42=ColumnDataSource(data=dic_p42)
pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)
n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}')
n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}')
pn41.toolbar.logo = None
pn41.legend.click_policy="hide"
lines=[n41,n42]
pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)
n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}')
n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}')
pn42.toolbar.logo = None
pn42.legend.click_policy="hide"
lines=[n41,n42]
pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)
pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3)
pn43.toolbar.logo = None
pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400)
pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3)
pn44.toolbar.logo = None
q_slider = Slider(start=1, end=10, value=1, step=.5, title="Mass ratio (q)")
e_slider = Slider(start=0., end=0.9, value=0, step=.05, title="Eccentricity (e)")
s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title="Spin1z")
s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title="Spin2z")
model_select = Select(title="TD Models", options=td_approximants())
def update_slider2(attrname, old, new):
# Get the current slider values
q = q_slider.value
e = e_slider.value
s1z = s1z_slider.value
s2z = s2z_slider.value
approximant = model_select.value
timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant)
sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD}
for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]:
w.on_change('value', update_slider2)
layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select))
#tab1 = Panel(child=layoutNR, title="NR data")
tab2 = Panel(child=layoutan,title="Analytic FD")
#tab3 = Panel(child=layout3,title="NR l=2")
tab4 = Panel(child=layoutTD,title="Analytic TD")
#tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width')
tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width')
#layout = row(column(p,data_table),column(k,s),r)
curdoc().add_root(tabs)
curdoc().title = "Eccentric Waveforms Visualization"
| 2.265625 | 2 |
app/util.py | kyledemeule/firstpick | 0 | 12793396 | <reponame>kyledemeule/firstpick
from lib.picker import make_person
def parse_params(params):
    """Build player objects from parallel form fields.

    ``params`` is expected to behave like a Flask/Werkzeug MultiDict exposing
    ``player_name[]`` and ``player_skill[]`` lists.
    """
    player_names = []
    for p in params.getlist('player_name[]'):
        player_names.append(p)
    player_skills = []
    for p in params.getlist('player_skill[]'):
        player_skills.append(int(p))
    players = []
    for name, skill in zip(player_names, player_skills):
        players.append(make_person(name, skill))
    return players
| 2.953125 | 3 |
scripts/cwl_eval.py | leifos/cwl | 6 | 12793397 | <reponame>leifos/cwl
__author__ = "<NAME>"
import os
import argparse
from seeker.trec_qrel_handler import TrecQrelHandler
from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler
def read_in_cost_file(cost_file):
costs = dict()
with open(cost_file, "r") as cf:
while cf:
line = cf.readline()
if not line:
break
(element_type, cost) = line.split()
element_type = element_type.strip()
costs[element_type] = float(cost)
return costs
def check_file_exists(filename):
if filename and not os.path.exists(filename):
print("{0} Not Found".format(filename))
quit(1)
def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False):
qrh = TrecQrelHandler(qrel_file)
costs = None
# read in cost file - if cost file exists
if cost_file:
costs = read_in_cost_file(cost_file)
cwl_ruler = CWLRuler(metrics_file)
curr_topic_id = None
ranking_maker = None
if colnames:
print("Topic\tMetric\tEU/I\tEU\tEC/I\tEC\tI")
with open(results_file,"r") as rf:
while rf:
line = rf.readline()
if not line:
break
(topic_id, element_type, doc_id, rank, score, run_id) = line.split()
doc_id = doc_id.strip()
if (topic_id == curr_topic_id):
# build vectors
ranking_maker.add(doc_id, element_type)
else:
if curr_topic_id is not None:
#Perform the Measurements
#ranking.report()
cwl_ruler.measure(ranking_maker.get_ranking())
cwl_ruler.report()
# new topic
curr_topic_id = topic_id
# reset seen list
ranking_maker = RankingMaker(curr_topic_id, qrh, costs)
ranking_maker.add(doc_id, element_type)
#Perform the Measurements on the last topic
#ranking_maker.report()
cwl_ruler.measure(ranking_maker.get_ranking())
cwl_ruler.report()
#Perform aggregration over all topics
#Compute residuals?
if bib_file:
cwl_ruler.save_bibtex(bib_file)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="CWL Evaluation Metrics")
arg_parser.add_argument("gain_file", help="A TREC Formatted Qrel File with relevance scores used as gains. Gain values should be between zero and one. Four column tab/space sep file with fields: topic_id unused doc_id gain")
arg_parser.add_argument("result_file", help="TREC formatted results file. Six column tab/space sep file with fields: topic_id element_type doc_id rank score run_id")
arg_parser.add_argument("-c", "--cost_file", help="Costs associated with each element type specified in result file.", required=False)
arg_parser.add_argument("-m", "--metrics_file", help="The list of metrics that are to be reported. If not specified, a set of default metrics will be reported. Tab/space sep file with fields: metric_name params", required=False)
arg_parser.add_argument("-b", "--bib_file", help="If specified, then the BibTeX for the measures used will be saved to the filename given.", required=False)
arg_parser.add_argument("-n", "--colnames", help="Includes headings in the output", required=False, action="store_true")
args = arg_parser.parse_args()
gain_file = args.gain_file
result_file = args.result_file
cost_file = None
if args.cost_file:
cost_file = args.cost_file
metrics_file = None
if args.metrics_file:
metrics_file = args.metrics_file
bib_file = None
if args.bib_file:
bib_file = args.bib_file
colnames = False
if args.colnames:
colnames = True
check_file_exists(result_file)
check_file_exists(gain_file)
check_file_exists(cost_file)
check_file_exists(metrics_file)
main(result_file, gain_file, cost_file, metrics_file, bib_file, colnames)
| 2.40625 | 2 |
tests/loggingex/context/test_logging_context_filter.py | open-things/loggingex | 2 | 12793398 | <filename>tests/loggingex/context/test_logging_context_filter.py
from logging import DEBUG, LogRecord
from pytest import fixture, mark
from loggingex.context import LoggingContextFilter
from loggingex.context.filter import IGNORED_VARIABLE_NAMES
from .helpers import InitializedContextBase
class FilterTests(InitializedContextBase):
@fixture()
def record(self):
return LogRecord(
"test", DEBUG, "test.py", 1337, "message %d", (1,), None
)
def test_log_record_is_injected_with_context_variables(self, store, record):
store.replace({"foo": 1, "bar": 2.3, "baz": "dummy"})
assert LoggingContextFilter().filter(record) == 1
assert record.foo == 1
assert record.bar == 2.3
assert record.baz == "dummy"
@mark.parametrize("field", IGNORED_VARIABLE_NAMES)
def test_ignores_variables_that_would_overwrite_record_fields(
self, store, record, field
):
store.replace({field: "overwrite", "foo": 1})
LoggingContextFilter().filter(record)
assert record.foo == 1
assert getattr(record, field, "undefined") != "overwrite"
| 2.46875 | 2 |
bridson/__init__.py | emulbreh/pds2d | 32 | 12793399 | from random import random
from math import cos, sin, floor, sqrt, pi, ceil
def euclidean_distance(a, b):
dx = a[0] - b[0]
dy = a[1] - b[1]
return sqrt(dx * dx + dy * dy)
def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random):
tau = 2 * pi
cellsize = r / sqrt(2)
grid_width = int(ceil(width / cellsize))
grid_height = int(ceil(height / cellsize))
grid = [None] * (grid_width * grid_height)
def grid_coords(p):
return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize))
def fits(p, gx, gy):
yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height)))
for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):
for y in yrange:
g = grid[x + y * grid_width]
if g is None:
continue
if distance(p, g) <= r:
return False
return True
p = width * random(), height * random()
queue = [p]
grid_x, grid_y = grid_coords(p)
grid[grid_x + grid_y * grid_width] = p
while queue:
qi = int(random() * len(queue))
qx, qy = queue[qi]
queue[qi] = queue[-1]
queue.pop()
for _ in range(k):
alpha = tau * random()
d = r * sqrt(3 * random() + 1)
px = qx + d * cos(alpha)
py = qy + d * sin(alpha)
if not (0 <= px < width and 0 <= py < height):
continue
p = (px, py)
grid_x, grid_y = grid_coords(p)
if not fits(p, grid_x, grid_y):
continue
queue.append(p)
grid[grid_x + grid_y * grid_width] = p
return [p for p in grid if p is not None]
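# Example usage (illustrative parameters, not part of the original module):
# sample points that are at least r=10 apart inside a 100x80 rectangle.
#
# points = poisson_disc_samples(100, 80, r=10)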
| 3 | 3 |
examples/run_pre_tuned_algorithm/deepar/run.py | arangatang/Crayon | 0 | 12793400 | from crayon import benchmark
benchmark("deepar.yml", "deepar", benchmark_id="deepar_100", runs=100)
| 1.367188 | 1 |
VPN_Configuration.py | rphaley/DePaul_Class_VPN | 0 | 12793401 | def makeVPN(classNum, inNet, sslNet,users):
f = open('VPNconfig.txt','a')
f.write('''
object network SSL_CNS{}_NET
subnet 10.{}.0.0 255.255.0.0
!
nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup
'''.format(classNum,inNet,classNum,classNum))
for i in range(1,users):
f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0
group-policy CNS{}_Student{}_GP internal
group-policy CNS{}_Student{}_GP attributes
vpn-tunnel-protocol ssl-client
split-tunnel-policy tunnelspecified
split-tunnel-network-list value CNS{}_Student{}_SSL_ACL
!
ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0
!
tunnel-group CNS{}_Student{}_TG type remote-access
tunnel-group CNS{}_Student{}_TG general-attributes
address-pool SSL_CNS{}_Student{}_POOL
default-group-policy CNS{}_Student{}_GP
tunnel-group CNS{}_Student{}_TG webvpn-attributes
group-alias CNS{}_Student{}_NET enable
!
username CNS{}-student{} attributes
group-lock value CNS{}_Student{}_TG
service-type remote-access\n!\n!\n'''.format(classNum,i,inNet,i,classNum,i,classNum,i,classNum,i,classNum,i,sslNet,i,sslNet,i,classNum,i,classNum,i,
classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i))
f.close()
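# Example call (illustrative values): emit ASA config for class CNS-340 with
# inside network 10.40.0.0/16 and SSL pools under 172.20.<n>.0/24. Note that,
# as written, the loop covers students 1..users-1.
# makeVPN(classNum=340, inNet=40, sslNet=20, users=25)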
| 2.6875 | 3 |
ccc/compliancedb.py | busunkim96/cc-utils | 0 | 12793402 | <reponame>busunkim96/cc-utils<filename>ccc/compliancedb.py<gh_stars>0
import ci.util
from dso.compliancedb.db import ComplianceDB
def default_with_cfg_name(
cfg_name: str,
):
cfg_fac = ci.util.ctx().cfg_factory()
cfg = cfg_fac.compliancedb(cfg_name)
return ComplianceDB(
username=cfg.credentials().username(),
password=cfg.credentials().password(),
hostname=cfg.hostname(),
port=cfg.port(),
)
| 1.828125 | 2 |
synthpy/client/ingest.py | brokad/synthpy | 5 | 12793403 | <filename>synthpy/client/ingest.py<gh_stars>1-10
from .transport import Method
from .utils import NamespacedClient, scoped
from ..exceptions import ImproperlyConfigured
from ..model import Model
class IngestClient(NamespacedClient):
"""Base class for the Ingest API.
.. note::
Do not construct this class directly. Access it from the root
:class:`.Synth` client instead.
Example:
.. code-block:: python
>>> from synthpy import Synth
>>> client = Synth()
>>> client.put_documents(namespace="my_namespace", collection="my_collection", document={"yes?": True})
"""
@scoped("namespace")
def put_documents(
self, collection=None, document=None, batch=None, hint=None, namespace=None
):
"""Ingest one or more documents.
This supports both individual and batch document ingestion. In
general, batch is favored as it results in fewer individual
API requests and allows ``synth`` to optimize its internal
updating of the collection's model.
:param namespace: The name of the namespace we should ingest the document(s) to.
:type namespace: str
:param collection: The name of the collection we should ingest the document(s) to.
:type collection: str
:param document: The document we should ingest in the collection. This uses the API in individual ingestion mode.
:type document: dict, optional
:param batch: An iterable of documents we should ingest in the collection. This uses the API in batch mode.
:type batch: Iterable[dict], optional
:param hint: Hint about the content of the ingest. If specified, must follow the same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`.
:type hint: dict, optional
.. note::
Exactly one of ``document`` or ``batch`` must be set.
"""
has_document = document is not None
has_batch = batch is not None
        if (has_document and has_batch) or (not has_document and not has_batch):
raise ImproperlyConfigured(
"batch, document", "exactly one of 'document' or 'batch' must be set"
)
request = self.transport.request(Method.PUT)
if not namespace or not collection:
raise ImproperlyConfigured(
"namespace, collection",
"'namespace' and 'collection' are required arguments",
)
request.path.push(namespace).push(collection)
kwargs = {}
if has_document:
kwargs.update({"document": document})
elif has_batch:
kwargs.update({"batch": batch})
if hint is not None:
if isinstance(hint, Model):
hint = hint._into_content()._into_repr()
kwargs.update({"hint": hint})
request.body(**kwargs)
return request.execute()
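# Example usage in batch mode (mirrors the class docstring above; names are
# illustrative):
# >>> client.put_documents(namespace="my_namespace", collection="my_collection",
# ...                      batch=[{"yes?": True}, {"yes?": False}])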
| 2.609375 | 3 |
setup.py | haidi-ustc/scikit-nano | 21 | 12793404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python toolkit for generating and analyzing nanostructure data"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
import os
import sys
import shutil
import subprocess
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
raise RuntimeError("Python version 3.4+ required.\n\n"
"Sorry, but there are features of Python 3\n"
"that I want to take advantage of and without\n"
"worrying about Python 2 compatibility.\n"
"Therefore, Python 2 support was removed starting\n"
"in v0.3.7. Once/if I learn how to automate the\n"
"backporting process from the setup script,\n"
"I will restore Python 2 support that way.\n"
"Until then, if you must install this for Python 2\n"
"you're on your own. It shouldn't be difficult\n"
"but you'll have to manually backport the package\n"
"source code using a Python 3 to Python 2\n"
"compatibility library such as the python `future`\n"
"module, which provides a python script called\n"
"`pasteurize` that can be run on the source\n"
"directory to automate the backporting process.\n"
"You'll also need to hack this setup script\n"
"to remove any exceptions that are raised when\n"
"executed under Python 2.")
#if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
if (3, 0) <= sys.version_info[:2] < (3, 4):
raise RuntimeError("Python 3.4+ required.")
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
import setuptools
except ImportError:
sys.exit("setuptools required for Python3 install.\n"
"`pip install --upgrade setuptools`")
DISTNAME = 'scikit-nano'
DESCRIPTION = __doc__
LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:])
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
URL = 'http://scikit-nano.org/doc'
DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano'
KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure',
'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure',
'analysis']
LICENSE = 'BSD 2-Clause'
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python
Programming Language :: Python :: 3.4
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Visualization
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
"""
MAJOR = 0
MINOR = 3
MICRO = 21
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
STABLEVERSION = None
if STABLEVERSION is None:
if ISRELEASED:
STABLEVERSION = VERSION
else:
STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1)
# Return the GIT version as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit (!) hackish: we are setting a global variable so that the main
# sknano __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.
builtins.__SKNANO_SETUP__ = True
class CleanCommand(Clean):
description = \
"Remove build directories, __pycache__ directories, " \
".ropeproject directories, and compiled files in the source tree."
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sknano'):
for filename in filenames:
if filename.endswith(('.so', '.pyd', '.pyc', '.dll')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname in ('__pycache__', '.ropeproject'):
shutil.rmtree(os.path.join(dirpath, dirname))
for dirpath, dirnames, filenames in os.walk('doc'):
for dirname in dirnames:
if dirname in ('__pycache__', '.ropeproject'):
shutil.rmtree(os.path.join(dirpath, dirname))
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of sknano.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('sknano/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load sknano/__init__.py
import imp
version = imp.load_source('sknano.version', 'sknano/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
# FULLVERSION += '.dev'
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='sknano/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
stable_version = '%(stable_version)s'
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED),
'stable_version': STABLEVERSION})
finally:
a.close()
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sknano')
config.get_version('sknano/version.py')
return config
def setup_package():
# Rewrite the version file everytime
write_version_py()
# Figure out whether to add ``*_requires = ['numpy>=`min version`',
# 'scipy>=`min version`']``. We don't want to do that unconditionally,
# because we risk updating an installed numpy/scipy which fails too often.
# Just if the minimum version is not installed, we may give it a try.
build_requires = []
try:
import numpy
numpy_version = \
tuple(
list(map(int, numpy.version.short_version.split('.')[:3]))[:2])
if numpy_version < (1, 9):
raise RuntimeError
except (AttributeError, ImportError, RuntimeError):
build_requires += ['numpy==1.10.1']
install_requires = build_requires[:]
try:
import scipy
scipy_version = \
tuple(
list(map(int, scipy.version.short_version.split('.')[:3]))[:2])
if scipy_version < (0, 14):
raise RuntimeError
except (AttributeError, ImportError, RuntimeError):
install_requires += ['scipy==0.16.1']
# # Add six module to install_requires (used in numpydoc git submodule)
# install_requires += ['six>=1.9']
# # Add future module to install requires
# install_requires += ['future>=0.14.3']
install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4']
metadata = dict(
name=DISTNAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
keywords=KEYWORDS,
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
setup_requires=build_requires,
install_requires=install_requires,
extras_require={
'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1']
},
entry_points={
'console_scripts': [
'analyze_structure = sknano.scripts.analyze_structure:main',
'nanogen = sknano.scripts.nanogen:main',
'nanogenui = sknano.scripts.nanogenui:main',
'sknano = sknano.scripts.sknano:main'],
},
cmdclass={'clean': CleanCommand},
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
if len(sys.argv) >= 2 and \
('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean')):
# For these actions, NumPy/SciPy are not required.
# They are required to succeed without them when, for example,
# pip is used to install Scipy when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
FULLVERSION, GIT_REVISION = get_version_info()
metadata['version'] = FULLVERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == '__main__':
setup_package()
| 2.390625 | 2 |
enhanced_stepping/game.py | pauleveritt/visual_debugging_games | 1 | 12793405 | import arcade
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
MOVEMENT_SPEED = 5
class Player(arcade.Sprite):
def update(self):
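        # Move horizontally by the current velocity, then clamp the sprite
        # inside the screen bounds.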
self.center_x += self.change_x
if self.left < 0:
self.left = 0
elif self.right > SCREEN_WIDTH - 1:
self.right = SCREEN_WIDTH - 1
class MyGame(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
arcade.set_background_color(arcade.color.WHEAT)
self.player = Player('player.png', 0.5)
self.player.center_y = 20
self.all_sprites_list = arcade.SpriteList()
self.all_sprites_list.append(self.player)
def on_draw(self):
arcade.start_render()
self.all_sprites_list.draw()
def update(self, delta_time):
self.all_sprites_list.update()
self.player.change_x = MOVEMENT_SPEED
def main():
game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game')
arcade.run()
return game
if __name__ == '__main__':
main()
| 3.21875 | 3 |
services/recommender/api/utils.py | TimothyNguyen/CS497-B | 2 | 12793406 | import os
def get_host(service: str):
'''
Retrieves the host. (Helps with debugging locally)
- Arguments:
- service: a Docker service
- Returns:
a string of either localhost or a Docker service
'''
inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False)
    return service if inside_docker else 'localhost'
| 3.1875 | 3
fishbowl/body-frame-calc/configure.py | cuauv/software | 70 | 12793407 | #!/usr/bin/env python
from build import ninja_common
build = ninja_common.Build("fishbowl/body-frame-calc")
files = [
'main.cpp',
]
build.build_cmd('auv-body-frame-calc',
files,
pkg_confs=['eigen3'],
auv_deps=[],
lflags=[],
cflags=[])
| 1.523438 | 2 |
xs/utils/data/dataset.py | eLeVeNnN/xshinnosuke | 290 | 12793408 | <filename>xs/utils/data/dataset.py
class DataSet:
def __init__(self, *datas):
self.datas = list(datas)
def __len__(self):
return len(self.datas[0])
def __getitem__(self, item):
ret_list = []
for data in self.datas:
ret_list.append(data[item])
return ret_list
| 2.921875 | 3 |
masking.py | Pineapple-1/open-cv | 1 | 12793409 | <reponame>Pineapple-1/open-cv
# FOR FOCUSING ANY THING WE USE MASKING
import cv2 as cv
import numpy as np
CATS=cv.imread('Photos/cats.jpg')
# IMPORTANT: THE MASK AND THE IMAGE SHOULD HAVE THE SAME DIMENSIONS, OTHERWISE IT WON'T WORK
BLANK = np.zeros(CATS.shape[:2],dtype='uint8')
# WE CAN ALSO DO THIS WITH OTHER SHAPES
MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1)
# MASKED IMAGE
MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK)
cv.imshow('MASKED',MASKIMG)
cv.imshow('MASK',MASK)
cv.imshow('CATS',CATS)
cv.waitKey(0)
| 3.046875 | 3 |
pot-stat.py | wp-persian/pot-stat | 0 | 12793410 | <filename>pot-stat.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributor(s): <NAME>. (manionline.<EMAIL>)
from __future__ import print_function
import sys
import re
import operator
class WordCounter:
def __init__(self, blacklist = None):
        self.tokenizer = re.compile(r"\W+")  # alternative: re.compile(r"\b(\w)+\b")
self.blacklist = blacklist if isinstance(blacklist, set) else set()
self.reset()
def reset(self):
self.words = dict()
self.update_counter = 0
def update(self, text):
self.update_counter += 1
words = self.tokenizer.split(text)
for w in words:
w = w.lower()
if len(w)>1 and w not in self.blacklist:
                if w in self.words:
                    self.words[w] += 1
                else:
                    self.words[w] = 1
def toCSV(self):
for word, count in sorted(
self.words.items(), key=operator.itemgetter(1), reverse=True):
print("%s, %s" % (word, count), file=sys.stdout)
def main():
if len(sys.argv) == 1:
print("Usage: python pot-stat.py potfile1.pot potfile2.pot ...")
exit(1)
msgid = re.compile("msgid \"(.*)\"")
wc = WordCounter()
prev_msgs = 0
prev_tokens = 0
for filename in sys.argv[1:]:
with open(filename) as lines:
for l in lines:
match = msgid.split(l)
if len(match) == 3:
wc.update(match[1])
print("%s: %s messages, %s tokens" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr)
prev_tokens = len(wc.words)
prev_msgs = wc.update_counter
print("Total: %s messages, %s tokens" % (wc.update_counter, len(wc.words)), file=sys.stderr)
wc.toCSV()
if __name__ == "__main__":
main()
| 2.6875 | 3 |
certifico/handlers/certificate.py | pantuza/certifico | 0 | 12793411 | from flask import request
from flask import abort
from flask import url_for
from flask import render_template
from bson.objectid import ObjectId
from certifico import app
from certifico import mongo
from certifico import redis_queue
from certifico.mail import send_email
from certifico.forms import CertificateForm
def create_certificate():
form = CertificateForm()
if form.validate_on_submit():
certificate = mongo.db.certificates.insert_one({
'logo': form.data['logo'],
'message': form.data['message'],
'participants': form.participants_list
})
for p in form.participants_list:
redis_queue.enqueue(
send_email,
to_email=p.get('email'),
certificateLink=url_for(
'print_certificate',
certificate=certificate.inserted_id,
email=p.get('email'),
_external=True
)
)
        # Portuguese: "The certificates for event %s have been sent."
        return 'Os certificados do evento %s foram ' \
               'enviados.' % certificate.inserted_id
return render_template('index.html', form=form,
analytics=app.config.get('GOOGLE_ANALYTICS')), 400
def print_certificate(certificate):
email = request.args.get('email')
if not email:
return abort(404)
certificate = mongo.db.certificates.find_one_or_404({
'_id': ObjectId(certificate)
})
try:
participant = next(filter(
lambda p: p.get('email') == email, certificate.get('participants')
))
except StopIteration:
return abort(404)
message = certificate.get('message')
message = message.replace(
'[participante]',
participant.get('name').upper())
return render_template(
'print.html',
logo=certificate.get('logo'),
message=message
)
| 2.078125 | 2 |
acidipy/util.py | jhtut/acidipy | 6 | 12793412 | '''
Created on 2016. 10. 26.
@author: "comfact"
'''
import re
from .model import *
def deployACI(desc, verbose=False, debug=False):
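    '''
    Deploy the ACI configuration described by ``desc`` onto an APIC controller.

    Minimal expected shape of ``desc``, inferred from the lookups below
    (other sections are optional):
        {'Controller': {'ip': ..., 'user': ..., 'pwd': ...},
         'Option': {'deleteEmptyTenant': False, 'deployIncremental': False},
         'Tenant': [...]}
    '''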
try: dom_ip = desc['Controller']['ip']
except: exit(1)
try: dom_user = desc['Controller']['user']
except: exit(1)
try: dom_pwd = desc['Controller']['pwd']
except: exit(1)
try: delete_empty_tenant = desc['Option']['deleteEmptyTenant']
except: delete_empty_tenant = False
try: deploy_incremental = desc['Option']['deployIncremental']
except: deploy_incremental = False
try:
dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug)
except:
if verbose: print('Connection Failed : %s, %s, %s\n' % (dom_ip, dom_user, dom_pwd))
exit(1)
if verbose: print('Get Controller : %s, %s, %s\n' % (dom_ip, dom_user, dom_pwd))
common = dom.Tenant('common')
tenant_objs = {}
flt_objs = {}
ctr_objs = {}
ctx_objs = {}
l3e_objs = {}
bd_objs = {}
fe_objs = {}
sj_objs = {}
sn_objs = {}
ap_objs = {}
epg_objs = {}
delete_tenants = []
def parse_desc_unit(unit):
ret = {}
for key in unit:
        if re.search(r'^[a-z]\w*', key): ret[key] = unit[key]
return ret
tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else []
for tenant in tenant_list:
tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant))
tenant_objs[tenant_obj['dn']] = tenant_obj
if verbose: print('UPDATE >> fvTenant.dn=%s\n' % tenant_obj['dn'])
tenant_flt_objs = {}
tenant_ctr_objs = {}
tenant_ctx_objs = {}
tenant_l3e_objs = {}
tenant_bd_objs = {}
tenant_fe_objs = {}
tenant_sj_objs = {}
tenant_sn_objs = {}
tenant_ap_objs = {}
tenant_epg_objs = {}
#=======================================================================
# Create & Update
#=======================================================================
flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else []
for flt in flt_list:
flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt))
if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\n' % flt_obj['dn'])
flt_objs[flt_obj['dn']] = flt_obj
tenant_flt_objs[flt_obj['name']] = flt_obj
fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else []
for fe in fe_list:
fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe))
if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\n' % fe_obj['dn'])
fe_objs[fe_obj['dn']] = fe_obj
tenant_fe_objs[fe_obj['name']] = fe_obj
ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else []
for ctr in ctr_list:
ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr))
if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\n' % ctr_obj['dn'])
ctr_objs[ctr_obj['dn']] = ctr_obj
tenant_ctr_objs[ctr_obj['name']] = ctr_obj
sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []
for sj in sj_list:
sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj))
if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\n' % sj_obj['dn'])
sj_objs[sj_obj['dn']] = sj_obj
tenant_sj_objs[sj_obj['name']] = sj_obj
ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else []
for ctx in ctx_list:
ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx))
if verbose: print('UPDATE >> Context:fvCtx.dn=%s\n' % ctx_obj['dn'])
ctx_objs[ctx_obj['dn']] = ctx_obj
tenant_ctx_objs[ctx_obj['name']] = ctx_obj
l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else []
for l3e in l3e_list:
l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e))
if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\n' % l3e_obj['dn'])
l3e_objs[l3e_obj['dn']] = l3e_obj
tenant_l3e_objs[l3e_obj['name']] = l3e_obj
bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else []
for bd in bd_list:
bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd))
if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\n' % bd_obj['dn'])
bd_objs[bd_obj['dn']] = bd_obj
tenant_bd_objs[bd_obj['name']] = bd_obj
sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else []
for sn in sn_list:
sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn))
if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\n' % sn_obj['dn'])
sn_objs[sn_obj['dn']] = sn_obj
tenant_sn_objs[sn_obj['name']] = sn_obj
ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else []
for ap in ap_list:
ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap))
if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\n' % ap_obj['dn'])
ap_objs[ap_obj['dn']] = ap_obj
tenant_ap_objs[ap_obj['name']] = ap_obj
epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []
for epg in epg_list:
epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg))
if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\n' % epg_obj['dn'])
epg_objs[epg_obj['dn']] = epg_obj
tenant_epg_objs[epg_obj['name']] = epg_obj
#=======================================================================
# Relations
#=======================================================================
for ctr in ctr_list:
sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []
for sj in sj_list:
if 'Filter' in sj:
for flt in sj['Filter']:
try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt])
except:
try: tenant_sj_objs[sj['name']].relate(common.Filter(flt))
except:
if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\n' % (sj['name'], flt))
if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\n' % (sj['name'], flt))
        for l3e in l3e_list:
            if 'Context' in l3e:
                try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']])
                except:
                    try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context']))
                    except:
                        if verbose: print('RELATE FAILED >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\n' % (l3e['name'], l3e['Context']))
                if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\n' % (l3e['name'], l3e['Context']))
for bd in bd_list:
if 'Context' in bd:
try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']])
except:
try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context']))
except:
if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\n' % (bd['name'], bd['Context']))
if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\n' % (bd['name'], bd['Context']))
if 'L3External' in bd:
try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']])
except:
try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External']))
except:
if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\n' % (bd['name'], bd['L3External']))
if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\n' % (bd['name'], bd['L3External']))
        for ap in ap_list:
            epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []
            for epg in epg_list:
                if 'BridgeDomain' in epg:
                    try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']])
                    except:
                        try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain']))
                        except:
                            if verbose: print('RELATE FAILED >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\n' % (epg['name'], epg['BridgeDomain']))
                    if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\n' % (epg['name'], epg['BridgeDomain']))
                if 'Consume' in epg:
                    for cons in epg['Consume']:
                        try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons])
                        except:
                            try: tenant_epg_objs[epg['name']].relate(common.Contract(cons))
                            except:
                                if verbose: print('RELATE FAILED >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\n' % (epg['name'], cons))
                        if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\n' % (epg['name'], cons))
                if 'Provide' in epg:
                    for prov in epg['Provide']:
                        try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov])
                        except:
                            try: tenant_epg_objs[epg['name']].relate(common.Contract(prov))
                            except:
                                if verbose: print('RELATE FAILED >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\n' % (epg['name'], prov))
                        if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\n' % (epg['name'], prov))
                if 'Path' in epg:
                    for path in epg['Path']:
                        ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf'])
                        tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path))
                        if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\n' % (epg['name'], path['Pod'] + '/' + path['Node'] + '/' + path['Intf']))
if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0:
delete_tenants.append(tenant['name'])
def object_delete(obj):
dn = obj['dn']
obj.delete()
if verbose: print('DELETE >> %s.dn=%s\n' % (obj.class_name, dn))
def recursive_delete(obj):
children = obj.children()
for child in children:
if isinstance(child, aciFilterModel): recursive_delete(child)
elif isinstance(child, aciContractModel): recursive_delete(child)
elif isinstance(child, aciContextModel): recursive_delete(child)
elif isinstance(child, aciL3OutModel): recursive_delete(child)
elif isinstance(child, aciBridgeDomainModel): recursive_delete(child)
elif isinstance(child, aciFilterEntryModel): recursive_delete(child)
elif isinstance(child, aciSubjectModel): recursive_delete(child)
elif isinstance(child, aciSubnetModel): recursive_delete(child)
elif isinstance(child, aciAppProfileModel): recursive_delete(child)
elif isinstance(child, aciEPGModel): recursive_delete(child)
if isinstance(obj, aciFilterModel):
if obj['dn'] not in flt_objs: object_delete(obj)
elif isinstance(obj, aciContractModel):
if obj['dn'] not in ctr_objs: object_delete(obj)
elif isinstance(obj, aciContextModel):
if obj['dn'] not in ctx_objs: object_delete(obj)
elif isinstance(obj, aciL3OutModel):
if obj['dn'] not in l3e_objs: object_delete(obj)
elif isinstance(obj, aciFilterEntryModel):
if obj['dn'] not in fe_objs: object_delete(obj)
elif isinstance(obj, aciSubjectModel):
if obj['dn'] not in sj_objs: object_delete(obj)
elif isinstance(obj, aciBridgeDomainModel):
if obj['dn'] not in bd_objs: object_delete(obj)
elif isinstance(obj, aciAppProfileModel):
if obj['dn'] not in ap_objs: object_delete(obj)
elif isinstance(obj, aciSubnetModel):
if obj['dn'] not in sn_objs: object_delete(obj)
elif isinstance(obj, aciEPGModel):
if obj['dn'] not in epg_objs: object_delete(obj)
if not deploy_incremental:
for tenant in tenant_list:
try: tenant_obj = dom.Tenant(tenant['name'])
except: continue
recursive_delete(tenant_obj)
if tenant['name'] in delete_tenants:
object_delete(tenant_obj)
dom.close()
return {'Tenant' : tenant_objs.keys(),
'Filter' : flt_objs.keys(),
'Contract' : ctr_objs.keys(),
'Context' : ctx_objs.keys(),
'L3External' : l3e_objs.keys(),
'BridgeDomain' : bd_objs.keys(),
'FilterEntry' : fe_objs.keys(),
'Subject' : sj_objs.keys(),
'Subnet' : sn_objs.keys(),
'AppProfile' : ap_objs.keys(),
'EPG' : epg_objs.keys()}
| 1.867188 | 2 |
aionpc/raw_connection.py | PrVrSs/aionpc | 0 | 12793413 | <reponame>PrVrSs/aionpc<gh_stars>0
import asyncio
import socket
from functools import partial
from .struct import Address
from .protocol_behavior import ProtocolBehavior
class RawProtocol(asyncio.Protocol):
def __init__(
self,
pysocket: socket.socket,
address: Address,
protocol_behavior: ProtocolBehavior,
):
self._socket = pysocket
self._dst_address = address
self._protocol_behavior = protocol_behavior
self._transport = None
def connection_made(self, transport):
self._transport = transport
def data_received(self, data: bytes):
self._protocol_behavior.response(data)
def connection_lost(self, exc):
self._protocol_behavior.cancel()
def _send(self, packet):
self._socket.sendto(packet.data, self._dst_address)
return self._protocol_behavior.complete_condition()
async def request(self, packet):
try:
return await self._protocol_behavior.request(self._send, packet)
except asyncio.CancelledError:
raise
except Exception:
raise
class RawConnection:
def __init__(
self,
address: Address,
family: int,
proto: int,
protocol_behavior: ProtocolBehavior,
*,
loop=None
):
self._loop = loop or asyncio.get_running_loop()
self._transport = None
self._client = None
self._pipe = self._create_pipe(family, proto)
protocol_factory = partial(
RawProtocol,
pysocket=self._pipe,
address=address,
protocol_behavior=protocol_behavior,
)
self._create_connection = partial(
self._loop.connect_read_pipe,
protocol_factory=protocol_factory,
pipe=self._pipe,
)
@staticmethod
def _create_pipe(family: int, proto: int) -> socket.socket:
return socket.socket(
family=family,
type=socket.SOCK_RAW | socket.SOCK_NONBLOCK,
proto=proto,
)
def __await__(self):
return self.__await_impl__().__await__()
async def __aenter__(self):
return await self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self._transport.close()
self._pipe.close()
async def __await_impl__(self):
self._transport, self._client = await self._create_connection()
return self._client
__iter__ = __await__
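# Example usage (a sketch; assumes a concrete ProtocolBehavior instance and a
# packet object exposing a ``.data`` bytes payload, as used by RawProtocol._send;
# the Address construction is likewise assumed):
#
# async with RawConnection(address, socket.AF_INET, socket.IPPROTO_ICMP,
#                          behavior) as protocol:
#     reply = await protocol.request(packet)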
| 2.53125 | 3 |
src/airbnb_priceforecaster/features/host_location.py | andersbogsnes/airbnb_priceforecaster | 0 | 12793414 | """
host_location
=============
Where the host is located. Hypothesis that the host being somewhere else affects the price
Text of where the host is located. Could be used to extract features from
dtype: string
"""
| 2.5625 | 3 |
venv/lib/python3.8/site-packages/urllib3/exceptions.py | GiulianaPola/select_repeats | 2 | 12793415 | <filename>venv/lib/python3.8/site-packages/urllib3/exceptions.py
/home/runner/.cache/pip/pool/d0/c9/e7/a372874cd7d745f63beb7f0db9f38f9146fa9973a6f8baa3fb8c76c3c0
| 1.210938 | 1
bokbokbok/eval_metrics/regression/regression_eval_metrics.py | orchardbirds/bokbokbok | 8 | 12793416 | <filename>bokbokbok/eval_metrics/regression/regression_eval_metrics.py
import numpy as np
def LogCoshMetric(XGBoost=False):
"""
Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to
Mean Absolute Error.
Args:
XGBoost (Bool): Set to True if using XGBoost. We assume LightGBM as default use.
Note that you should also set `maximize=False` in the XGBoost train function
"""
def log_cosh_error(yhat, dtrain, XGBoost=XGBoost):
"""
        Log Cosh Error.
yhat: Predictions
dtrain: The XGBoost / LightGBM dataset
XGBoost (Bool): If XGBoost is to be implemented
"""
y = dtrain.get_label()
elements = np.log(np.cosh(yhat - y))
if XGBoost:
return 'LogCosh', float(np.sum(elements) / len(y))
else:
return 'LogCosh', float(np.sum(elements) / len(y)), False
return log_cosh_error
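# Example usage with LightGBM (a sketch; assumes 'params' and 'train_set'
# already exist):
# booster = lgb.train(params, train_set, feval=LogCoshMetric())
# With XGBoost, pass feval=LogCoshMetric(XGBoost=True) and set maximize=False,
# as noted in the docstring above.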
def RMSPEMetric(XGBoost=False):
"""
Calculates the Root Mean Squared Percentage Error:
https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation
The corresponding Loss function is Squared Percentage Error.
Args:
XGBoost (Bool): Set to True if using XGBoost. We assume LightGBM as default use.
Note that you should also set `maximize=False` in the XGBoost train function
"""
def RMSPE(yhat, dtrain, XGBoost=XGBoost):
"""
        Root Mean Squared Percentage Error.
        All input labels are required to be non-zero (they appear in the denominator).
yhat: Predictions
dtrain: The XGBoost / LightGBM dataset
XGBoost (Bool): If XGBoost is to be implemented
"""
y = dtrain.get_label()
elements = ((y - yhat) / y) ** 2
if XGBoost:
return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y)))
else:
return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))), False
    return RMSPE
| 3.09375 | 3
config_reader.py | Shreyas2512/Text-Clustering | 8 | 12793417 | """
Config Reader
@author: <NAME>
"""
#!/usr/bin/python
from ConfigParser import ConfigParser
class ConfigParse():
'''
This class reads config.ini file and sets the required user inputs
in the class attributes.
Attributes
----------
1. word2vec_model
Type: str
Description: Path of word2vec trained model file.
Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'
2. threshold
Type: float
Description: Threshold value to be used for clustering
Default Value: 0.80
3. input_file_path
Type: str
Description: Path of input text file containing sentences to be clustered.
Default Value: None
4. output_dir_path
Type: str
Description: Path of directory where output clusters are to be kept.
Default Value: output_clusters
5. cluster_overlap
Type: bool
Description: If set to False, then no two clusters will have same sentence.
Default Value: True
6. word_vector_dim
Type: int
Description: Dimension of word vectors.
Default Value: 300
7. representative_word_vector
Type: str
Description: Specify whether the representative sentence of each cluster is to be
computed using "add" or "average".
Default Value: average
'''
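    # Example config.ini consumed by config_reader() below -- values are
    # illustrative; the section and option names are the ones actually read:
    #
    #   [Input Variables]
    #   word2vec_model = GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin
    #   threshold = 0.80
    #   input_file_path = ./sentences.txt
    #   output_dir_path = ./output_clusters
    #   cluster_overlap = True
    #   word_vector_dim = 300
    #   representative_word_vector = average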
def __init__(self):
'''
This method declares the class attributes.
'''
self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'
self.threshold = 0.80
self.input_file_path = None
self.output_dir_path = './output_clusters'
self.cluster_overlap = True
self.word_vector_dim = 300
self.representative_word_vector = 'average'
def config_reader(self):
'''
This method parses the config file and read the variables defined by
the user in the config.ini file. The values of the variables are then
set in the corresponding class attributes.
'''
parser = ConfigParser()
# Read config.ini
parser.read('config.ini')
# Read input variables for the code
if parser.get('Input Variables','word2vec_model'):
self.word2vec_model = parser.get('Input Variables','word2vec_model')
if parser.get('Input Variables','threshold'):
self.threshold = parser.getfloat('Input Variables','threshold')
if parser.get('Input Variables','input_file_path'):
self.input_file_path = parser.get('Input Variables','input_file_path')
if parser.get('Input Variables', 'output_dir_path'):
self.output_dir_path = parser.get('Input Variables', 'output_dir_path')
if parser.get('Input Variables', 'cluster_overlap'):
self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap')
if parser.get('Input Variables', 'word_vector_dim'):
self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim')
if parser.get('Input Variables', 'representative_word_vector'):
            self.representative_word_vector = parser.get('Input Variables', 'representative_word_vector')
| 3.1875 | 3 |
mmdet/core/utils/__init__.py | JustWeZero/mmdetection | 314 | 12793418 | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate'
]
| 1.25 | 1 |
rdfs/core.py | Caterpillar3211/respect_your_dfs | 1 | 12793419 | <gh_stars>1-10
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from .helpers import encoded_array_to_df_compatible_array
from .base import ShapeException, NotADataFrameException, Transformer
class Merger(TransformerMixin):
"""
Merger Object
It is used to merge dataframes with given columns.
It is probably useful only for pipelines, as you
can easily achieve the same result with basic pandas operations.
Unlike other objects, it does not inherit Transformer class,
as it doesn't need to transform dataframe to array or vice-versa.
    You can specify cols_names and cols_values upon creating the object,
    or call the 'new_merge' method with those parameters.
    If the X parameter of the transformation is not a dataframe, an exception is raised.
"""
def __init__(self, cols_names=None, cols_values=None):
self._cols_names = cols_names
self._cols_values = cols_values
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if not isinstance(X, pd.DataFrame):
raise NotADataFrameException
for name, values in zip(self._cols_names, self._cols_values):
X[name] = values
return X
def new_merge(self, cols_names, cols_values):
self._cols_names = cols_names
self._cols_values = cols_values
return self
class CategoryEncoder(Transformer, TransformerMixin):
"""
CategoryEncoder object
Upon creation, you should specify column names that will be encoded.
Alternatively you can set them with set_columns method, or display them
with get_columns method.
It is used to encode categorical attributes of the dataframe.
    It contains its merger (_merger), as well as the specified encoder (_encoder).
Possible encodings:
- 'onehot'
"""
def __init__(self, columns, encoder='onehot', encoder_params=[]):
self._columns = columns
self._merger = Merger()
self._encoder_type = encoder
if self._encoder_type == 'onehot':
self._encoder = OneHotEncoder(*encoder_params)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if not isinstance(X, pd.DataFrame):
raise NotADataFrameException
if self._encoder_type == 'onehot':
values = self._encoder.fit_transform(X[self._columns]).toarray()
values = encoded_array_to_df_compatible_array(values)
features = self._encoder.get_feature_names()
features = [feature[3:] for feature in features]
return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y)
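# Example usage (illustrative column name): one-hot encode a categorical
# column and merge the indicator columns back into the frame.
# df = CategoryEncoder(columns=['color']).fit_transform(df)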
class Imputer(Transformer, TransformerMixin):
"""
Imputer object
It is a wrapper around sklearn.impute.SimpleImputer,
    all it does is take a dataframe as input, transform it into an np.ndarray,
    feed it into the actual SimpleImputer object, and return the result as a
    dataframe with the same exact columns.
"""
def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False):
self._imputer = SimpleImputer(
missing_values, strategy, fill_value, verbose, copy, add_indicator)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if isinstance(X, pd.DataFrame):
self._columns = X.columns
X_tr = self._imputer.fit_transform(self._to_array(X))
X_tr = self._to_df(X_tr)
return X_tr
else:
return self._imputer.fit_transform(X, y)
class AttributeAdder(TransformerMixin):
"""
AttributeAdder object
It is used to add new columns (features) to the dataframe.
Methods:
__init__(name, function, parameters) <- 'name' is the label of new column,
'function' is a function upon which the values will be created,
'parameters' is a list of column names (str) and/or constant parameters.
new_attribute(name, function, parameters) <- ...
fit(X, y) <- returns itself.
transform(X, y) <- performs the transformation (adds new attribute) on the dataframe and returns it.
fit_transform(X, y) <- combined fit(X, y) and transform(X, y).
This is advised in most cases just to stay friendly with sklearn module.
"""
def __init__(self, name, function, parameters):
self.name = name
self.function = function
self.parameters = parameters
def new_attribute(self, name, function, parameters):
self.name = name
self.function = function
self.parameters = parameters
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
parameters = []
for parameter in self.parameters:
if isinstance(parameter, str):
parameter = X[parameter]
parameters.append(parameter)
X[self.name] = self.function(*parameters)
return X
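# Example usage (illustrative column names): derive a ratio feature from two
# existing columns; string parameters are resolved to columns by transform().
# adder = AttributeAdder('rooms_per_household', lambda a, b: a / b,
#                        ['total_rooms', 'households'])
# df = adder.fit_transform(df)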
class Pipesystem(TransformerMixin):
"""
Pipesystem object
It works (it has less features though) as sklearn.Pipeline .
Methods:
        __init__(verbose[, False]) <- if 'verbose' is True, then every time a transformation is made,
it will print out the information about it.
new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered),
pipe_set is expected to be a tuple of name and object
( in that order ). always_active does not have any functionality
at this moment. It is expected for it to be a indicator for automatic
dataframe modeling for best predictions later on.
show_pipeline() <- returns an ordered list with all current pipes.
fit(X, y) <- returns itself.
transform(X, y) <- performs all transformations (from all pipes) on the dataframe and returns it.
fit_transform(X, y) <- combined fit(X, y) and transform(X, y).
This is advised in most cases just to stay friendly with sklearn module.
"""
def __init__(self, verbose=False):
self._pipes = []
self._activated = {}
self._verbose = verbose
def new_pipe(self, pipe_set, always_active=True):
name, pipe = pipe_set
self._pipes.append((name, pipe))
self._activated[name] = True
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
for name, pipe in self._pipes:
if self._activated[name] == False:
continue
if self._verbose:
print(f'> pushing through \'{name}\' with {pipe}')
X = pipe.fit_transform(X)
return X
def show_pipeline(self):
out = []
for name, _ in self._pipes:
if self._activated[name]:
out.append(name)
return out
def _activate_array(self, array):
        for value, (name, _) in zip(array, self._pipes):
if not value:
self._disable_pipe(name)
def _disable_pipe(self, name):
self._activated[name] = False
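# Example usage (a sketch with illustrative pipe names):
# ps = Pipesystem(verbose=True)
# ps.new_pipe(('impute', Imputer(strategy='median')))
# ps.new_pipe(('encode', CategoryEncoder(columns=['category'])))
# df_prepared = ps.fit_transform(df)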
class OptimizedPipesystem(Pipesystem):
"""
OptimizedPipesystem object
Enhanced rdfs.Pipesystem , it uses one of the optimiztion methods to determine
the most promising features without actually training a model.
One way of optimization (and currently, the only one implemented) is correlation.
Upon object creation, specify optimization parameter to 'corr_<int>',
the integer will be the percent rate from 0 to 100 and will act like a filter,
every feature that is less significant than that, will not be a part of returned dataframe.
Methods:
        __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) is the target column (label).
optimization (str) is the method used to optimize.
                                    If 'verbose' is True, then every time a transformation is made,
it will print out the information about it.
new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered),
pipe_set is expected to be a tuple of name and object
( in that order ). always_active does not have any functionality
at this moment. It is expected for it to be a indicator for automatic
dataframe modeling for best predictions later on.
show_pipeline() <- returns an ordered list with all current pipes.
fit(X, y) <- returns itself.
transform(X, y) <- performs all transformations (from all pipes) on the dataframe, chooses the
most meaningful features and returns the dataframe.
fit_transform(X, y) <- combined fit(X, y) and transform(X, y).
This is advised in most cases just to stay friendly with sklearn module.
"""
def __init__(self, optimize_for, optimization='corr_20', verbose=False):
Pipesystem.__init__(self, verbose)
self._target = optimize_for
self._optimization = optimization
self._best_parameters = []
def transform(self, X, y=None):
X = Pipesystem.transform(self, X, y)
opt = getattr(self, '_optimization', 'corr_20')
if opt[:5] == 'corr_':
threshold = int(opt[5:]) / 100
corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict()
self._best_parameters = [name for name in corr_table if abs(corr_table[name]) >= threshold]
        return X[self._best_parameters]
| 2.90625 | 3 |
Scripts/allele_specific_expression/assign_each_read_to_each_allele_of_each_gene.py | LijiangLong/2020-peel-paper | 0 | 12793420 | <filename>Scripts/allele_specific_expression/assign_each_read_to_each_allele_of_each_gene.py
import subprocess,argparse,os
from Bio import SeqIO
import pysam
import pdb
import scipy.stats
N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|',
'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|',
'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|',
'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|',
'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|',
'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|',
'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'}
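# NOTE: the mapping below immediately overrides the accession-style contig
# names above, so only the plain chromosome labels are actually used.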
N2_CB4856_chrom = {'CHROMOSOME_I': 'I',
'CHROMOSOME_II': 'II',
'CHROMOSOME_III': 'III',
'CHROMOSOME_IV': 'IV',
'CHROMOSOME_V': 'V',
'CHROMOSOME_X': 'X',
'CHROMOSOME_MtDNA': 'MtDNA'}
nucleotides=['A','T','C','G']
class gene():
def __init__(self, gene_name, public_name):
self.gene_name = gene_name
self.public_name = public_name
self.SNV_list = []
self.N2_read_list = []
self.CB4856_read_list = []
def add_SNV(self, SNV):
self.SNV_list.append(SNV)
def merge_read_list(self):
read_list = {}
for SNV in self.SNV_list:
for read in SNV.N2_read_list:
try:
read_list[read]+=1
except:
read_list[read]=1
for SNV in self.SNV_list:
for read in SNV.CB4856_read_list:
try:
read_list[read]-=1
except:
read_list[read]=-1
return read_list
def merge_N2_read_list(self):
for SNV in self.SNV_list:
self.N2_read_list += SNV.N2_read_list
self.N2_read_list = list(set(self.N2_read_list))
def merge_CB4856_read_list(self):
for SNV in self.SNV_list:
self.CB4856_read_list += SNV.CB4856_read_list
self.CB4856_read_list = list(set(self.CB4856_read_list))
def count_N2_CB4856_reads(self):
read_list = self.merge_read_list()
N2 = 0
CB4856 = 0
for k,v in read_list.items():
if v > 0:
N2 += 1
elif v < 0:
CB4856 += 1
return N2, CB4856
def cal_Binom_p(self):
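        # Two-sided binomial test: do the N2-specific read counts deviate
        # from the 50:50 split expected if both alleles are expressed equally?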
self.merge_N2_read_list()
self.merge_CB4856_read_list()
N2_count = len(self.N2_read_list)
CB4856_count = len(self.CB4856_read_list)
binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count)
self.binom_p = binom_p
return binom_p
class SNV():
def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature):
self.chrom = chrom
self.N2_position = N2_position
self.CB4856_position = CB4856_position
self.N2_base = N2_base
self.CB4856_base = CB4856_base
self.feature = feature
self.N2_read_list = []
self.CB4856_read_list = []
def add_N2_read(self, read_name):
if read_name not in self.N2_read_list:
self.N2_read_list.append(read_name)
def add_CB4856_read(self, read_name):
if read_name not in self.CB4856_read_list:
self.CB4856_read_list.append(read_name)
parser = argparse.ArgumentParser()
HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/'
parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam')
parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam')
parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856')
parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1')
# parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped to')
args = parser.parse_args()
# pdb.set_trace()
try:
output_f = open(args.output_file,'w')
except (IOError, OSError):
output_f = open('snp_info_verification_output','w')
f = open(args.snp_annotation_file_between_CB4856_WS230,'r')
bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2)
bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856)
i=0
gene_name_list = []
gene_list = []
for line in f:
if line.startswith('#'):
continue
i+=1
if i%500==0:
print(str(i))
chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5]
feature = line.split()[6]
SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature)
gene_name, public_gene_name = line.split()[8:10]
if gene_name == 'NA':
continue
if gene_name not in gene_name_list:
gene_object = gene(gene_name, public_gene_name)
gene_name_list.append(gene_name)
gene_list.append(gene_object)
chrom = 'CHROMOSOME_'+chrom
N2_position = int(N2_position)-1
CB4856_position = int(CB4856_position)-1
pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1)
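    # pileup()/column lookups can fail for contigs missing from the BAM (here
    # and in the CB4856 block below); such sites are skipped rather than
    # aborting the whole run.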
try:
for column in pileups:
if column.pos == N2_position:
break
for read in column.pileups:
if not read.is_del and not read.is_refskip and read.alignment.mapq != 0:
alignment = read.alignment
base = alignment.seq[read.query_position]
if base == N2_nucleotide:
SNV_object.add_N2_read(alignment.qname)
    except Exception:
        pass
pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1)
try:
for column in pileups:
if column.pos == CB4856_position:
break
for read in column.pileups:
if not read.is_del and not read.is_refskip and read.alignment.mapq != 0:
alignment = read.alignment
base = alignment.seq[read.query_position]
if base == CB4856_nucleotide:
SNV_object.add_CB4856_read(alignment.qname)
    except Exception:
        pass
for gene_object in gene_list:
if gene_object.gene_name == gene_name:
break
gene_object.add_SNV(SNV_object)
# output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\n')
# for gene in gene_list:
# file_name = '/Volumes/Lijiang_data/datasets/Tajima\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv'
# output_file = open(file_name,'w')
# binom_p = gene.cal_Binom_p()
# for SNV in gene.SNV_list:
# SNV_info = ''
# SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+','
# # +':feature:'+SNV.feature
# SNV_info += str(len(SNV.N2_read_list))+','
# # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+
# SNV_info += str(len(SNV.CB4856_read_list))+','
# # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+
# SNV_info += SNV.feature +'\n'
# output_file.write(SNV_info)
# output_file.close()
# output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\n')
output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\n')
for gene in gene_list:
N2_count,CB4856_count = gene.count_N2_CB4856_reads()
output_f.write(gene.gene_name+','+gene.public_name+','+str(N2_count)+','+str(CB4856_count)+'\n')
output_f.close()
f.close()
| 2.71875 | 3 |
lfd_hw1_introd/hw1_test.py | MahmutOsmanovic/machine-learning-mooc-caltech | 0 | 12793421 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 12:21:09 2021
@author: Mahmu
"""
import random
import pylab
import numpy as np
x1 = random.uniform(-1, 1)
y1 = random.uniform(-1, 1)
print(str(x1) + "\n" + str(y1)) | 3.359375 | 3 |
algs2e_python/Chapter 06/python/quicksort_in_place.py | bqmoreland/EASwift | 0 | 12793422 | <reponame>bqmoreland/EASwift<filename>algs2e_python/Chapter 06/python/quicksort_in_place.py
import tkinter as tk
import random
import time
def quicksort(values):
""" Use quicksort to sort the array."""
# Sort the whole array.
do_quicksort(values, 0, len(values) - 1)
def do_quicksort(values, start, end):
""" Sort the indicated part of the array."""
# If the list has no more than 1 element, it's sorted.
if start >= end:
return
# Use the first item as the dividing item.
divider = values[start]
# Move items < divider to the front of the array and
# items >= divider to the end of the array.
lo = start
hi = end
while True:
# Look down from hi for a value < divider.
while values[hi] >= divider:
hi -= 1
if hi <= lo:
break
if hi <= lo:
# Put the divider here and break out of the outer While loop.
values[lo] = divider
break
# Move the value we found to the lower half.
values[lo] = values[hi]
# Look up from lo for a value >= divider.
lo += 1
while values[lo] < divider:
lo += 1
if lo >= hi:
break
if lo >= hi:
# Put the divider here and break out of the outer While loop.
lo = hi
values[hi] = divider
break
# Move the value we found to the upper half.
values[hi] = values[lo]
# Recursively sort the two halves.
do_quicksort(values, start, lo - 1)
do_quicksort(values, lo + 1, end)
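
# Average case O(n log n); worst case O(n^2) when the chosen divider (the
# first item) is repeatedly an extreme value, e.g. on already-sorted input.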
class App:
def kill_callback(self):
self.window.destroy()
def __init__(self):
self.window = tk.Tk()
self.window.title("quicksort_in_place")
        self.window.protocol("WM_DELETE_WINDOW", self.kill_callback)
self.window.geometry("300x300")
frame = tk.Frame(self.window)
frame.pack(padx=5, pady=5, fill=tk.X)
label = tk.Label(frame, text="# Items:")
label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)
self.num_items_entry = tk.Entry(frame, width=12)
self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W)
self.num_items_entry.insert(0, "1000")
generate_button = tk.Button(frame, width=8, text="Generate", command=self.generate)
generate_button.grid(padx=5, pady=2, row=0, column=2)
sort_button = tk.Button(frame, width=8, text="Sort", command=self.sort)
sort_button.grid(padx=5, pady=2, row=0, column=3)
frame = tk.Frame(self.window)
frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
scrollbar = tk.Scrollbar(frame)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.listbox = tk.Listbox(frame)
self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True)
self.listbox.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=self.listbox.yview)
# Bind some keys.
self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke()))
# Force focus so Alt+F4 closes this window and not the Python shell.
self.num_items_entry.focus_force()
self.window.mainloop()
def generate(self):
""" Make random items."""
num_items = int(self.num_items_entry.get())
self.items = []
for i in range(num_items):
self.items.append(random.randint(100000, 999999))
self.show_values()
def sort(self):
""" Sort the items."""
start_time = time.time()
quicksort(self.items)
elapsed_time = time.time() - start_time
print(f"{elapsed_time} seconds")
self.show_values()
# Verify the sort.
for i in range(1, len(self.items)):
assert self.items[i] >= self.items[i - 1], f"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})"
def show_values(self):
""" Show up to 1000 values."""
self.listbox.delete(0, tk.END)
for i in range(min(len(self.items), 1000)):
self.listbox.insert(tk.END, self.items[i])
if __name__ == '__main__':
app = App()
# app.root.destroy()
| 3.859375 | 4 |
app.py | fossabot/VeraBot | 0 | 12793423 | #External
import discord
from discord.ext import commands
from discord.ext.commands.errors import CommandNotFound
#Python
import asyncio
from datetime import datetime as dtime
from datetime import timezone, timedelta
import re
#Internal
from membership_handling import MembershipHandler
from settings import Settings
from membership import Membership
from utility import Utility
from ocr import OCR
from sending import Sending
from pymongo import MongoClient
import os
### Setup data
# Set variable to true for local testing
local = False
# Customizable Settings
# For local testing
token = os.getenv("TOKEN")
owner_id = int(os.getenv("OWNER_ID"))
embed_color = int(os.getenv("EMBED_COLOR"), 16)
db_user = os.getenv("DB_USER")
db_pass = os.getenv("DB_PASS")  # "DB_PASS" is assumed; the original name was redacted
db_url = os.getenv("DB_LINK")
dm_log = int(os.getenv("DM_LOG"))
# Intents
intents = discord.Intents.default()
intents.members = True
intents.invites = False
intents.emojis = False
intents.typing = False
intents.integrations = False
intents.webhooks = False
intents.voice_states = False
intents.guild_typing = False
async def determine_prefix(bot, message):
if isinstance(message.channel, discord.channel.DMChannel):
return "$"
guild = message.guild
if guild:
prefixes = db_cluster[str(guild.id)]["settings"].find_one({"kind": "prefixes"})["values"]
if prefixes:
return prefixes
return "$"
# Set up bot
bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\nlogChannel, Vtuber name and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id)
# database settings
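# db_url is expected to contain two '{}' placeholders that are filled with the
# database user and password below.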
db_cluster = MongoClient(db_url.format(db_user, db_pass))
# set up classes
member_handler = MembershipHandler(bot, db_cluster, embed_color)
Utility.setup(bot, db_cluster, embed_color)
OCR.setup(bot, local)
Sending.setup(bot, embed_color)
#add cogs
bot.add_cog(Settings(bot, db_cluster))
bot.add_cog(Membership(bot, member_handler))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, CommandNotFound):
# Ignore this error
pass
elif isinstance(error, commands.MissingPermissions):
await ctx.send("You are not allowed to use this command!")
elif isinstance(error, commands.NoPrivateMessage):
await ctx.send("This command should not be used in the DMs")
elif hasattr(ctx.command, 'on_error'):
#skip already locally handled errors
pass
else:
raise error
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
@bot.event
async def on_guild_join(guild):
"""
Creates the database and settings collection when the bot joins a server.
"""
print("Joined new Guild: " + str(guild.id))
dbnames = db_cluster.list_database_names()
if not str(guild.id) in dbnames:
new_guild_db = db_cluster[str(guild.id)]
settings = new_guild_db["settings"]
# Create base configuration
json = { "kind": "prefixes", "values" : ['$']}
settings.insert_one(json)
json = {"kind": "member_role", "value" : 0}
settings.insert_one(json)
json = {"kind": "log_channel", "value" : 0}
settings.insert_one(json)
json = {"kind": "mod_role", "value" : 0}
settings.insert_one(json)
json = {"kind": "picture_link", "value" : "https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg"} #hololive logo
settings.insert_one(json)
json = {"kind": "automatic_role", "value" : False}
settings.insert_one(json)
json = {"kind": "require_additional_proof", "value" : False}
settings.insert_one(json)
json = {"kind": "tolerance_duration", "value" : 1}
settings.insert_one(json)
json = {"kind": "inform_duration", "value" : 1}
settings.insert_one(json)
@bot.event
async def on_guild_remove(guild):
"""
Removes the guild from the supported idols so that memberships are not checked.
"""
print("Left Guild: " + str(guild.id))
settings = db_cluster["settings"]["general"]
settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}})
@bot.event
async def on_raw_reaction_add(payload):
# get reaction from payload
if not payload.guild_id:
return
channel = bot.get_channel(payload.channel_id)
try:
msg = await channel.fetch_message(payload.message_id)
reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name)
# only the first react by somebody else than the bot should be processed
if reaction:
if reaction.count != 2:
return
msg = reaction.message
# this handling is not for DMs
# Only process reactions that also were also made by the bot
if not reaction.me:
return
if msg.embeds:
embed = msg.embeds[0]
automatic_role = db_cluster[str(msg.guild.id)]["settings"].find_one({"kind": "automatic_role"})["value"]
# always only the id
target_member_id = int(embed.title)
if reaction.emoji == '✅':
if not automatic_role:
membership_date = embed.fields[0].value
# set membership
await member_handler.set_membership(msg, target_member_id, membership_date)
#always clear
await msg.clear_reactions()
await msg.add_reaction(emoji='👌')
# deny option
elif reaction.emoji == u"\U0001F6AB":
user = bot.get_user(payload.user_id)
text = "Is there an issue with the proof (Faked or no date on screenshot) -> :white_check_mark:\n"
text += "Or is the date recognized incorrectly/was not recognized -> :no_entry_sign:"
confirm_msg = await channel.send(text, reference=msg, mention_author=False)
if await Utility.confirm_action(confirm_msg, user):
confirm_msg = await channel.send("Please write a message that will be sent to the User.", reference=msg, mention_author=False)
def check(m):
return m.author == user and m.channel == channel
text_msg = await bot.wait_for('message', check=check)
target_member = bot.get_user(target_member_id)
await target_member.send(text_msg.content)
await channel.send("Message was sent to user.", reference=text_msg, mention_author=False)
if automatic_role:
await member_handler.del_membership(msg, target_member_id, None, False)
await msg.clear_reactions()
await msg.add_reaction(emoji='👎')
else:
await asyncio.sleep(1)
confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id)
if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1:
                            await channel.send("The reaction took too long! Please remove your reaction from this message and add it again.", reference=msg, mention_author=False)
else:
m = "Please write the correct date from the screenshot in the format dd/mm/yyyy."
await channel.send(m, reference=msg, mention_author=False)
def check(m):
return m.author == user and m.channel == channel
date_msg = await bot.wait_for('message', check=check)
await member_handler.set_membership(msg, target_member_id, date_msg.content)
await msg.clear_reactions()
await msg.add_reaction(emoji='👎')
except discord.errors.Forbidden:
print(payload.channel_id)
print(payload.guild_id)
@bot.command(
help="Can be called with just $verify but also with $verify <VTuber name>\n" +
"Both versions require a screenshot sent with it.",
brief=" Tries to verify a screenshot for membership in the DMs"
)
@commands.dm_only()
async def verify(ctx, *vtuber):
"""
Command in the DMs that tries to verify a screenshot for membership.
"""
# log content to dm log channel for record
dm_lg_ch = bot.get_channel(dm_log)
await dm_lg_ch.send("{}\n{}".format(str(ctx.author),ctx.message.content))
for attachment in ctx.message.attachments:
await dm_lg_ch.send(attachment.url)
if vtuber:
server = map_vtuber_to_server(vtuber[0])
if server:
await member_handler.verify_membership(ctx.message, server)
else:
embed = Utility.create_supported_vtuber_embed()
await ctx.send(content ="Please use a valid supported VTuber!", embed = embed)
else:
await member_handler.verify_membership_with_server_detection(ctx.message)
@verify.error
async def verify_error(ctx, error):
if isinstance(error, commands.PrivateMessageOnly):
await ctx.send("This command only works in DMs!")
@bot.command(hidden = True, name = "checkIdols")
@commands.is_owner()
async def check(ctx):
Utility.create_supported_vtuber_embed()
await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols'])
@bot.command(hidden = True, name = "forceCheck")
@commands.is_owner()
async def force_member_check(ctx):
await member_handler.delete_expired_memberships(True)
@bot.command(hidden = True, name = "broadcast")
@commands.is_owner()
async def broadcast(ctx, title, text):
serverlist = db_cluster["settings"]['general'].find_one({'name': "supported_idols"})['supported_idols']
#create Embed
embed = discord.Embed(title = title, description = text, colour = embed_color)
#send to every server
for server in serverlist:
server_db = db_cluster[str(server['guild_id'])]
lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': "log_channel"})['value'])
await lg_ch.send(content = None, embed = embed)
@bot.command(name = "dmMe",
help="Sends a DM containg \"hi\" to the user using the command.",
brief="Sends a DM to the user")
async def send_dm(ctx):
await ctx.author.send("Hi")
@send_dm.error
async def dm_error(ctx, error):
if isinstance(error, discord.errors.Forbidden):
await ctx.send("You need to allow DMs!")
error = None
@bot.command(name="proof",
help = "Allows to send additional proof. Requires the name of the vtuber. Only available in DMs",
brief = "Send additional proof")
@commands.dm_only()
async def send_proof(ctx, vtuber: str):
if not ctx.message.attachments:
await ctx.send("Please include a screenshot of the proof!")
return
server_id = map_vtuber_to_server(vtuber)
    member_veri_ch = bot.get_channel(db_cluster[str(server_id)]["settings"].find_one({"kind": "log_channel"})["value"])
# Send attachment and message to membership verification channel
desc = "{}\n{}".format(str(ctx.author), "Additional proof")
title = ctx.author.id
embed = discord.Embed(title = title, description = None, colour = embed_color)
embed.set_image(url = ctx.message.attachments[0].url)
await member_veri_ch.send(content = "```\n{}\n```".format(desc), embed = embed)
#send confirmation
await ctx.send("Your additional proof was delivered safely!")
@send_proof.error
async def proof_error(ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send("Please do only send a valid name")
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Please include the server name!")
embed = Utility.create_supported_vtuber_embed()
await ctx.send(content=None, embed=embed)
def map_vtuber_to_server(name):
settings_db = db_cluster["settings"]["general"]
result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}})
if 'supported_idols' in result:
return result['supported_idols'][0]['guild_id']
#Time in status
async def jst_clock():
while not bot.is_closed():
try:
now = dtime.now(tz = timezone.utc) + timedelta(hours = 9)
timestr = now.strftime("%H:%M JST, %d/%m/%Y")
await bot.change_presence(activity=discord.Game(name=timestr))
await asyncio.sleep(60)
except ConnectionResetError:
print("Could not update JST Clock!")
# List Coroutines to be executed
coroutines = (
jst_clock(),
member_handler.check_membership_routine(),
)
# Main Coroutine
async def background_main():
await bot.wait_until_ready()
await asyncio.gather(*coroutines)
bot.loop.create_task(background_main())
bot.run(token) | 2.078125 | 2 |
rejected_article_tracker/tests/test_Result.py | sagepublishing/rejected_article_tracker_pkg | 10 | 12793424 | <reponame>sagepublishing/rejected_article_tracker_pkg<gh_stars>1-10
import unittest
import pandas as pd
from ..src.Result import Result
class TestResult(unittest.TestCase):
def test__to_dict(self):
original = {
"manuscript_id": 'TVA-18-057',
"decision_date": pd.to_datetime("2020-09-01", errors='coerce', utc=True),
"submission_date": pd.to_datetime("2020-08-01", errors='coerce', utc=True),
}
winner = {
'DOI': '10.1016/j.jnt.2017.08.038',
'type': 'journal-article',
'title': ['New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'],
'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)',
'authors_list': ['<NAME>', '<NAME>'],
'publisher': 'SAGE',
'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000},
'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000},
'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},
'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},
'similarity': 97,
'author_match_one': 1,
'author_match_all': 1,
'score': 95.2,
'is-referenced-by-count': 3,
'rank': 1,
'container-title': [
'Taxes and Taxation Trends'
]
}
res = Result(original=original, winner=winner).to_dict()
self.assertEqual(res['manuscript_id'], original['manuscript_id'])
self.assertEqual(res['decision_date'], '2020-09-01')
self.assertEqual(res['submission_date'], '2020-08-01')
self.assertEqual(res['match_doi'], winner['DOI'])
self.assertEqual(res['match_type'], winner['type'])
self.assertEqual(res['match_title'], winner['full_title'])
self.assertEqual(res['match_authors'], '<NAME>, <NAME>')
self.assertEqual(res['match_publisher'], 'SAGE')
self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends')
self.assertEqual(res['match_pub_date'], '2020-4-1')
self.assertEqual(res['match_earliest_date'], '2018-04-23')
self.assertEqual(res['match_similarity'], 97)
self.assertEqual(res['match_one'], True)
self.assertEqual(res['match_all'], True)
self.assertEqual(res['match_crossref_score'], 95.2)
self.assertEqual(res['match_crossref_cites'], 3)
self.assertEqual(res['match_rank'], 1)
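        # -862 days: the matched article's 'created' date (2018-04-23) falls
        # 862 days before the decision date (2020-09-01).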
self.assertEqual(res['match_total_decision_days'], -862)
def test__missing_values(self):
original = {
"manuscript_id": 'TVA-18-057',
"decision_date": "2020-09-01",
"submission_date": pd.to_datetime("2020-08-01", errors='coerce', utc=True),
}
winner = {
'DOI': '10.1016/j.jnt.2017.08.038',
'type': 'journal-article',
'title': ['New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'],
'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)',
'authors_list': ['<NAME>', '<NAME>'],
'publisher': 'SAGE',
'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},
'similarity': 97,
'author_match_one': 1,
'author_match_all': 1,
'score': 95.2,
'is-referenced-by-count': 3,
'rank': 1,
'container-title': [
'Taxes and Taxation Trends'
]
}
res = Result(original=original, winner=winner).to_dict()
self.assertEqual(res['decision_date'], '')
| 2.46875 | 2 |
saywiti/regions/models.py | erickgnavar/saywiti | 2 | 12793425 | # -*- coding: utf-8 -*-
from django.contrib.gis.db import models
from django.contrib.postgres.fields.jsonb import JSONField
from django.utils.translation import ugettext_lazy as _
from saywiti.common.models import TimeStampedModel
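
# NOTE: the ForeignKey fields below omit on_delete, which is only valid on
# Django < 2.0 (where it defaults to CASCADE).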
class Level(TimeStampedModel):
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
name = models.CharField(_('Name'), max_length=100)
description = models.CharField(_('Description'), max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Region(TimeStampedModel):
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
level = models.ForeignKey('Level', related_name='regions')
name = models.CharField(_('Name'), max_length=100)
is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False)
osm_tags = JSONField(_('OSM Tags'), null=True, blank=True)
osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True)
polygon = models.PolygonField()
def __str__(self):
return self.name
| 2.203125 | 2 |
backend/test/test_toggles.py | lkoehl/doppelkopf | 0 | 12793426 | from doppelkopf.toggles import Toggle
from datetime import datetime, timedelta
toggles_from_db = [
Toggle(name="db-only", enabled=False),
Toggle(name="db-and-code", enabled=True),
]
toggles_from_code = [
Toggle(name="code-only", enabled=False),
Toggle(name="db-and-code", enabled=False),
]
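
# Expected merge semantics exercised below: toggles defined in both places take
# their enabled state from the database row, while code-only toggles keep their
# code-defined defaults.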
def test_merge_toggles():
merged = Toggle.merge(toggles_from_db, toggles_from_code)
code_and_db = Toggle(
name="db-and-code",
description="a toggle thats defined in code and database",
enabled=True,
)
code_only = Toggle(
name="code-only",
description="a toggle thats only defined in code",
enabled=False,
)
assert code_and_db in merged
assert code_only in merged
assert len(merged) == 2
def test_serialize():
toggle = Toggle(id=1, name="some", enabled=True, description="some description")
expected_serialization = {"id": 1, "name": "some", "enabled": True}
assert toggle.serialize() == expected_serialization
def test_update_toggle_state():
last_changed = datetime.utcnow() - timedelta(days=2)
t = Toggle(name="some-toggle", enabled=False, last_changed_at=last_changed)
t.toggle()
assert t.enabled is True
assert t.last_changed_at > datetime.utcnow() - timedelta(seconds=2)
| 2.609375 | 3 |
{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/tests/test_{{cookiecutter.main_model|lower}}_status.py | huogerac/cookiecutter-djangofloppyforms | 3 | 12793427 | from datetime import datetime
import pytest
from model_bakery import baker
from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}}
from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service
def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db):
my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now())
assert my_{{cookiecutter.main_model|lower}}.status == 'pending'
def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db):
my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now())
{{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id)
assert {{cookiecutter.main_model|lower}}_updated.status == 'done'
def test_should_raise_an_error_for_invalid_{{cookiecutter.main_model|lower}}_id(db):
invalid_{{cookiecutter.main_model|lower}} = 0
with pytest.raises(RuntimeError) as error:
{{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}})
assert str(error.value) == f"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida"
def test_should_mark_as_undone(db):
my_{{cookiecutter.main_model|lower}} = baker.make(
{{cookiecutter.main_model}},
description='Create an ansible deploy script',
due_to=datetime.now(),
done=True)
{{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id)
assert {{cookiecutter.main_model|lower}}_updated.status == 'pending'
| 2.171875 | 2 |
abcmetaclasses.py | KT12/Python | 1 | 12793428 | <filename>abcmetaclasses.py
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 18:15:28 2016
@author: Ken
"""
from abc import ABCMeta, abstractmethod
class Pet(object):
__metaclass__ = ABCMeta
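    # Python 2 metaclass syntax; the Python 3 equivalent is
    # `class Pet(metaclass=ABCMeta)`.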
def __init__(self,name):
self.name = name
@abstractmethod
def can_swim(self):
pass
@abstractmethod
def speak(self):
pass
class Dog(Pet):
def can_swim(self):
super(Dog, self).can_swim()
return True
def speak(self):
super(Dog, self).speak()
return self.name + ' says "Woof!"'
class Cat(Pet):
def can_swim(self):
super(Cat, self).can_swim()
return False
def speak(self):
super(Cat, self).speak()
return self.name + ' says "Meow!"'
class Fish(Pet):
def can_swim(self):
super(Fish, self).can_swim()
return True
def speak(self):
super(Fish, self).speak()
return self.name + " can't speak."
class Plant(Pet):
def can_swim(self):
super(Plant, self).can_swim()
return False
def speak(self):
super(Plant, self).speak()
return self.name + " can't speak." | 3.90625 | 4 |
test_tenacity/main_test.py | Etuloser/python-playground | 0 | 12793429 | <gh_stars>0
import unittest
from test_tenacity.main import do_something_unreliable
class TestMain(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_do_something_unreliable(self):
got = do_something_unreliable()
print(got)
| 2.4375 | 2 |
node/executor.py | ktrany/pbft-poc | 0 | 12793430 | <reponame>ktrany/pbft-poc
#! /usr/bin/env python3
import index
from loggerWrapper import LoggerWrapper
import subprocess
import time
log = LoggerWrapper(__name__, index.PATH).logger
class Executor:
def __init__(self):
pass
def runTask(self, repoCloneUrl, targetBranch, imageTag):
start = time.perf_counter()
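        # `docker build <repo-url>#<branch>` lets the Docker daemon clone the
        # repository and build from the named branch without a local checkout.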
buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8')
if buildProcess.returncode != 0:
errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}'
log.debug(errMessage)
return buildProcess
log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}')
taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8')
log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}')
# delete image for next run
deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8')
log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}')
log.info(f'result: {taskProcess}')
end = time.perf_counter()
log.info(f'Task execution time: {end - start}s')
return taskProcess | 1.953125 | 2 |
third_party/hlpr_object_labeling/src/object_labeling.py | kirmani/hlpr_cadence | 0 | 12793431 | <reponame>kirmani/hlpr_cadence
#!/usr/bin/env python
import os
import sys, time, math, cmath
from std_msgs.msg import String, Header
import numpy as np
import cv2
import roslib
import rospy
import pdb
import tf
import itertools
from Tkinter import *
from hlpr_feature_extraction.msg import PcFeatureArray
from hlpr_object_labeling.msg import LabeledObjects
from std_msgs.msg import String
pf = None
display = None
initX = None
def get_param(name, value=None):
private = "~%s" % name
if rospy.has_param(private):
return rospy.get_param(private)
elif rospy.has_param(name):
return rospy.get_param(name)
else:
return value
class filter:
def __init__(self):
self.labeled = None
self.tracked = None
self.errors = None
self.ids = None
self.labels = None
self.initialized = False
self.run = True
self.br = tf.TransformBroadcaster()
self.hueW = get_param("hsv_hue_weight", 2)
self.satW = get_param("hsv_sat_weight",1)
self.valW = get_param("hsv_val_weight",1)
self.sizeW = get_param("size_weight",50000)
fileref = get_param("feature_file_location")
print fileref
if fileref is not None:
self.filename = os.path.expanduser(fileref)
else:
self.filename = None
topicref = get_param("feature_file_rostopic")
if topicref is not None:
self.rostopic = os.path.expanduser(topicref)
self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1)
self.subscriber = rospy.Subscriber("/beliefs/features", PcFeatureArray, self.cbClusters, queue_size = 1)
self.pauseSub = rospy.Subscriber("/pause_labeling", String, self.cbPause, queue_size = 1)
self.orderPub = rospy.Publisher("/beliefs/labels", LabeledObjects, queue_size = 1)
def cbFile(self, ros_data):
if self.filename is not ros_data.data:
self.filename = ros_data.data
self.loadObjects()
self.initialized = True
print "Reading object features from " + self.filename
def cbPause(self, ros_data):
if ros_data.data == "pause":
self.run = False
if ros_data.data == "play":
self.run = True
def cbClusters(self, ros_data):
#Wait for filename to be received (if receiving from rostopic)
if self.filename is None:
return
if self.run is False:
self.pubMessages()
return
#Initialize object feature values
if self.initialized is False:
print "Reading object features from " + self.filename
self.loadObjects()
self.initialized = True
#Read cluster message
clusterArr = ros_data
clusters = ros_data.objects
self.transforms = ros_data.transforms
#Classify clusters
#self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters)
        self.tracked, self.ids, self.errors = self.getMatchingLabels(self.initX, self.labels, clusters)
#Publish labels
if len(clusters) is 0 or self.tracked is None:
return
self.outMsg = LabeledObjects()
msgTime = rospy.Time.now()
head = Header()
head.stamp = msgTime
self.outMsg.header = head
self.outMsg.objects = self.tracked
self.outMsg.labels = self.ids
self.pubMessages()
def pubMessages(self):
if self.outMsg is None or self.transforms is None or self.ids is None:
return
#Publish labels
msgTime = rospy.Time.now()
head = Header()
head.stamp = msgTime
self.outMsg.header = head
self.orderPub.publish(self.outMsg)
#Publish transforms
idx = 0
for l in self.ids:
t = self.transforms[idx]
tl = (t.translation.x, t.translation.y, t.translation.z)
r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w)
self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame')
idx += 1
def loadObjects(self):
self.initX = []
self.labels = []
objFile = open(self.filename, 'r')
for line in objFile.readlines():
self.initX.append(line[:-1].split(','))
self.labels.append(line.split(',')[0])
print str(len(self.initX)) + ' objects loaded'
def hsvDiff(self, c1,c2):
hsv1 = c1[1:4]
r2 = c2.rgba_color.r
g2 = c2.rgba_color.g
b2 = c2.rgba_color.b
hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV)
h1 = hsv2[0][0][0]
h2 = float(hsv1[0])
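        # Circular hue difference: atan2 of sin/cos keeps the result in
        # [-180, 180], so hues near the 0/360 wrap-around compare correctly.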
huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2))))
#return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2]))
return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2]))
def sizeDiff(self, c1,c2):
size = c2.bb_dims.x * c2.bb_dims.y
return abs(size - float(c1[4]))
def calculateError(self, init, cluster):
size = self.sizeW * self.sizeDiff(init,cluster)
hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster)
hue = self.hueW * hueDiff
sat = self.satW * satDiff
val = self.valW * valDiff
total = float(hue + sat + val + size)
return total
def getMatchingLabels(self, expected, labels, clusters):
## Evaluate all possible cluster-label pairs
errorMatrix = []
for l in expected:
labelErrors = []
for c in clusters:
e = self.calculateError(l, c)
labelErrors.append(e)
errorMatrix.append(labelErrors)
## Find the label assignment that minimizes total error
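        # Brute force over permutations is O(n!); fine for a handful of
        # objects, but an assignment solver (e.g. the Hungarian algorithm)
        # scales much better for larger scenes.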
minMatch = None
minError = -1
objList = range(max(len(expected), len(clusters)))
assn = itertools.permutations(objList, len(clusters))
for a in assn:
e = 0
i = 0
newAssn = []
for idx in a:
if idx < len(expected):
e += errorMatrix[idx][i]
newAssn.append(idx)
else:
newAssn.append(-1)
i += 1
if minMatch is None or e < minError:
minMatch = newAssn
minError = e
#print "best: " + str(minMatch) + " with error " + str(minError)
## Convert the best assignment back to a list of labels
match = []
ordered = []
for i in range(len(minMatch)):
#if minMatch[i] is -1:
# match.append(None)
# ordered.append(None)
#else:
if minMatch[i] is not -1:
#print str(errorMatrix[minMatch[i]][i]) + " + "
match.append(clusters[i])
sMsg = String()
sMsg.data = labels[minMatch[i]]
ordered.append(sMsg)
return match, ordered, minError
class ui:
def __init__(self):
self.master = Tk()
self.canvas = Canvas(self.master,width=800,height=500)
self.canvas.pack()
self.timeout = 5
self.waitCount = 0
def startDrawing(self,labeling):
self.drawClusters(labeling.tracked,labeling.ids)
self.master.after(10, self.startDrawing, labeling)
def drawClusters(self,clusters,ids):
if clusters is None or ids is None:
time.sleep(1.0)
return
self.canvas.delete("all")
for idx in range(0,len(clusters)):
c = clusters[idx]
if c is None:
continue
pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)]
offset = complex(c.points_centroid.x,c.points_centroid.y)
cangle = 0 # cmath.exp(c.angle*1j)
rot = []
for x,y in pts:
r = cangle * (complex(x,y)-offset) + offset
rot.append((-r.real + 0.5) * 500)
rot.append((-r.imag + 0.5) * 500)
rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b)
poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5)
label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font="Verdana 10 bold")
self.canvas.pack()
def main(args):
global pf, display
pf = filter()
display = ui()
display.master.after(10,display.startDrawing,pf)
display.master.mainloop()
if __name__ == '__main__':
rospy.init_node("object_labeling", anonymous=False)
rospy.loginfo("Initializing the object labeling node")
main(sys.argv)
rospy.spin()
| 2.046875 | 2 |
main.py | hadarohana/fairseq | 0 | 12793432 | <filename>main.py
# Load the model in fairseq
import torch
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt')
roberta.eval() # disable dropout (or leave in train mode to finetune)
tokens = roberta.encode('Hello world!')
print(tokens)
# import torch
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval() # disable dropout (or leave in train mode to finetune)
# tokens = roberta.encode('Hello world!')
# print(tokens)
# assert tokens.tolist() == [0, 31414, 232, 328, 2]
# roberta.decode(tokens) # 'Hello world!' | 2.703125 | 3 |
lisc/tests/utils.py | jasongfleischer/lisc | 1 | 12793433 | <reponame>jasongfleischer/lisc
"""Helper functions for testing lisc."""
import pkg_resources as pkg
from functools import wraps
from os.path import join as pjoin
from lisc.objects.base import Base
from lisc.data import Articles, ArticlesAll, Term
from lisc.core.modutils import safe_import
from lisc.utils.db import SCDB, create_file_structure, check_directory
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
class TestDB(SCDB):
"""Overloads the SCDB object as database object for tests."""
def __init__(self):
# Initialize from normal database object
base = pkg.resource_filename(__name__, 'test_db')
SCDB.__init__(self, base=base)
def create_files(directory):
"""Creates some test term files."""
term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w')
term_file.write('word\nthing, same')
term_file.close()
excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w')
excl_file.write('need\nrequired')
excl_file.close()
excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w')
excl_file.write('not\navoid')
excl_file.close()
def load_base(set_terms=False, set_clusions=False):
"""Helper function to load Base object for testing."""
base = Base()
if set_terms:
base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']])
if set_clusions:
base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions')
base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions')
return base
def load_arts(add_data=False, n_data=1):
"""Helper function to load Articles object for testing."""
arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion']))
if add_data:
for ind in range(n_data):
arts.add_data('ids', 1)
arts.add_data('titles', 'title')
arts.add_data('journals', ['science', 'sc'])
arts.add_data('authors', [('A', 'B', 'C', 'D')])
arts.add_data('words', 'Lots of words data.')
arts.add_data('keywords', ['lots', 'of', 'keywords'])
arts.add_data('years', 2112)
arts.add_data('dois', 'doi_str')
return arts
def load_arts_all():
"""Helper function to load ArticlesAll object for testing."""
arts = load_arts(add_data=True, n_data=2)
arts_all = ArticlesAll(arts)
return arts_all
def plot_test(func):
"""Decorator for simple testing of plotting functions.
Notes
-----
This decorator closes all plots prior to the test.
After running the test function, it checks an axis was created with data.
It therefore performs a minimal test - asserting the plots exists, with no accuracy checking.
"""
@wraps(func)
def wrapper(*args, **kwargs):
plt.close('all')
func(*args, **kwargs)
ax = plt.gca()
assert ax.has_data()
return wrapper
def optional_test(dependency):
"""Decorator to only run a test if the specified optional dependency is present.
Parameters
----------
dependency : str
The name of an optional dependency to test import of.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if safe_import(dependency):
return func(*args, **kwargs)
return wrapper
return decorator
| 2.359375 | 2 |
img_applications.py | yushuinanrong/PPRL-VGAN | 18 | 12793434 | import os,random
os.environ["KERAS_BACKEND"] = "tensorflow"
from PIL import Image
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
import h5py
import numpy as np
from keras.layers import Input,merge,Lambda
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose
from keras.layers.normalization import *
from keras.optimizers import *
from keras import initializers
import matplotlib.pyplot as plt
import cPickle, random, sys, keras
from keras.models import Model
from functools import partial
normal = partial(initializers.normal, scale=.02)
## load and preprocess the dataset (use FERG for example) ##
batch_size = 256
num_ep = 7
num_pp = 6
epochs = 1000
img_rows, img_cols = 64, 64
clipvalue = 20
noise_dim = 10
c_dim = num_pp
n_dim = 10
z_dim = 128
date = 2018
#
print ('Loading data...')
f = h5py.File('FERG_64_64_color.mat', 'r')
print ('Finished loading....')
f = f['imdb']
label1 = f['id']
label1 = np.asarray(label1)
label1 -= 1
label2 = f['ep']
label2 = np.asarray(label2)
label2 -= 1
label3 = f['set']
label3 = np.asarray(label3)
FrameNum = f['fn']
FrameNum = np.asarray(FrameNum)
x = f['images']
x = np.asarray(x);
x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering
print('x shape:', x.shape)
idx_train = np.asarray(np.where(label3 == 0))
idx_test = np.asarray(np.where(label3 == 1))
print('idx_test shape',idx_test.shape)
x_train = x[idx_train[1,:],:,:,:]
x_test = x[idx_test[1,:],:,:,:]
y_train1 = label1[:,idx_train[1,:]]
y_test1 = label1[:,idx_test[1,:]]
y_train2 = label2[:,idx_train[1,:]]
y_test2 = label2[:,idx_test[1,:]]
y_test1_ori = y_test1
y_test2_ori = y_test2
x_train = (x_train- 127.5)/127.5
x_test = (x_test- 127.5)/127.5
x_train = x_train.astype('float16')
x_test = x_test.astype('float16')
y_train1 = keras.utils.to_categorical(y_train1, num_pp)
y_test1 = keras.utils.to_categorical(y_test1, num_pp)
y_train2 = keras.utils.to_categorical(y_train2, num_ep)
y_test2 = keras.utils.to_categorical(y_test2, num_ep)
###############################
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('label 1 train', y_train1.shape)
print('label 1 test', y_test1.shape)
print('label 2 train', y_train2.shape)
print('label 2 test', y_test2.shape)
#
x_ori = (x - 127.5)/127.5
opt = RMSprop(lr = 0.0003,decay = 1e-6)
dopt = RMSprop(lr = 0.0003,decay = 1e-6)
epsilon_std = 1.0
def KL_loss(y_true, y_pred):
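    # y_pred packs [z_mean | z_log_var]; this computes the analytic KL
    # divergence KL(N(mean, exp(log_var)) || N(0, I)) used as the VAE
    # regularizer.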
z_mean = y_pred[:, 0:z_dim]
z_log_var = y_pred[:, z_dim:2 * z_dim]
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(kl_loss)
def sampling(args):
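    # Reparameterization trick: z = mean + exp(log_var / 2) * eps with
    # eps ~ N(0, I), which keeps sampling differentiable w.r.t. mean/log_var.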
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0.,
stddev=epsilon_std)
return z_mean + K.exp((z_log_var) / 2) * epsilon
############ Build the GAN architecture #################
def model_encoder(z_dim, input_shape, units=512, dropout=0.3):
k = 5
x = Input(input_shape)
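    # NOTE: border_mode is the Keras 1 keyword (accepted with a deprecation
    # warning by early Keras 2); the Keras 2 spelling is padding='same'.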
h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
h = LeakyReLU(0.2)(h)
# h = AveragePooling2D((6,6))(h)
h = Flatten()(h)
# h = Dense(latent_dim, name="encoder_mu")(h)
mean = Dense(z_dim, name="encoder_mean")(h)
logvar = Dense(z_dim, name="encoder_sigma", activation = 'sigmoid')(h)
# meansigma = Model(x, [mean, logsigma],name='encoder')
z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar])
h2 = keras.layers.concatenate([mean,logvar])
return Model(x,[z, h2], name = 'Encoder')
def model_decoder(z_dim, c_dim):
k = 5
x = Input(shape = (z_dim,))
auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')
# auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')
h = keras.layers.concatenate([x, auxiliary_c])
h = Dense(4 * 4 * 128, activation = 'relu')(h)
h = Reshape((4, 4, 128))(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64
return Model([x,auxiliary_c], h, name="Decoder")
# #### reload the trained weights to implement the anticipated applications####
input_img = Input((img_rows,img_cols,3))
z_dim = 128
units = 256
ee = 200
auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')
auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')
# generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3)
encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3)
encoder.load_weights('trained_weight_1.h5')
encoder.compile(loss = 'binary_crossentropy',optimizer = opt)
encoder.summary()
decoder = model_decoder(z_dim = z_dim, c_dim=c_dim)
decoder.load_weights('trained_weight_2.h5')
decoder.compile(loss = 'binary_crossentropy',optimizer = opt)
decoder.summary()
##### expression morphing #####
for xx in xrange(0,1):
idx1 = 4300
idx2 = 7423
img1 = np.squeeze(x_ori[idx1, :, :, :])
img2 = np.squeeze(x_ori[idx2, :, :, :])
z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0))
z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0))
plt.figure(figsize=(2, 2))
img1 =np.squeeze(x_ori[idx1,:,:,:])
img1 = np.uint8(img1*127.5+127.5)
image = Image.fromarray(img1, 'RGB')
image.save('ori_1.tif')
img2 = np.squeeze(x_ori[idx2,:,:,:])
img2 = np.uint8(img2*127.5+127.5)
# plt.imshow(img2)
image = Image.fromarray(img2, 'RGB')
image.save('ori_2.tif')
arr = np.linspace(0.0, 1.0, num=1000)
for ii in xrange(0,1000):
c = np.ones((1,))*0
c = keras.utils.to_categorical(c, num_pp)
z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii])
z_interp = np.reshape(z_interp,(1,z_dim))
img = decoder.predict([z_interp,c])
img = np.squeeze(img)
img = np.uint8(img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('interp_'+str(ii)+'.tif')
# ############### Image impanting ##############
loc = 'bottom'
for pp in xrange(0,1):
for xx in xrange(0,8):
idx = 123
input_img = np.squeeze(x_ori[idx,:,:,:])
img = np.uint8(input_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('original.tif')
impanted_img = np.squeeze(x_ori[idx,:,:,:])
impanted_img[40:55,18:47,:] = 0 # mouth blocked
print('impanted_img',impanted_img.shape)
z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0))
c = np.ones((1,))*1
c = keras.utils.to_categorical(c, num_pp)
print('c',c)
img_rec = decoder.predict([z_impanted,c])
img_rec = np.squeeze(img_rec)
img = np.uint8(impanted_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_blocked_pp1'+'.tif')
img = np.uint8(img_rec*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_rec_pp1'+'.tif')
impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:]
img = np.uint8(impanted_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_replaced_pp1'+'.tif')
#### Generate images without input image ###
def sampling_np( z_mean, z_log_var ):
epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), )
return z_mean + np.exp(z_log_var / 2) * epsilon
# mean and variance of the prior distribution #
mean_train_sup = np.zeros((1,128))
var_train_sup = np.ones((1,128))
for i in xrange(0,num_pp):
for xx in xrange(0,100):
z = sampling_np(mean_train_sup, var_train_sup)
print(z.shape)
c = np.ones(1,)*i
c = keras.utils.to_categorical(c, num_pp)
img = decoder.predict([z, c])
img = np.squeeze(img)
img = np.uint8(img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('synthesis_no_input_'+'pp_'+str(i)+'.tif') | 2.1875 | 2 |
cleanflow/__init__.py | vutsalsinghal/CleanFlow | 1 | 12793435 | <filename>cleanflow/__init__.py
from .assertions import assert_type_str, assert_cols_in_df, assert_type_str_or_list, assert_type_int_or_float
__all__ = ['assert_type_str_or_list','assert_type_int_or_float','assert_type_str','assert_cols_in_df'] | 1.5625 | 2 |
python/seldon_core/__init__.py | juldou/seldon-core | 3,049 | 12793436 | from seldon_core.version import __version__
from .storage import Storage
| 1.054688 | 1 |
panasonic_decode.py | EQware-Engineering-Inc/panasonic-remote | 0 | 12793437 | #!/usr/bin/env python3
import sys
from typing import TextIO
def is_short(x: int) -> bool:
return abs(0x10 - x) < 2
def is_long(x: int) -> bool:
return abs(0x30 - x) < 2
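
# is_short/is_long classify pulse durations (~0x10 vs ~0x30 units). In parse()
# each data bit is a pair: short+short decodes as 0 and short+long as 1,
# accumulated LSB-first into `byte`.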
def parse(f: TextIO) -> None:
for line in f:
try:
data = [int(h, 16) for h in line.split(' ')]
except ValueError:
print(line, end='')
continue
byte = 0x00
bits = 0
data = data[6:] # Remove header and lead in burst
for i in range(0, len(data), 2):
pair = data[i], data[i+1]
if is_short(pair[0]) and is_short(pair[1]):
byte = (byte >> 1)
bits += 1
elif is_short(pair[0]) and is_long(pair[1]):
byte = (byte >> 1) | 0x80
bits += 1
else:
break
if bits == 8:
print(f"{byte:02X}", end=' ')
bits = 0
byte = 0x00
print()
with open(sys.argv[1], 'r') as f:
parse(f)
| 3.40625 | 3 |
examples/simplelogin/main.py | mekarpeles/waltz | 0 | 12793438 | <reponame>mekarpeles/waltz<gh_stars>0
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
main.py
~~~~~~~
Main waltz application.
:copyright: (c) Authentication Dance by Waltz.
:license: GPLv3, see LICENSE for more details.
"""
import waltz
# These are web.py url tuples which map a regex url route to the Class
# responsible for implementing its response. In other words, when the
# client issues/submits a HTTP request to the base server
# (e.g. http://example.com/) the response will be returned according
# to the Index class in the file home.py within the routes directory
# (e.g routes.home.Index)
urls = ('/analytics/?', 'waltz.modules.Analytics',
'/login/?', 'routes.auth.Login',
'/register/?', 'routes.auth.Register',
'/logout/?', 'routes.auth.Logout',
'/?', 'routes.home.Index')
# Default values for a user's session
sessions = {'email': None, 'logged': False}
# These environment variables will be made accessible within the scope
# of html files via the Templator markup language
env = {'split': lambda s, delim: s.split(delim) }
# Setting up and configuring the waltz application. To see all available
# options, refer to waltz/setup.py
app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env)
if __name__ == "__main__":
app.run()
| 2.765625 | 3 |
tests/test_storage_aws.py | Accelize/apyfal | 5 | 12793439 | <reponame>Accelize/apyfal
# coding=utf-8
"""apyfal.storage.aws tests"""
import pytest
from tests.test_storage import (
run_full_real_test_sequence, import_from_generic_test)
def test_s3class_import():
"""S3Storage import"""
# Test: Import by factory without errors
import_from_generic_test('AWS')
@pytest.mark.need_csp
@pytest.mark.need_csp_aws
def test_s3class_real(tmpdir):
"""S3Storage in real case"""
run_full_real_test_sequence('AWS', tmpdir)
| 1.65625 | 2 |
dist-packages/dtk/ui/breadcrumb.py | Jianwei-Wang/python2.7_lib | 0 | 12793440 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Zeng Zhi
#
# Author: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from animation import Animation
from scrolled_window import ScrolledWindow
from button import Button
from theme import ui_theme
from menu import Menu
from constant import DEFAULT_FONT_SIZE
from draw import (draw_line, draw_text, draw_pixbuf)
from utils import (get_content_size, cairo_disable_antialias,
alpha_color_hex_to_cairo, cairo_state)
import gtk
import gobject
import pango
from poplist import Poplist
ARROW_BUTTON_WIDTH = 20
class Bread(gtk.HBox):
'''
    Bread widget is a container which holds Crumb widgets.
@undocumented: create_crumb
@undocumented: enter_notify
@undocumented: leave_notify
@undocumented: event_box_press
@undocumented: enter_cb
@undocumented: redraw_bg
@undocumented: click_cb
@undocumented: move_right
@undocumented: move_left
'''
__gsignals__= {
"entry-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
"item_clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))
}
def __init__(self,
crumb,
arrow_right=ui_theme.get_pixbuf("treeview/arrow_right.png"),
arrow_down=ui_theme.get_pixbuf("treeview/arrow_down.png"),
show_others=False,
show_entry=False,
show_left_right_box=True
):
'''
Initialize Bread class.
@param crumb: Crumb instance or a list of crumb instances
@param arrow_right: Dynamic pixbuf for right arrow, default is \"treeview/arrow_right.png\" from ui theme.
@param arrow_down: Dynamic pixbuf for down arrow, default is \"treeview/arrow_down.png\" from ui theme.
@param show_others: If True, crumbs will not be destroyed, otherwise all crumbs on the right side will be destroyed.
        @param show_entry: If True, an entry will pop up when clicking the empty area in Bread.
'''
# Init.
super(Bread, self).__init__(spacing = 0)
self.arrow_right = arrow_right
self.arrow_down = arrow_down
self.item_list = list()
self.show_others = show_others
self.show_entry = show_entry
self.crumb = self.create_crumb(crumb)
self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons
self.in_event_box = False
# Init left button and right button.
self.show_left_right_box = show_left_right_box
left_box = gtk.HBox(spacing = 0)
right_box = gtk.HBox(spacing = 0)
        # FIXME: the left && right boxes are given a static size here; it would
        # be better to decide at runtime whether they should be shown at all.
if self.show_left_right_box:
left_box.set_size_request(self.button_width, -1)
right_box.set_size_request(self.button_width, -1)
self.left_btn = Button("<")
self.right_btn = Button(">")
self.left_btn.set_no_show_all(True)
self.right_btn.set_no_show_all(True)
self.right_btn.connect("clicked", self.move_right)
self.left_btn.connect("clicked", self.move_left)
self.left_btn.set_size_request(self.button_width, -1)
self.right_btn.set_size_request(self.button_width, -1)
left_box.pack_start(self.left_btn, False, False)
right_box.pack_start(self.right_btn, False, False)
# Init Hbox
self.hbox = gtk.HBox(False, 0)
self.hbox.show()
self.eventbox = gtk.EventBox()
self.eventbox.set_visible_window(False)
if self.show_entry:
self.eventbox.connect("enter-notify-event", self.enter_notify)
self.eventbox.connect("leave-notify-event", self.leave_notify)
self.eventbox.connect("button-press-event", self.event_box_press)
self.hbox.pack_end(self.eventbox, True, True)
self.scroll_win = ScrolledWindow()
self.pack_start(left_box, False, True)
self.pack_start(self.hbox, True, True)
# Add Bread Items
self.adj = self.scroll_win.get_hadjustment()
self.add(self.crumb)
def create_crumb(self, crumb):
'''
Internal function to create a Crumb list for different types of inputs.
        @param crumb: Supported inputs are:
["a label", Menu]
[("a label",[(None, "menu label", None)])]
Crumb instance
[Crumb, Crumb]
'''
if isinstance(crumb, Crumb):
return [crumb,]
elif isinstance(crumb[0], str):
return [Crumb(crumb[0], crumb[1]),]
elif isinstance(crumb[0], Crumb):
return crumb
else:
return [Crumb(c[0], c[1]) for c in crumb]
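        # For example (assuming a Menu instance `menu`):
        #   create_crumb(Crumb("Home", menu))                  -> [Crumb]
        #   create_crumb(["Home", menu])                       -> [Crumb]
        #   create_crumb([Crumb("A", menu), Crumb("B", menu)]) -> returned as-is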
def enter_notify(self, widget, event):
'''
Internal callback function to "enter-notify-event" signal.
@param widget: gtk.EventBox.
@param event: The pointer event of type gtk.gdk.Event.
'''
self.in_event_box = True
def leave_notify(self, widget, event):
'''
Internal callback function to "leave-notify-event" signal.
@param widget: Gtk.EventBox.
@param event: The pointer event of type gtk.gdk.Event.
'''
self.in_event_box = False
def event_box_press(self, widget, event):
'''
Internal callback function to "button-press-event" signal.
@param widget: gtk.EventBox.
@param event: The event of type gtk.gdk.Event.
'''
obj = self.hbox.get_children()
label = []
for o in obj[:-1]:
label.append("/"+o.label)
o.destroy()
self.entry = gtk.Entry()
self.entry.connect("activate", self.enter_cb)
self.entry.set_text("".join(label))
self.entry.show()
self.entry.select_region(0, len(self.entry.get_text()))
self.eventbox.hide()
self.hbox.pack_start(self.entry, True, True)
def enter_cb(self, widget):
'''
Internal callback function to the entry's "activate" signal.
@param widget: gtk.Entry widget instance.
'''
label = widget.get_text()
widget.destroy()
self.eventbox.show()
self.emit("entry-changed", label)
def redraw_bg(self, widget, event):
'''
Internal callback function to "expose-event" signal.
@param widget: gtk.EventBox.
@param event: The event of type gtk.gdk.Event.
'''
cr = widget.window.cairo_create()
rect = widget.allocation
# Draw background.
with cairo_state(cr):
cr.set_source_rgba(*alpha_color_hex_to_cairo(("#def5ff", 1)))
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.fill()
return False
def add(self, crumbs):
'''
Add crumbs. Can accept a Crumb instance or a list of Crumb instances.
@param crumbs: Supported inputs are:
["a label", Menu]
[("a label",[(None, "menu label", None)])]
Crumb instance
[Crumb, Crumb]
'''
crumbs = self.create_crumb(crumbs)
for crumb in crumbs:
crumb.show()
crumb.arrow_right = self.arrow_right
crumb.arrow_down = self.arrow_down
crumb.index_id = len(self.item_list)
crumb.connect("item_clicked", self.click_cb)
self.hbox.pack_start(crumb, False, False)
self.item_list.append(crumb.get_size_request()[0])
page_size = self.adj.page_size
# Show right button if crumbs exceed scrolled window size.
if sum(self.item_list) > page_size and page_size != 1.0:
self.right_btn.show()
def change_node(self, index, crumbs):
'''
Change nodes starting from the specified index.
@param index: Start index
@param crumbs: Crumb instance or Crumb list
For instance, if there exists a list containing [Crumb1, Crumb2],
calling change_node(1, [Crumb3, Crumb4]) will change the list
to [Crumb1, Crumb3, Crumb4]. In this way, applications can manipulate crumbs.
'''
objects = self.hbox.get_children()
for i in objects[index: -1]:
i.destroy()
self.item_list[index:] = []
self.add(crumbs)
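# A hedged example of the behaviour described in the docstring above
# (the crumb names are assumptions, not part of this module):
#
#   bread = Bread([crumb1, crumb2])
#   bread.change_node(1, [crumb3, crumb4])
#   # the crumb trail is now [crumb1, crumb3, crumb4]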
def remove_node_after_index(self, index):
'''
Remove any nodes after given index.
@param index: Nodes after this index will be removed.
'''
for i in self.hbox.get_children()[(index + 1): -1]:
i.destroy()
self.item_list[(index + 1):] = []
def click_cb(self, widget, index, label):
'''
Internal callback function to Crumb's "item_clicked" signal.
@param widget: Crumb instance.
@param index: The index value of clicked crumb.
@param label: Label of the crumb.
'''
if not self.show_others:
for i in self.hbox.get_children()[(index + 1): -1]:
i.destroy()
self.item_list[(index + 1):] = []
self.emit("item_clicked", index, label)
def move_right(self, widget):
'''
Internal callback function to "clicked" signal.
@param widget: Right button.
'''
upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value
shift_value = 0
temp = 0
if upper > (page_size + value):
self.left_btn.show()
for i in xrange(len(self.item_list)):
temp += self.item_list[i]
if temp > (page_size + value):
shift_value = temp - (page_size + value)
# Play animation.
ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1), 200, [value, value + shift_value])
ani.start()
break
if upper <= (page_size + self.adj.value + shift_value):
self.right_btn.hide()
def move_left(self, widget):
'''
Internal callback function to "clicked" signal.
@param widget: Left button.
'''
upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value
shift_value = 0
temp = 0
if value != 0:
self.right_btn.show()
for i in xrange(len(self.item_list)):
temp += self.item_list[i]
if temp >= value:
shift_value = self.item_list[i] - (temp - value)
break
# Play animation.
ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1), 200, [value, value - shift_value])
ani.start()
if (self.adj.value - shift_value) == 0:
self.left_btn.hide()
def set_size(self, width, height):
'''
Set Bread size.
@param width: Width of Bread.
@param height: Height of Bread.
'''
self.scroll_win.set_size_request(width - 2 * self.button_width, height)
self.hbox.set_size_request(-1, self.hbox.get_children()[0].height)
gobject.type_register(Bread)
class BreadMenu(Poplist):
'''
Popup menu for bread.
@undocumented: draw_treeview_mask
@undocumented: shape_bread_menu_frame
@undocumented: expose_bread_menu_frame
'''
def __init__(self,
items,
max_height=None,
max_width=None,
):
'''
Initialize BreadMenu class.
@param items: Item for TreeView.
@param max_height: Maximum height of bread menu, by default is None.
@param max_width: Maximum width of bread menu, by default is None.
'''
Poplist.__init__(self,
items=items,
max_height=max_height,
max_width=max_width,
shadow_visible=False,
shape_frame_function=self.shape_bread_menu_frame,
expose_frame_function=self.expose_bread_menu_frame,
align_size=2,
)
self.set_skip_pager_hint(True)
self.set_skip_taskbar_hint(True)
self.treeview.draw_mask = self.draw_treeview_mask
self.expose_window_frame = self.expose_bread_menu_frame
def draw_treeview_mask(self, cr, x, y, w, h):
cr.set_source_rgb(1, 1, 1)
cr.rectangle(x, y, w, h)
cr.fill()
def shape_bread_menu_frame(self, widget, event):
pass
def expose_bread_menu_frame(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
with cairo_disable_antialias(cr):
outside_border = alpha_color_hex_to_cairo(("#666666", 0.5))
cr.set_line_width(1)
cr.set_source_rgba(*outside_border)
cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 2)
cr.fill()
gobject.type_register(BreadMenu)
class Crumb(gtk.Button):
'''
Crumb class.
@undocumented: enter_button
@undocumented: motion_notify_cb
@undocumented: create_menu
@undocumented: hide_cb
@undocumented: button_press_cb
@undocumented: button_clicked
@undocumented: expose_cb
'''
__gsignals__= {
"item_clicked" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_INT,gobject.TYPE_STRING,))}
def __init__(self,
label,
menu_items = None,
font_size = DEFAULT_FONT_SIZE,
padding_x = 15,
):
'''
Initialize Crumb class.
@param label: Crumb item label
@param menu_items: Crumb menu items as a list, default is None.
@param font_size: Font size, default is DEFAULT_FONT_SIZE.
@param padding_x: Horizontal padding, default is 15 pixels.
'''
super(Crumb, self).__init__()
self.arrow_right = None
self.arrow_down = None
self.menu_min = 18 # menu bar width
self.btn_min = 50 # button width
self.height = 24 # crumb height
self.font_size = font_size
self.padding_x = padding_x
self.menu = self.create_menu(menu_items)
if self.menu is not None:
self.menu.connect("hide", self.hide_cb)
self.menu_press = False
self.menu_show = False
self.index_id = 0
self.set_label(label)
self.in_button = True
self.in_menu = True
self.connect("expose_event", self.expose_cb)
self.connect("button_press_event", self.button_press_cb)
self.connect("clicked", self.button_clicked)
self.connect("motion-notify-event", self.motion_notify_cb)
self.connect("enter-notify-event", self.enter_button)
self.add_events(gtk.gdk.POINTER_MOTION_MASK)
def enter_button(self, widget, event):
in_menu = event.x > self.button_width
self.in_menu = in_menu
def motion_notify_cb(self, widget, event):
'''
Internal callback function to Crumb "motion-notify-event" signal.
@param widget: Crumb
@param event: an event of gtk.gdk.event
'''
in_menu = event.x > self.button_width
if self.in_menu != in_menu:
self.in_menu = in_menu
self.queue_draw()
def create_menu(self, menu_items):
'''
Internal function to create menu.
@param menu_items: A list of menu items, or None.
@return: BreadMenu instance, or None if menu_items is None.
'''
if menu_items is not None and isinstance(menu_items, list):
return BreadMenu(menu_items)
else:
return None
def hide_cb(self, widget):
'''
Internal callback function to the menu's "hide" signal.
@param widget: Menu
'''
if self.menu_press:
self.set_state(gtk.STATE_PRELIGHT)
else:
self.menu_show = False
self.set_state(gtk.STATE_NORMAL)
def button_press_cb(self, widget, event):
'''
Internal callback function to "button-press-event" signal.
@param widget: Crumb
@param event: An event of gtk.gdk.Event
'''
if self.menu is None:
self.in_button = True
self.menu_press = False
else:
self.in_button = event.x < (widget.allocation.width - self.menu_min)
if not self.in_button:
self.menu_press = True
def button_clicked(self, widget):
'''
Internal callback function to "clicked" signal.
@param widget: Crumb
'''
if self.in_button:
self.emit("item_clicked", self.index_id, self.label)
else:
self.menu_press = False
self.menu_show = not self.menu_show
if self.menu_show:
(wx, wy) = self.get_toplevel().window.get_root_origin()
(offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0)
(menu_width, menu_height) = widget.allocation.width, widget.allocation.height
arrow_button_width = ARROW_BUTTON_WIDTH
self.menu.show((wx + offset_x + menu_width - arrow_button_width,
wy + offset_y + menu_height,
),
(0, 0))
def set_label(self, label, font_size = DEFAULT_FONT_SIZE):
'''
Set the crumb's label.
@param label: Label text.
@param font_size: Label's font size, default is DEFAULT_FONT_SIZE.
'''
self.label = label
(self.label_w, self.label_h) = get_content_size(self.label, font_size)
if self.menu is None:
self.set_size_request(
max(self.label_w + 2 * self.padding_x, self.btn_min),
self.height)
self.button_width = self.get_size_request()[0]
else:
self.set_size_request(
max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min),
self.height)
self.button_width = self.get_size_request()[0] - self.menu_min
self.queue_draw()
def expose_cb(self, widget, event):
'''
Internal expose callback function.
@param widget: Crumb instance.
@param event: An event of gtk.gdk.Event.
'''
if self.menu is None:
self.menu_min = 0
cr = widget.window.cairo_create()
rect = widget.allocation
x, y, w, h = rect.x, rect.y, rect.width, rect.height
# Should move this part to Bread class since app_theme is globalized.
arrow_right = self.arrow_right
arrow_down = self.arrow_down
arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height()
arrow_pixbuf = arrow_right
outside_border = alpha_color_hex_to_cairo(("#000000", 0.15))
inner_border = alpha_color_hex_to_cairo(("#ffffff", 0.5))
active_mask = alpha_color_hex_to_cairo(("#000000", 0.1))
if self.menu_show:
self.set_state(gtk.STATE_PRELIGHT)
if widget.state == gtk.STATE_NORMAL:
text_color = ui_theme.get_color("title_text").get_color()
button_color = None
menu_color = None
arrow_pixbuf = arrow_right
elif widget.state == gtk.STATE_PRELIGHT:
text_color = ui_theme.get_color("title_text").get_color()
if self.menu_show:
arrow_pixbuf = arrow_down
else:
arrow_pixbuf = arrow_right
if self.in_menu:
button_color = None
menu_color = inner_border
else:
button_color = inner_border
menu_color = None
elif widget.state == gtk.STATE_ACTIVE:
text_color = ui_theme.get_color("title_text").get_color()
if self.in_button:
button_color = inner_border
menu_color = None
arrow_pixbuf = arrow_right
else:
button_color = None
menu_color = inner_border
arrow_pixbuf = arrow_down
elif widget.state == gtk.STATE_INSENSITIVE:
arrow_pixbuf = arrow_right
text_color = ui_theme.get_color("disable_text").get_color()
disable_bg = ui_theme.get_color("disable_background").get_color()
button_color = [(0, (disable_bg, 1.0)),
(1, (disable_bg, 1.0))]
menu_color = [(0, (disable_bg, 1.0)),
(1, (disable_bg, 1.0))]
# Draw background.
if widget.state != gtk.STATE_NORMAL:
# Draw button border.
def draw_rectangle(cr, x, y, w, h):
draw_line(cr, x - 1, y, x + w, y)  # top
draw_line(cr, x, y + h, x + w, y + h)  # bottom
draw_line(cr, x, y, x, y + h)  # left
draw_line(cr, x + w, y, x + w, y + h - 1)  # right
cr.set_source_rgba(*outside_border)
if button_color:
draw_rectangle(cr, x + 1, y + 1, self.button_width - 1, h - 1)
elif menu_color:
draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h - 1)
# Draw inner border.
cr.set_source_rgba(*inner_border)
if button_color:
draw_rectangle(cr, x + 2, y + 2, self.button_width - 3, h - 3)
elif menu_color:
draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min - 2, h - 3)
if widget.state == gtk.STATE_ACTIVE:
cr.set_source_rgba(*active_mask)
if button_color:
cr.rectangle(x + 2, y + 2, self.button_width - 4, h - 4)
cr.fill()
elif menu_color:
cr.rectangle(x + self.button_width + 1, y + 2, self.menu_min - 3, h - 4)
cr.fill()
if self.menu != None:
# Draw an arrow.
draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2, y + (h - arrow_height) / 2)
# Draw text.
draw_text(cr, self.label, x, y, self.button_width, h, self.font_size, text_color,
alignment = pango.ALIGN_CENTER)
return True
gobject.type_register(Crumb)
if __name__ == "__main__":
import gtk
def add_panel(widget):
crumb = Crumb("Child",menu)
bread.add(crumb)
def change_root_node(widget):
crumb1 = Crumb("Yet Another Root", menu)
crumb2 = Crumb("Yet Another Child", menu)
bread.change_node(0, [crumb1, crumb2])
def change_entry(widget, path):
# Application can check if path is valid or not
path_list = path.split("/")[1:]
bread.change_node(0, [Crumb(i, menu) for i in path_list])
menu = Menu([
(None, "测试1", None),
(None, "测试2", None),
],
shadow_visible = False,
)
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.connect("destroy", lambda w: gtk.main_quit())
win.set_default_size(600, 300)
vbox = gtk.VBox()
######################################
# test breadcrumb widget
bread = Bread([("Root", menu),
("Level1", menu)],
show_others = False,
show_entry = True)
bread.add(["xxx",menu])
# Must set_size
bread.set_size(200, -1)
bread.connect("entry-changed", change_entry)
#####################################
vbox.pack_start(bread, False, False, 0)
# Test Item
add_path_button = gtk.Button("Add Item")
add_path_button.connect("clicked", add_panel)
vbox.pack_start(add_path_button, True, False, 0)
test_change_node = gtk.Button("Change Root node")
test_change_node.connect("clicked", change_root_node)
vbox.pack_start(test_change_node, True, False, 0)
win.add(vbox)
win.show_all()
gtk.main()
| 2.09375 | 2 |
component/parameter/directory.py | BuddyVolly/bfast_preanalysis | 1 | 12793441 | from pathlib import Path
result_dir = Path().home().joinpath('module_results/bfast_preanalysis')
start = """
### Start date selection
Pick the start date of the time series.
"""
end = """
### End date selection
Pick the end date of the time series.
"""
select = """
### Satellite selection
Select the satellite(s) you want to include for the pre-analysis.
"""
sr = """
### Selection of collection type
Choose between Surface Reflectance and Top-of-Atmosphere collections for the selected satellites.
"""
stats = """
### Selection of statistics
Select the statistical measure you want to apply, and switch on annual for per-year calculations.
"""
| 2.453125 | 2 |
roobet-Listing1.py | AdamSierakowski/Math-Behind-Roobet-s-Crash-Game | 1 | 12793442 | import hashlib
def prev_hash(hash_code):
return hashlib.sha256(hash_code.encode()).hexdigest()
def main():
game_hash = 'cc4a75236ecbc038c37729aa5ced461e36155319e88fa375c\
994933b6a42a0c4'
print(prev_hash(game_hash))
main()
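# A hedged sketch, not part of the original listing: because each game's
# hash is the SHA-256 digest of the hash that follows it, any known game
# hash lets you walk the chain backwards. `steps` is an assumed name.
def walk_chain(hash_code, steps):
    for _ in range(steps):
        hash_code = prev_hash(hash_code)
        yield hash_code

# Example: list(walk_chain('cc4a752...', 3)) yields the three preceding hashes.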
| 2.875 | 3 |
home/urls.py | mxpxgx/moiprez.com | 0 | 12793443 | # from django.conf.urls import url
# from home.views import HomeView
# urlpatterns = [
# url(r'^', HomeView.as_view()),
# ] | 1.429688 | 1 |
datasets/dataset_read.py | nik1806/HLCV-Project | 8 | 12793444 | import sys
sys.path.append('../loader')
# from unaligned_data_loader import UnalignedDataLoader
from datasets.svhn import load_svhn
from datasets.mnist import load_mnist
from datasets.usps import load_usps
# from gtsrb import load_gtsrb
# from synth_traffic import load_syntraffic
from datasets.create_dataloader import create_DataLoader
def return_dataset(data, scale=False, usps=False, all_use='no'):
if data == 'svhn':
train_image, train_label, \
test_image, test_label = load_svhn()
if data == 'mnist':
train_image, train_label, \
test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use)
print(train_image.shape)
if data == 'usps':
train_image, train_label, \
test_image, test_label = load_usps(all_use=all_use)
# if data == 'synth':
# train_image, train_label, \
# test_image, test_label = load_syntraffic()
# if data == 'gtsrb':
# train_image, train_label, \
# test_image, test_label = load_gtsrb()
return train_image, train_label, test_image, test_label
# We only need the source, not the target.
def dataset_read(source, target, batch_size, scale=False, all_use='no'):
# Return train and test loader
S = {}
S_test = {}
# T = {}
# T_test = {}
usps = False
if source == 'usps': # or target == 'usps':
usps = True
train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale,
usps=usps, all_use=all_use)
# train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps,
# all_use=all_use)
S['imgs'] = train_source
S['labels'] = s_label_train
# T['imgs'] = train_target
# T['labels'] = t_label_train
# input target samples for both
S_test['imgs'] = test_source
S_test['labels'] = s_label_test
# T_test['imgs'] = test_target
# T_test['labels'] = t_label_test
scale = 40 if source == 'synth' else 28 if source == 'usps' or target == 'usps' else 32
# scale = 40 if source == 'synth' else 28 if source == 'usps' else 32
# train_loader = UnalignedDataLoader()
train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False)
# dataset = train_loader.load_data()
# test_loader = UnalignedDataLoader()
val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False)
# dataset_test = test_loader.load_data()
return train_loader, val_loader
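# A hedged usage sketch (argument values are assumptions, and it is assumed
# that the loaders yield (images, labels) batches):
#
#   train_loader, val_loader = dataset_read('mnist', 'usps', batch_size=128,
#                                           scale=True, all_use='no')
#   for images, labels in train_loader:
#       ...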
| 2.15625 | 2 |
DatasetTools/DownloadDataset/DownloadSingleDataset.py | frenky-strasak/HTTPSDetector | 9 | 12793445 |
"""
Download a single dataset which has a bro folder.
USAGE:
python DownloadSingleDataset.py https://mcfp.felk.cvut.cz/publicDatasets/CTU-Malware-Capture-Botnet-31/
"""
import sys
from bs4 import BeautifulSoup
import requests
import urllib2
import os
import shutil
def find_files(url):
# url = "https://mcfp.felk.cvut.cz/publicDatasets/"
soup = BeautifulSoup(requests.get(url).text, "lxml")
hrefs = []
for a in soup.find_all('a'):
try:
# print a['href']
hrefs.append(a['href'])
except:
pass
# print hrefs
return hrefs
def save_manager(url, dataset_name):
directiry_name = "/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/" + dataset_name
if os.path.exists(directiry_name):
shutil.rmtree(directiry_name)
os.makedirs(directiry_name)
# Bro
folder_path = directiry_name + "/bro/"
os.makedirs(folder_path)
file_sizes = 0
bro = find_files(url + 'bro/')
for i in range(len(bro)):
if '.log' in bro[i]:
file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro')
# Suricata
folder_path = directory_name + "/suricata/"
os.makedirs(folder_path)
bro = find_files(url + 'suricata/')
for i in range(len(bro)):
if '.log' in bro[i] or '.json' in bro[i]:
file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata')
return file_sizes
def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder):
print bro_log, "is downloading..."
file_size = 0
u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log)
meta = u.info()
file_size += int(meta.getheaders("Content-Length")[0])
f = open(file_name, 'wb')
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1)
print status,
f.close()
return file_size
def get_dataset_name_from_url(url):
names = url.split('/')
names.pop()
return names.pop()
if __name__ == '__main__':
datasets_size = 0
if len(sys.argv) == 2:
url = sys.argv[1]
datasets_size += save_manager(url, get_dataset_name_from_url(url))
# find_files(url+'CTU-Malware-Capture-Botnet-31/')
print "Complet Dataset size:", (datasets_size / (1024.0 * 1024.0)), "MB"
| 3.3125 | 3 |
tests/datastructures/arrays/test_spiral_matrix.py | sikakente/educative-io-python | 1 | 12793446 |
import unittest
import pytest
from datastructures.arrays import spiral_matrix as spm
@pytest.mark.parametrize("matrix,expected", [
([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]),
([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7])
])
def test_spiral_matrix(matrix, expected):
assert expected == spm.spiral_matrix(matrix)
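# For reference, a minimal sketch of a spiral traversal that satisfies the
# parametrized cases above; this is not the module under test, and the real
# datastructures.arrays.spiral_matrix implementation may differ. Note that
# this version consumes its input matrix.
def _spiral_matrix_sketch(matrix):
    result = []
    while matrix:
        result += matrix.pop(0)  # top row, left to right
        if matrix and matrix[0]:
            for row in matrix:  # right column, top to bottom
                result.append(row.pop())
        if matrix:
            result += matrix.pop()[::-1]  # bottom row, right to left
        if matrix and matrix[0]:
            for row in matrix[::-1]:  # left column, bottom to top
                result.append(row.pop(0))
    return result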
if __name__ == '__main__':
unittest.main()
| 3.015625 | 3 |
core/utils/middlewares/logger_middleware.py | AKurmazov/hoteluni_bot | 2 | 12793447 | import logging
import time
from aiogram import Dispatcher, types
from aiogram.dispatcher.middlewares import BaseMiddleware
HANDLED_STR = ["Unhandled", "Handled"]
class LoggingMiddleware(BaseMiddleware):
def __init__(self, logger=None):
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(self.__class__.__name__)
self.logger = logger
super(LoggingMiddleware, self).__init__()
def check_timeout(self, obj):
start = obj.conf.get("_start", None)
if start:
del obj.conf["_start"]
return round((time.time() - start) * 1000)
return -1
async def on_pre_process_update(self, update: types.Update, data: dict):
update.conf["_start"] = time.time()
pass
async def on_post_process_update(self, update: types.Update, result, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(
f"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)"
)
async def on_pre_process_message(self, message: types.Message, data: dict):
self.logger.info(
f'Received message [TEXT: "{message.text}"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]'
)
async def on_post_process_message(
self, message: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_pre_process_edited_message(self, edited_message, data: dict):
pass
# self.logger.info(f"Received edited message [ID:{edited_message.message_id}] "
# f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_post_process_edited_message(self, edited_message, results, data: dict):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"edited message [ID:{edited_message.message_id}] "
# f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_pre_process_channel_post(
self, channel_post: types.Message, data: dict
):
pass
# self.logger.info(f"Received channel post [ID:{channel_post.message_id}] "
# f"in channel [ID:{channel_post.chat.id}]")
async def on_post_process_channel_post(
self, channel_post: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"channel post [ID:{channel_post.message_id}] "
# f"in chat [{channel_post.chat.type}:{channel_post.chat.id}]")
async def on_pre_process_edited_channel_post(
self, edited_channel_post: types.Message, data: dict
):
pass
# self.logger.info(f"Received edited channel post [ID:{edited_channel_post.message_id}] "
# f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_post_process_edited_channel_post(
self, edited_channel_post: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"edited channel post [ID:{edited_channel_post.message_id}] "
# f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_pre_process_inline_query(
self, inline_query: types.InlineQuery, data: dict
):
pass
# self.logger.info(f"Received inline query [ID:{inline_query.id}] "
# f"from user [ID:{inline_query.from_user.id}]")
async def on_post_process_inline_query(
self, inline_query: types.InlineQuery, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"inline query [ID:{inline_query.id}] "
# f"from user [ID:{inline_query.from_user.id}]")
async def on_pre_process_chosen_inline_result(
self, chosen_inline_result: types.ChosenInlineResult, data: dict
):
pass
# self.logger.info(f"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
# f"from user [ID:{chosen_inline_result.from_user.id}] "
# f"result [ID:{chosen_inline_result.result_id}]")
async def on_post_process_chosen_inline_result(
self, chosen_inline_result, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
# f"from user [ID:{chosen_inline_result.from_user.id}] "
# f"result [ID:{chosen_inline_result.result_id}]")
async def on_pre_process_callback_query(
self, callback_query: types.CallbackQuery, data: dict
):
if callback_query.message:
self.logger.info(
f"Received callback query [DATA:{callback_query.data}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [USERNAME:{callback_query.from_user.username}]"
)
async def on_post_process_callback_query(self, callback_query, results, data: dict):
pass
# if callback_query.message:
# if callback_query.message.from_user:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
# f"from user [ID:{callback_query.message.from_user.id}]")
# else:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
# else:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"from inline message [ID:{callback_query.inline_message_id}] "
# f"from user [ID:{callback_query.from_user.id}]")
async def on_pre_process_shipping_query(
self, shipping_query: types.ShippingQuery, data: dict
):
pass
# self.logger.info(f"Received shipping query [ID:{shipping_query.id}] "
# f"from user [ID:{shipping_query.from_user.id}]")
async def on_post_process_shipping_query(self, shipping_query, results, data: dict):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"shipping query [ID:{shipping_query.id}] "
# f"from user [ID:{shipping_query.from_user.id}]")
async def on_pre_process_pre_checkout_query(
self, pre_checkout_query: types.PreCheckoutQuery, data: dict
):
pass
# self.logger.info(f"Received pre-checkout query [ID:{pre_checkout_query.id}] "
# f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_post_process_pre_checkout_query(
self, pre_checkout_query, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"pre-checkout query [ID:{pre_checkout_query.id}] "
# f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_pre_process_error(self, update: types.Update, error, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(
f"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)"
)
def on_startup(dp: Dispatcher):
dp.middleware.setup(LoggingMiddleware())
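# A hedged wiring sketch (aiogram v2-style setup; the token value is an
# assumption, not part of this module):
#
#   from aiogram import Bot, Dispatcher
#   bot = Bot(token="<token>")
#   dp = Dispatcher(bot)
#   on_startup(dp)  # installs LoggingMiddleware on the dispatcher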
| 2.3125 | 2 |
iblog/api_server/model.py | openjw/blog | 0 | 12793448 | from dataclasses import dataclass, field
from typing import Any
@dataclass
class Response(object):
ok: bool = field(default=False)
data: Any = field(default=None)
message: str = field(default='')
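# A hedged usage sketch (field values are assumptions):
#
#   resp = Response(ok=True, data={"id": 1}, message="created")
#   assert resp.ok and resp.data["id"] == 1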
| 2.6875 | 3 |
montreal_forced_aligner/multiprocessing/__init__.py | ai-zahran/Montreal-Forced-Aligner | 0 | 12793449 | """Multiprocessing functions and classes for Montreal Forced Aligner"""
from .alignment import acc_stats # noqa
from .alignment import align # noqa
from .alignment import calc_fmllr # noqa
from .alignment import calc_lda_mllt # noqa
from .alignment import compile_information # noqa
from .alignment import compile_train_graphs # noqa
from .alignment import compute_alignment_improvement # noqa
from .alignment import convert_ali_to_textgrids # noqa
from .alignment import convert_alignments # noqa
from .alignment import create_align_model # noqa
from .alignment import lda_acc_stats # noqa
from .alignment import mono_align_equal # noqa
from .alignment import train_map # noqa
from .alignment import tree_stats  # noqa
from .helper import Counter, Stopped, run_mp, run_non_mp # noqa
from .ivector import acc_global_stats # noqa
from .ivector import acc_ivector_stats # noqa
from .ivector import extract_ivectors # noqa
from .ivector import gauss_to_post # noqa
from .ivector import gmm_gselect # noqa
from .ivector import segment_vad # noqa
from .pronunciations import generate_pronunciations # noqa
from .transcription import transcribe, transcribe_fmllr # noqa
| 2.03125 | 2 |
about/models.py | sahin88/Django_RestFramework_ReactJS_PortfolioApp_Frontend | 0 | 12793450 | from django.db import models
class About(models.Model):
about_image = models.ImageField(upload_to="about/")
about_exp1 = models.TextField(blank=True, null=True)
about_exp2 = models.TextField(blank=True, null=True)
class Programms(models.Model):
name = models.CharField(max_length=255)
icon = models.CharField(max_length=255)
percentage = models.CharField(max_length=25, blank=True, null=True)
| 2.1875 | 2 |